repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
jdwittenauer/ionyx | tests/prophet_regressor_test.py | 1 | 1082 | import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import GridSearchCV, cross_val_score
from ionyx.contrib import ProphetRegressor
from ionyx.contrib import SuppressOutput
from ionyx.contrib import TimeSeriesSplit
from ionyx.datasets import DataSetLoader
print('Beginning prophet regressor test...')
data, X, y = DataSetLoader.load_time_series()
prophet = ProphetRegressor(n_changepoints=0)
with SuppressOutput():
    prophet.fit(X, y)
print('Model score = {0}'.format(mean_absolute_error(y, prophet.predict(X))))

cv = TimeSeriesSplit(n_splits=3)
with SuppressOutput():
    score = cross_val_score(prophet, X, y, cv=cv)
print('Cross-validation score = {0}'.format(score))

param_grid = [
    {
        'n_changepoints': [0, 25]
    }
]
grid = GridSearchCV(prophet, param_grid=param_grid, cv=cv, return_train_score=True)
with SuppressOutput():
    grid.fit(X, y)
results = pd.DataFrame(grid.cv_results_)
results = results.sort_values(by='mean_test_score', ascending=False)
print('Grid search results:')
print(results)
print('Done.')
| apache-2.0 |
rabarona/incubator-spot | spot-setup/migration/migrate_old_flow_data.py | 7 | 11020 | #!/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import subprocess
import fnmatch
import re
import pandas as pd
import datetime
from utilities import util
old_oa_path=sys.argv[1]
staging_db=sys.argv[2]
hdfs_staging_path=sys.argv[3]
dest_db = sys.argv[4]
impala_daemon = sys.argv[5]
# Execution example:
#./migrate_old_flow_data.py '/home/spotuser/incubator-spot_old/spot-oa' 'spot_migration' '/user/spotuser/spot_migration/' 'migrated' 'node01'
def main():
    log = util.get_logger('SPOT.MIGRATE.FLOW')
    cur_path = os.path.dirname(os.path.realpath(__file__))
    new_spot_path = os.path.split(os.path.split(cur_path)[0])[0]
    new_oa_path = '{0}/spot-oa'.format(new_spot_path)
    log.info('New Spot OA path: {0}'.format(new_oa_path))
    old_spot_path = os.path.split(old_oa_path)[0]

    log.info("Creating HDFS paths for Impala tables")
    util.create_hdfs_folder('{0}/flow/scores'.format(hdfs_staging_path),log)
    util.create_hdfs_folder('{0}/flow/chords'.format(hdfs_staging_path),log)
    util.create_hdfs_folder('{0}/flow/edge'.format(hdfs_staging_path),log)
    util.create_hdfs_folder('{0}/flow/summary'.format(hdfs_staging_path),log)
    util.create_hdfs_folder('{0}/flow/storyboard'.format(hdfs_staging_path),log)
    util.create_hdfs_folder('{0}/flow/threat_investigation'.format(hdfs_staging_path),log)
    util.create_hdfs_folder('{0}/flow/timeline'.format(hdfs_staging_path),log)
    util.execute_cmd('hdfs dfs -setfacl -R -m user:impala:rwx {0}'.format(hdfs_staging_path),log)

    log.info("Creating Staging tables in Impala")
    util.execute_cmd('impala-shell -i {0} --var=hpath={1} --var=dbname={2} -c -f create_flow_migration_tables.hql'.format(impala_daemon, hdfs_staging_path, staging_db),log)

    ## Flow Ingest Summary
    log.info('Processing Flow Ingest Summary')
    ing_sum_path='{0}/data/flow/ingest_summary/'.format(old_oa_path)
    pattern='is_??????.csv'
    staging_table_name = 'flow_ingest_summary_tmp'
    dest_table_name = 'flow_ingest_summary'
    if os.path.exists(ing_sum_path):
        for file in fnmatch.filter(os.listdir(ing_sum_path), pattern):
            log.info('Processing file: {0}'.format(file))
            filepath='{0}{1}'.format(ing_sum_path, file)
            df = pd.read_csv(filepath)
            s = df.iloc[:,0]
            l_dates = list(s.unique())
            l_dates = map(lambda x: x[0:10].strip(), l_dates)
            l_dates = filter(lambda x: re.match('\d{4}[-/]\d{2}[-/]\d{1}', x), l_dates)
            s_dates = set(l_dates)
            for date_str in s_dates:
                dt = datetime.datetime.strptime(date_str, '%Y-%m-%d')
                log.info('Processing day: {0} {1} {2} {3}'.format(date_str, dt.year, dt.month, dt.day))
                records = df[df['date'].str.contains(date_str)]
                filename = "ingest_summary_{0}{1}{2}.csv".format(dt.year, dt.month, dt.day)
                records.to_csv(filename, index=False)
                load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
                util.execute_hive_cmd(load_cmd, log)
                insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT tdate, total FROM {5}.{6}".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
                util.execute_hive_cmd(insert_cmd, log)
                os.remove(filename)

    ## Iterating days
    days_path='{0}/data/flow/'.format(old_oa_path)
    if os.path.exists(days_path):
        for day_folder in fnmatch.filter(os.listdir(days_path), '2*'):
            print day_folder
            dt = datetime.datetime.strptime(day_folder, '%Y%m%d')
            log.info('Processing day: {0} {1} {2} {3}'.format(day_folder, dt.year, dt.month, dt.day))
            full_day_path = '{0}{1}'.format(days_path,day_folder)

            ## Flow Scores and Flow Threat Investigation
            filename = '{0}/flow_scores.csv'.format(full_day_path)
            if os.path.isfile(filename):
                log.info("Processing Flow Scores")
                staging_table_name = 'flow_scores_tmp'
                dest_table_name = 'flow_scores'
                load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
                util.execute_hive_cmd(load_cmd, log)
                insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT tstart,srcip,dstip,sport,dport,proto,ipkt,ibyt,opkt,obyt,score,rank,srcIpInternal,destIpInternal,srcGeo,dstGeo,srcDomain,dstDomain,srcIP_rep,dstIP_rep FROM {5}.{6}".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
                util.execute_hive_cmd(insert_cmd, log)
                log.info("Processing Flow Threat Investigation")
                staging_table_name = 'flow_scores_tmp'
                dest_table_name = 'flow_threat_investigation'
                insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT tstart,srcip,dstip,sport,dport,sev FROM {5}.{6} WHERE sev > 0;".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
                util.execute_hive_cmd(insert_cmd, log)

            # Flow Chords
            log.info("Processing Flow Chords")
            staging_table_name = 'flow_chords_tmp'
            dest_table_name = 'flow_chords'
            for file in fnmatch.filter(os.listdir(full_day_path), 'chord*.tsv'):
                ip = re.findall("chord-(\S+).tsv", file)[0]
                ip = ip.replace('_', '.')
                log.info("Processing File: {0} with IP:{1}".format(file, ip))
                filename = '{0}/{1}'.format(full_day_path, file)
                load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
                util.execute_hive_cmd(load_cmd, log)
                insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT '{5}', srcip, dstip, ibyt, ipkt FROM {6}.{7};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, ip, staging_db, staging_table_name)
                util.execute_hive_cmd(insert_cmd, log)

            ## Flow Edge
            log.info("Processing Flow Edge")
            staging_table_name = 'flow_edge_tmp'
            dest_table_name = 'flow_edge'
            pattern = 'edge*.tsv'
            edge_files = fnmatch.filter(os.listdir(full_day_path), pattern)
            for file in edge_files:
                parts = (re.findall("edge-(\S+).tsv", file)[0]).split('-')
                hh = int(parts[2])
                mn = int(parts[3])
                log.info("Processing File: {0} with HH: {1} and MN: {2}".format(file, hh, mn))
                filename = '{0}/{1}'.format(full_day_path, file)
                load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
                util.execute_hive_cmd(load_cmd, log)
                insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT tstart, srcip, dstip, sport, dport, proto, flags, tos, ibyt, ipkt, input, output, rip, obyt, opkt, {5}, {6} FROM {7}.{8} WHERE srcip is not NULL;".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, hh, mn, staging_db, staging_table_name)
                util.execute_hive_cmd(insert_cmd, log)

            ##flow_storyboard
            log.info("Processing Flow Storyboard")
            staging_table_name = 'flow_storyboard_tmp'
            dest_table_name = 'flow_storyboard'
            filename = '{0}/threats.csv'.format(full_day_path)
            if os.path.isfile(filename):
                load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
                util.execute_hive_cmd(load_cmd, log)
                insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT ip_threat, title, text FROM {5}.{6};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
                util.execute_hive_cmd(insert_cmd, log)

            ##flow_timeline
            log.info("Processing Flow Timeline")
            staging_table_name = 'flow_timeline_tmp'
            dest_table_name = 'flow_timeline'
            for file in fnmatch.filter(os.listdir(full_day_path), 'sbdet*.tsv'):
                ip = re.findall("sbdet-(\S+).tsv", file)[0]
                log.info("Processing File: {0} with IP:{1}".format(file, ip))
                filename = '{0}/{1}'.format(full_day_path, file)
                load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
                util.execute_hive_cmd(load_cmd, log)
                insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT '{5}', tstart, tend, srcip, dstip, proto, sport, dport, ipkt, ibyt FROM {6}.{7};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, ip, staging_db, staging_table_name)
                util.execute_hive_cmd(insert_cmd, log)

    log.info("Dropping staging tables")
    util.execute_cmd('impala-shell -i {0} --var=dbname={1} -c -f drop_flow_migration_tables.hql'.format(impala_daemon, staging_db),log)

    log.info("Removing staging tables' path in HDFS")
    util.execute_cmd('hadoop fs -rm -r {0}/flow/'.format(hdfs_staging_path),log)

    log.info("Moving CSV data to backup folder")
    util.execute_cmd('mkdir {0}/data/backup/'.format(old_oa_path),log)
    util.execute_cmd('cp -r {0}/data/flow/ {0}/data/backup/'.format(old_oa_path),log)
    util.execute_cmd('rm -r {0}/data/flow/'.format(old_oa_path),log)

    log.info("Invalidating metadata in Impala to refresh tables content")
    util.execute_cmd('impala-shell -i {0} -q "INVALIDATE METADATA;"'.format(impala_daemon),log)

    log.info("Creating ipynb template structure and copying advanced mode and threat investigation ipynb templates for each pre-existing day in the new Spot location")
    ipynb_pipeline_path = '{0}/ipynb/flow/'.format(old_oa_path)
    if os.path.exists(ipynb_pipeline_path):
        for folder in os.listdir(ipynb_pipeline_path):
            log.info("Creating ipynb flow folders in new Spot location: {0}".format(folder))
            util.execute_cmd('mkdir -p {0}/ipynb/flow/{1}/'.format(new_oa_path, folder),log)
            log.info("Copying advanced mode ipynb template")
            util.execute_cmd('cp {0}/oa/flow/ipynb_templates/Advanced_Mode_master.ipynb {0}/ipynb/flow/{1}/Advanced_Mode.ipynb'.format(new_oa_path, folder),log)
            log.info("Copying threat investigation ipynb template")
            util.execute_cmd('cp {0}/oa/flow/ipynb_templates/Threat_Investigation_master.ipynb {0}/ipynb/flow/{1}/Threat_Investigation.ipynb'.format(new_oa_path, folder),log)

if __name__=='__main__':
    main()
| apache-2.0 |
brianlsharp/MissionPlanner | Lib/site-packages/scipy/optimize/nonlin.py | 53 | 46004 | r"""
Nonlinear solvers
=================
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
--------
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
========
Small problem
-------------
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
Large problem
-------------
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print 'Residual', abs(residual(sol)).max()
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <[email protected]>
# Distributed under the same license as Scipy.
import sys
import numpy as np
from scipy.linalg import norm, solve, inv, qr, svd, lstsq, LinAlgError
from numpy import asarray, dot, vdot
if sys.platform != 'cli':
import scipy.sparse.linalg
import scipy.sparse
import scipy.lib.blas as blas
import inspect
else:
print "Warning: scipy.optimize.nonlin package is not supported under IronPython yet."
from linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov',
# Deprecated functions:
'broyden_generalized', 'anderson2', 'broyden3']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
x0 : array-like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : array-like
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/
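Examples
--------
A minimal sketch, mirroring the small problem of the module docstring (the
`jacobian` string and the tolerance are illustrative choices):

>>> import numpy as np
>>> def F(x):
...     return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> x = nonlin_solve(F, [1, 1, 1, 1], jacobian='broyden1', f_tol=1e-14)
>>> np.cos(x) + x[::-1]
array([ 1.,  2.,  3.,  4.])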
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_treshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
if condition.check(Fx, x, dx):
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x += dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_treshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
raise NoConvergence(_array_like(x, x0))
return _array_like(x, x0)
_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return True
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return self.iteration > self.iter
# NB: condition must succeed for rtol=inf even if norm == 0
return ((f_norm <= self.f_tol and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come useful when implementing trust region
etc. algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns A^H * v
rsolve : optional
Returns A^-H * v
matmat : optional
Returns A * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
self.update(x, F)
class InverseJacobian(object):
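"""
Expose the *inverse* action of a wrapped `Jacobian`: `matvec` delegates to
``jacobian.solve`` (and `rmatvec` to ``jacobian.rsolve`` when available),
so that an approximate Jacobian can be used as a preconditioner; see
`Jacobian.aspreconditioner`.
"""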
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# autoscale the initial Jacobian parameter
self.alpha = 0.5*max(norm(x0), 1) / norm(f0)
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
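For illustration, a small dense check of this representation (assuming
``numpy`` is imported as ``np``; the vectors are arbitrary):

>>> m = LowRankMatrix(alpha=1.0, n=2, dtype=float)
>>> m.append(np.array([1., 0.]), np.array([0., 2.]))
>>> np.array(m)
array([[ 1.,  2.],
       [ 0.,  1.]])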
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = blas.get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = blas.get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [vR]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='qr', econ=True)
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Extra parameters:
- ``to_retain``: number of SVD components to retain when
rank reduction is done. Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
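Examples
--------
A minimal sketch using this class directly with `nonlin_solve` (``F`` as in
the small problem of the module docstring; the parameter values are
illustrative):

>>> jac = BroydenFirst(alpha=0.5, reduction_method=('svd', 3), max_rank=10)
>>> x = nonlin_solve(F, [1, 1, 1, 1], jacobian=jac, f_tol=1e-14)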
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)
corresponding to Broyden's second method.
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
The Jacobian is formed from a 'best' solution in the space
spanned by the last `M` vectors. As a result, only an MxM matrix
inversion and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
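# A hedged usage sketch via the module-level `anderson` wrapper defined at
# the bottom of this file (`F` as in the small problem of the module
# docstring; the parameter values below are illustrative, not tuned
# recommendations):
#
#   >>> x = anderson(F, [1, 1, 1, 1], M=5, w0=0.01, f_tol=1e-10)
#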
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=jac.aspreconditioner())
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by numerical
differentiation:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [KK]_,
and for the LGMRES sparse inverse method, see [BJM]_.
References
----------
.. [KK] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2003).
.. [BJM] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
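Examples
--------
A minimal sketch (``residual`` and ``guess`` as in the large-problem example
of the module docstring; the keyword values are illustrative):

>>> sol = newton_krylov(residual, guess, method='lgmres', verbose=1)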
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
It inspects the keyword arguments of ``jac.__init__``, and allows to
use the same arguments in the wrapper function, in addition to the
keyword arguments of `nonlin_solve`
"""
import inspect
args, varargs, varkw, defaults = inspect.getargspec(jac.__init__)
kwargs = zip(args[-len(defaults):], defaults)
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that it's keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec wrapper in ns
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
# Deprecated functions
@np.deprecate
def broyden_generalized(*a, **kw):
"""Use *anderson(..., w0=0)* instead"""
kw.setdefault('w0', 0)
return anderson(*a, **kw)
@np.deprecate
def broyden1_modified(*a, **kw):
"""Use `broyden1` instead"""
return broyden1(*a, **kw)
@np.deprecate
def broyden_modified(*a, **kw):
"""Use `anderson` instead"""
return anderson(*a, **kw)
@np.deprecate
def anderson2(*a, **kw):
"""Use `anderson` instead"""
return anderson(*a, **kw)
@np.deprecate
def broyden3(*a, **kw):
"""Use `broyden2` instead"""
return broyden2(*a, **kw)
@np.deprecate
def vackar(*a, **kw):
"""Use `diagbroyden` instead"""
return diagbroyden(*a, **kw)
| gpl-3.0 |
javalovelinux/SparkGroovyScript | python/pyspark/sql/context.py | 11 | 23630 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession(sparkContext)
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not given, it defaults to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
self.sparkSession.catalog.registerFunction(name, f, returnType)
@ignore_unicode_prefix
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a java UDF so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not specified we would infer it via reflection.
:param name: name of the UDF
:param javaClassName: fully qualified name of java class
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerJavaFunction("javaStringLength",
... "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> sqlContext.sql("SELECT javaStringLength('test')").collect()
[Row(UDF(test)=4)]
>>> sqlContext.registerJavaFunction("javaStringLength2",
... "test.org.apache.spark.sql.JavaStringLength")
>>> sqlContext.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF(test)=4)]
"""
jdt = None
if returnType is not None:
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        :param data: an RDD of any kind of SQL data representation (e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
        The returned DataFrame has three columns: ``database``, ``tableName`` and ``isTemporary``
        (the last being a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple derby instances and encounter with incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
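# --- Illustrative migration sketch (added for exposition; not part of the
# original module). It shows the replacement recommended by the deprecation
# warning in HiveContext.__init__; the "SHOW TABLES" query is only a
# placeholder statement. ---
def _hive_enabled_session_example():
    # Preferred replacement for HiveContext: a Hive-enabled SparkSession.
    spark = SparkSession.builder.enableHiveSupport().getOrCreate()
    # Any SQL/HiveQL statement can then be issued through the session.
    return spark.sql("SHOW TABLES")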
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
makism/dyfunconn | tests/test_clustering.py | 1 | 3390 | # -*- coding: utf-8 -*-
"""Test for the cluster module."""
import pytest
import scipy as sp
from scipy import io
import numpy as np
from numpy import testing
import sklearn
from sklearn import datasets
import dyconnmap
from dyconnmap import cluster
@pytest.fixture()
def initialize():
"""Prepare random random (with seed)."""
rng = np.random.RandomState(seed=0)
data, _ = sklearn.datasets.make_moons(n_samples=1024, noise=0.125, random_state=rng)
return rng, data
def test_clustering_ng(initialize):
"""Test for the Neural Gas algorithm."""
# Groundtruth
result_protos = np.load("groundtruth/cluster/ng_protos.npy")
result_symbols = np.load("groundtruth/cluster/ng_symbols.npy")
# Input data
rng, data = initialize
# Run
ng = dyconnmap.cluster.NeuralGas(rng=rng).fit(data)
protos = ng.protos
_, symbols = ng.encode(data)
# Test
np.testing.assert_array_almost_equal(protos, result_protos)
np.testing.assert_array_almost_equal(symbols, result_symbols)
def test_clustering_rng(initialize):
"""Test for the Relational Neural Gas algorithm."""
# Groundtruth
result_protos = np.load("groundtruth/cluster/rng_protos.npy")
result_symbols = np.load("groundtruth/cluster/rng_symbols.npy")
# Data
rng, data = initialize
# Run
reng = dyconnmap.cluster.RelationalNeuralGas(
n_protos=10, iterations=100, rng=rng
).fit(data)
protos = reng.protos
_, symbols = reng.encode(data)
# Test
np.testing.assert_array_almost_equal(protos, result_protos)
np.testing.assert_array_almost_equal(symbols, result_symbols)
def test_clustering_mng(initialize):
"""Test for the Merge Neural Gas algorithm."""
# Groundtruth
result_protos = np.load("groundtruth/cluster/mng_protos.npy")
# Data
rng, data = initialize
# Run
protos = dyconnmap.cluster.MergeNeuralGas(rng=rng).fit(data).protos
# Test
np.testing.assert_array_almost_equal(protos, result_protos)
def test_clustering_gng(initialize):
"""Test for the Growing Neural Gas algorithm."""
# Groundtruth
result_protos = np.load("groundtruth/cluster/gng_protos.npy")
result_symbols = np.load("groundtruth/cluster/gng_symbols.npy")
# Data
rng, data = initialize
# Run
gng = dyconnmap.cluster.GrowingNeuralGas(rng=rng)
gng.fit(data)
protos = gng.protos
encoding, symbols = gng.encode(data)
# Test
np.testing.assert_array_almost_equal(protos, result_protos)
np.testing.assert_array_almost_equal(symbols, result_symbols)
def test_clustering_som(initialize):
"""Test for the Self-Organizing Maps algorithm."""
# Groundtruth
result_protos = np.load("groundtruth/cluster/som_protos.npy")
# Data
rng, data = initialize
# Run
protos = dyconnmap.cluster.SOM(grid=(8, 4), rng=rng).fit(data).weights
# Test
np.testing.assert_array_almost_equal(protos, result_protos)
def test_clustering_som_umatrix(initialize):
"""Test for the SOM' UMatrix."""
# Groundtruth
result_umatrix = np.load("groundtruth/cluster/som_umatrix.npy")
# Data
rng, data = initialize
# Run
protos = dyconnmap.cluster.SOM(grid=(8, 4), rng=rng).fit(data).weights
umatrix = dyconnmap.cluster.umatrix(protos)
# Test
np.testing.assert_array_almost_equal(umatrix, result_umatrix)
| bsd-3-clause |
richardotis/scipy | scipy/special/basic.py | 9 | 62504 | #
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt,
where, mgrid, sin, place, issubdtype, extract,
less, inexact, nan, zeros, atleast_1d, sinc)
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta,
hankel1, hankel2, yv, kv, gammaln, ndtri, errprint, poch,
binom)
from . import specfun
from . import orthogonal
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk', 'erf_zeros',
'erfcinv', 'erfinv', 'errprint', 'euler', 'factorial',
'factorialk', 'factorial2', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
"""Warning that can be issued with ``errprint(True)``"""
pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def diric(x, n):
"""Periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x) = sin(x * n/2) / (n * sin(x / 2)),
where n is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8,8));
>>> for idx, n in enumerate([2,3,4,9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
"""
x, n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
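# --- Illustrative check (added for exposition; not part of the original
# module). Away from the singular points x = 2*pi*k, ``diric`` should agree
# with the defining formula quoted in its docstring. ---
def _diric_usage_example():
    x = np.array([0.5, 1.0, 2.5])
    n = 5
    direct = np.sin(n * x / 2) / (n * np.sin(x / 2))
    return np.allclose(diric(x, n), direct)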
def jnjnp_zeros(nt):
"""Compute nt zeros of Bessel functions Jn and Jn'.
Results are arranged in order of the magnitudes of the zeros.
Parameters
----------
nt : int
Number (<=1200) of zeros to compute
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n,m,t,zo = specfun.jdzo(nt)
return zo[1:nt+1],n[:nt],m[:nt],t[:nt]
def jnyn_zeros(n,nt):
"""Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
Returns 4 arrays of length nt, corresponding to the first nt zeros of
Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.
Parameters
----------
n : int
Order of the Bessel functions
nt : int
Number (<=1200) of zeros to compute
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n),nt)
def jn_zeros(n,nt):
"""Compute nt zeros of Bessel function Jn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n,nt)[0]
def jnp_zeros(n,nt):
"""Compute nt zeros of Bessel function derivative Jn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n,nt)[1]
def yn_zeros(n,nt):
"""Compute nt zeros of Bessel function Yn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n,nt)[2]
def ynp_zeros(n,nt):
"""Compute nt zeros of Bessel function derivative Yn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n,nt)[3]
def y0_zeros(nt,complex=0):
"""Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
Parameters
----------
nt : int
Number of zeros to return
complex : int, default 0
Set to 0 to return only the real zeros; set to 1 to return only the
complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z0n : ndarray
Location of nth zero of Y0(z)
y0pz0n : ndarray
Value of derivative Y0'(z0) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def y1_zeros(nt,complex=0):
"""Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
Parameters
----------
nt : int
Number of zeros to return
complex : int, default 0
Set to 0 to return only the real zeros; set to 1 to return only the
complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1n : ndarray
Location of nth zero of Y1(z)
y1pz1n : ndarray
Value of derivative Y1'(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def y1p_zeros(nt,complex=0):
"""Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
Parameters
----------
nt : int
Number of zeros to return
complex : int, default 0
Set to 0 to return only the real zeros; set to 1 to return only the
complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1pn : ndarray
Location of nth zero of Y1'(z)
y1z1pn : ndarray
Value of derivative Y1(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v,z) = J(v,z), Y(v,z), H1(v,z), H2(v,z), phase = -1
# L(v,z) = I(v,z) or exp(v*pi*i)K(v,z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
        p = phase * (p * (n-i+1)) / i   # = phase**i * choose(n, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v,z,n=1):
"""Compute nth derivative of Bessel function Jv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v,z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
# return (jvp(v-1,z,n-1) - jvp(v+1,z,n-1))/2.0
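# --- Illustrative check (added for exposition; not part of the original
# module). Compares the analytic first derivative from ``jvp`` with a central
# finite difference of ``jv``; the step size and tolerance are arbitrary but
# comfortable choices. ---
def _jvp_usage_example(v=2.0, z=3.0, h=1e-6):
    analytic = jvp(v, z, 1)
    numeric = (jv(v, z + h) - jv(v, z - h)) / (2 * h)
    return abs(analytic - numeric) < 1e-8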
def yvp(v,z,n=1):
"""Compute nth derivative of Bessel function Yv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v,z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
# return (yvp(v-1,z,n-1) - yvp(v+1,z,n-1))/2.0
def kvp(v,z,n=1):
"""Compute nth derivative of modified Bessel function Kv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v,z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v,z,n=1):
"""Compute nth derivative of modified Bessel function Iv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v,z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v,z,n=1):
"""Compute nth derivative of Hankel function H1v(z) with respect to z.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v,z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
# return (h1vp(v-1,z,n-1) - h1vp(v+1,z,n-1))/2.0
def h2vp(v,z,n=1):
"""Compute nth derivative of Hankel function H2v(z) with respect to z.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v,z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
# return (h2vp(v-1,z,n-1) - h2vp(v+1,z,n-1))/2.0
def sph_jn(n,z):
"""Compute spherical Bessel function jn(z) and derivative.
This function computes the value and first derivative of jn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of jn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,jn,jnp = specfun.sphj(n1,z)
return jn[:(n+1)], jnp[:(n+1)]
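# --- Illustrative check (added for exposition; not part of the original
# module). The order-0 spherical Bessel function has the elementary closed
# form j0(x) = sin(x)/x. ---
def _sph_jn_usage_example(x=1.0):
    jn_vals, jnp_vals = sph_jn(1, x)
    return np.allclose(jn_vals[0], np.sin(x) / x)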
def sph_yn(n,z):
"""Compute spherical Bessel function yn(z) and derivative.
This function computes the value and first derivative of yn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of yn to compute
z : complex
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,yn,ynp = specfun.sphy(n1,z)
return yn[:(n+1)], ynp[:(n+1)]
def sph_jnyn(n,z):
"""Compute spherical Bessel functions jn(z) and yn(z) and derivatives.
This function computes the value and first derivative of jn(z) and yn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of jn and yn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,yn,ynp = specfun.sphy(n1,z)
nm,jn,jnp = specfun.sphj(n1,z)
return jn[:(n+1)],jnp[:(n+1)],yn[:(n+1)],ynp[:(n+1)]
def sph_in(n,z):
"""Compute spherical Bessel function in(z) and derivative.
This function computes the value and first derivative of in(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of in to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm,In,Inp,kn,knp = specfun.csphik(n1,z)
else:
nm,In,Inp = specfun.sphi(n1,z)
return In[:(n+1)], Inp[:(n+1)]
def sph_kn(n,z):
"""Compute spherical Bessel function kn(z) and derivative.
This function computes the value and first derivative of kn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of kn to compute
z : complex
Argument at which to evaluate
Returns
-------
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,In,Inp,kn,knp = specfun.csphik(n1,z)
else:
nm,kn,knp = specfun.sphk(n1,z)
return kn[:(n+1)], knp[:(n+1)]
def sph_inkn(n,z):
"""Compute spherical Bessel functions in(z), kn(z), and derivatives.
This function computes the value and first derivative of in(z) and kn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of in and kn to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,In,Inp,kn,knp = specfun.csphik(n1,z)
else:
nm,In,Inp = specfun.sphi(n1,z)
nm,kn,knp = specfun.sphk(n1,z)
return In[:(n+1)],Inp[:(n+1)],kn[:(n+1)],knp[:(n+1)]
def riccati_jn(n,x):
"""Compute Ricatti-Bessel function of the first kind and derivative.
This function computes the value and first derivative of the function for
all orders up to and including n.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(x), ..., jn(x)
jnp : ndarray
First derivative j0'(x), ..., jn'(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm,jn,jnp = specfun.rctj(n1,x)
return jn[:(n+1)],jnp[:(n+1)]
def riccati_yn(n,x):
"""Compute Ricatti-Bessel function of the second kind and derivative.
This function computes the value and first derivative of the function for
all orders up to and including n.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(x), ..., yn(x)
ynp : ndarray
First derivative y0'(x), ..., yn'(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm,jn,jnp = specfun.rcty(n1,x)
return jn[:(n+1)],jnp[:(n+1)]
def erfinv(y):
"""Inverse function for erf.
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""Inverse function for erfc.
"""
return -ndtri(0.5*y)/sqrt(2)
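# --- Illustrative round-trip check (added for exposition; not part of the
# original module). ``erf`` is imported locally to keep the sketch
# self-contained. ---
def _erfinv_usage_example():
    from scipy.special import erf
    y = np.array([-0.5, 0.0, 0.25, 0.9])
    # erf(erfinv(y)) should reproduce y for y in the open interval (-1, 1).
    return np.allclose(erf(erfinv(y)), y)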
def erf_zeros(nt):
"""Compute nt complex zeros of error function erf(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of cosine Fresnel integral C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1,nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of sine Fresnel integral S(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2,nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2,nt), specfun.fcszo(1,nt)
def hyp0f1(v, z):
r"""Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
    .. math:: _0F_1(v,z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as q -> infinity of ``1F1(q;v;z/q)``, and satisfies
    the differential equation :math:`zf''(z) + vf'(z) = f(z)`.
"""
v = atleast_1d(v)
z = atleast_1d(z)
v, z = np.broadcast_arrays(v, z)
arg = 2 * sqrt(abs(z))
old_err = np.seterr(all='ignore') # for z=0, a<1 and num=inf, next lines
num = where(z.real >= 0, iv(v - 1, arg), jv(v - 1, arg))
den = abs(z)**((v - 1.0) / 2)
num *= gamma(v)
np.seterr(**old_err)
num[z == 0] = 1
den[z == 0] = 1
return num / den
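# --- Illustrative check (added for exposition; not part of the original
# module). Sums the defining series directly; 30 terms is an arbitrary
# truncation that is ample for small |z|. ---
def _hyp0f1_usage_example(v=2.5, z=1.5, nterms=30):
    total, term = 0.0, 1.0
    for k in range(nterms):
        total += term
        term *= z / ((v + k) * (k + 1))  # recurrence for z**k / ((v)_k * k!)
    return np.allclose(hyp0f1(v, z), total)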
def assoc_laguerre(x, n, k=0.0):
"""Compute nth-order generalized (associated) Laguerre polynomial.
The polynomial :math:`L^(alpha)_n(x)` is orthogonal over ``[0, inf)``,
with weighting function ``exp(-x) * x**alpha`` with ``alpha > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
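# --- Illustrative check (added for exposition; not part of the original
# module). ``assoc_laguerre`` is documented above as a thin wrapper around
# ``eval_genlaguerre`` with the argument order reversed. ---
def _assoc_laguerre_usage_example(x=0.75, n=3, k=1.0):
    return np.allclose(assoc_laguerre(x, n, k),
                       orthogonal.eval_genlaguerre(n, k, x))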
digamma = psi
def polygamma(n, x):
"""Polygamma function n.
This is the nth derivative of the digamma (psi) function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1,x)
return where(n == 0, psi(x), fac2)
def mathieu_even_coef(m,q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the even solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
.. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Ak : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/28.4#i
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5+56.1*sqrt(q)-134.7*q+90.7*sqrt(q)*q
else:
qm = 17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q
km = int(qm+0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m,q)
fc = specfun.fcoef(kd,m,q,a)
return fc[:km]
def mathieu_odd_coef(m,q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the odd solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
.. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Bk : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5+56.1*sqrt(q)-134.7*q+90.7*sqrt(q)*q
else:
qm = 17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q
km = int(qm+0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m,q)
fc = specfun.fcoef(kd,m,q,b)
return fc[:km]
def lpmn(m,n,z):
"""Associated Legendre function of the first kind, Pmn(z).
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
mp = -m
mf,nf = mgrid[0:mp+1,0:n+1]
sv = errprint(0)
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf,0.0,(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf,0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p,pd = specfun.lpmn(mp,n,z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p,pd
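# --- Illustrative check (added for exposition; not part of the original
# module). For order m = 0 the values reduce to the ordinary Legendre
# polynomials, e.g. P2(x) = (3*x**2 - 1) / 2. ---
def _lpmn_usage_example(x=0.5):
    p, pd = lpmn(0, 2, x)
    expected = np.array([1.0, x, (3 * x**2 - 1) / 2])
    return np.allclose(p[0], expected)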
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind, Pmn(z).
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int, optional
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if not(type == 2 or type == 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
mp = -m
mf,nf = mgrid[0:mp+1,0:n+1]
sv = errprint(0)
if type == 2:
fixarr = where(mf > nf,0.0, (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf,0.0,gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p,pd = specfun.clpmn(mp,n,real(z),imag(z),type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p,pd
def lqmn(m,n,z):
"""Associated Legendre function of the second kind, Qmn(z).
Computes the associated Legendre function of the second kind of order m and
degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : complex
Input value.
Returns
-------
Qmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Qmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
# Ensure neither m nor n == 0
mm = max(1,m)
nn = max(1,n)
if iscomplex(z):
q,qd = specfun.clqmn(mm,nn,z)
else:
q,qd = specfun.lqmn(mm,nn,z)
return q[:(m+1),:(n+1)],qd[:(m+1),:(n+1)]
def bernoulli(n):
"""Bernoulli numbers B0..Bn (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
def euler(n):
"""Euler numbers E0..En (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n,z):
"""Legendre functions of the first kind, Pn(z).
Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn,pd = specfun.clpn(n1,z)
else:
pn,pd = specfun.lpn(n1,z)
return pn[:(n+1)],pd[:(n+1)]
## lpni
def lqn(n,z):
"""Legendre functions of the second kind, Qn(z).
Compute sequence of Legendre functions of the second kind, Qn(z) and
derivatives for all degrees from 0 to n (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn,qd = specfun.clqn(n1,z)
else:
qn,qd = specfun.lqnb(n1,z)
return qn[:(n+1)],qd[:(n+1)]
def ai_zeros(nt):
"""Compute nt zeros of Airy function Ai(x) and derivative, and corresponding values.
Computes the first nt zeros, a, of the Airy function Ai(x); first nt zeros,
a', of the derivative of the Airy function Ai'(x); the corresponding values
Ai(a'); and the corresponding values Ai'(a).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
a : ndarray
First nt zeros of Ai(x)
ap : ndarray
First nt zeros of Ai'(x)
ai : ndarray
Values of Ai(x) evaluated at first nt zeros of Ai'(x)
aip : ndarray
Values of Ai'(x) evaluated at first nt zeros of Ai(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt,kf)
def bi_zeros(nt):
"""Compute nt zeros of Airy function Bi(x) and derivative, and corresponding values.
Computes the first nt zeros, b, of the Airy function Bi(x); first nt zeros,
b', of the derivative of the Airy function Bi'(x); the corresponding values
Bi(b'); and the corresponding values Bi'(b).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
b : ndarray
First nt zeros of Bi(x)
bp : ndarray
First nt zeros of Bi'(x)
bi : ndarray
Values of Bi(x) evaluated at first nt zeros of Bi'(x)
bip : ndarray
Values of Bi'(x) evaluated at first nt zeros of Bi(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt,kf)
def lmbda(v,x):
"""Jahnke-Emden Lambda function, Lambdav(x).
Parameters
----------
v : float
Order of the Lambda function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
vl : ndarray
Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dl : ndarray
Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v < 0):
raise ValueError("argument must be > 0.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1,x)
else:
vm, vl, dl = specfun.lamn(v1,x)
return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v,x):
"""Parabolic cylinder functions Dv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv,dp,pdf,pdd = specfun.pbdv(v1,x)
return dv[:n1+1],dp[:n1+1]
def pbvv_seq(v,x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv,dp,pdf,pdd = specfun.pbvv(v1,x)
return dv[:n1+1],dp[:n1+1]
def pbdn_seq(n,z):
"""Parabolic cylinder functions Dn(z) and derivatives.
Parameters
----------
n : int
Order of the parabolic cylinder function
z : complex
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_i(z), for i=0, ..., i=n.
dp : ndarray
Derivatives D_i'(z), for i=0, ..., i=n.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb,cpd = specfun.cpbdn(n1,z)
return cpb[:n1+1],cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei(x).
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,8)
def kelvin_zeros(nt):
"""Compute nt zeros of all Kelvin functions.
Returned in a length-8 tuple of arrays of length nt. The tuple contains
the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,1), \
specfun.klvnzo(nt,2), \
specfun.klvnzo(nt,3), \
specfun.klvnzo(nt,4), \
specfun.klvnzo(nt,5), \
specfun.klvnzo(nt,6), \
specfun.klvnzo(nt,7), \
specfun.klvnzo(nt,8)
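# Editor's note: hedged usage sketch, not part of the original module.  kelvin_zeros
# bundles the zeros of all eight Kelvin functions into a single tuple, so position-wise
# unpacking is the usual pattern; the helper name below is illustrative only.
def _kelvin_zeros_demo(nt=5):
    """Unpack the zeros of ber, bei, ker, kei and their derivatives (illustrative)."""
    ber_z, bei_z, ker_z, kei_z, berp_z, beip_z, kerp_z, keip_z = kelvin_zeros(nt)
    return len(ber_z) == nt and len(keip_z) == nt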
def pro_cv_seq(m,n,c):
"""Characteristic values for prolate spheroidal wave functions.
Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m,n,c,1)[1][:maxL]
def obl_cv_seq(m,n,c):
"""Characteristic values for oblate spheroidal wave functions.
Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m,n,c,-1)[1][:maxL]
def ellipk(m):
"""Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
"""
return ellipkm1(1 - asarray(m))
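# Editor's note: hedged example, not part of the original module.  It only exercises the
# identity used above, ellipk(m) == ellipkm1(1 - m); at m = 0 the complete elliptic
# integral of the first kind equals pi/2.  The helper name is illustrative only.
def _ellipk_sanity_check():
    """Return ellipk(0.0) after checking it equals pi/2 to double precision."""
    assert abs(ellipk(0.0) - pi / 2) < 1e-12
    return ellipk(0.0)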
def agm(a,b):
"""Arithmetic, Geometric Mean.
Start with a_0=a and b_0=b and iteratively compute
a_{n+1} = (a_n+b_n)/2
b_{n+1} = sqrt(a_n*b_n)
until a_n=b_n. The result is agm(a,b)
agm(a,b)=agm(b,a)
agm(a,a) = a
min(a,b) < agm(a,b) < max(a,b)
"""
s = a + b + 0.0
return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
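# Editor's note: hedged example, not part of the original module.  The docstring above
# defines agm(a, b) by the averaging iteration; the sketch below runs that iteration
# directly so the closed form via ellipkm1 can be cross-checked.  Helper name and
# tolerance are illustrative only.
def _agm_by_iteration(a, b, tol=1e-15):
    """Compute agm(a, b) with the plain (arithmetic, geometric) averaging loop."""
    a, b = float(a), float(b)
    while abs(a - b) > tol * max(abs(a), abs(b)):
        a, b = (a + b) / 2.0, sqrt(a * b)
    return a
# e.g. _agm_by_iteration(1.0, 2.0) should agree with agm(1.0, 2.0) to about 1e-12.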
def comb(N, k, exact=False, repetition=False):
"""The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, ndarray
The total number of combinations.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import comb
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> comb(n, k, exact=False)
array([ 120., 210.])
>>> comb(10, 3, exact=True)
120L
>>> comb(10, 3, exact=True, repetition=True)
220L
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
N = int(N)
k = int(k)
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in xrange(min(k, N-k)):
val = (val*(N-j))//(j+1)
return val
else:
k,N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import perm
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def factorial(n,exact=False):
"""The factorial function, n! = special.gamma(n+1).
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
- Array argument accepted only for exact=False case.
- If n<0, the return value is 0.
Parameters
----------
n : int or array_like of ints
Calculate ``n!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above. If `exact` is set to True, calculate the
answer exactly using integer arithmetic. Default is False.
Returns
-------
nf : float or int
Factorial of `n`, as an integer or a float depending on `exact`.
Examples
--------
>>> from scipy.special import factorial
>>> arr = np.array([3,4,5])
>>> factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> factorial(5, exact=True)
120L
"""
if exact:
if n < 0:
return 0
val = 1
for k in xrange(1,n+1):
val *= k
return val
else:
n = asarray(n)
vals = gamma(n+1)
return where(n >= 0,vals,0)
def factorial2(n, exact=False):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
* 3 * 1``. It can be approximated numerically as::
        n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
= 2**(n/2) * (n/2)! n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> from scipy.special import factorial2
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n,0,-2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape,'d')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1,n)
evenn = extract(cond2,n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals,cond1,gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5))
place(vals,cond2,gamma(nd2e+1) * pow(2.0,nd2e))
return vals
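# Editor's note: hedged example, not part of the original module.  It checks the
# gamma-based formula quoted in the factorial2 docstring against the exact integer
# product for a few small arguments; the helper name and tolerance are illustrative.
def _factorial2_consistency(nmax=10):
    """Return True if exact and gamma-based double factorials agree for n = 1..nmax."""
    for n in range(1, nmax + 1):
        exact_val = factorial2(n, exact=True)
        approx_val = float(factorial2(n, exact=False))
        if abs(exact_val - approx_val) > 1e-6 * max(1.0, exact_val):
            return False
    return True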
def factorialk(n, k, exact=True):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
In particular, for any integer ``n``, we have
factorialk(n, 1) = factorial(n)
factorialk(n, 2) = factorial2(n)
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multifactorial of `n`.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> from scipy.special import factorialk
>>> factorialk(5, 1, exact=True)
120L
>>> factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n,0,-k):
val = val*j
return val
else:
raise NotImplementedError
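# Editor's note: hedged example, not part of the original module.  It exercises the
# relations quoted in the factorialk docstring, factorialk(n, 1) == n! and
# factorialk(n, 2) == n!!; the helper name is illustrative only.
def _factorialk_relations(n=7):
    """Check factorialk against factorial and factorial2 for a single odd n."""
    ok1 = factorialk(n, 1, exact=True) == factorial(n, exact=True)
    ok2 = factorialk(n, 2, exact=True) == factorial2(n, exact=True)
    return ok1 and ok2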
| bsd-3-clause |
wmaciel/van-crime | src/utilities2.py | 1 | 6804 | '''
Created by:
Juan Sarria
March 15, 2016
'''
import pandas as pd, numpy as np, fiona, timeit
from geopy.distance import vincenty
from shapely import geometry
from utilities import utm_to_latlong, latlong_to_utm
from __builtin__ import False
from pandas.core.frame import DataFrame
PROJECT_ROOT = '../'
def main():
#test values
lat = 49.2668355595
lon = -123.070244095
year = 2010
month = 5
'''
prop_df = pd.read_csv(PROJECT_ROOT + 'data/property_tax_06_15/latlong_property_tax_' + str(2006) + '.csv')
print avg_closest_properties(lat,lon,prop_df=prop_df)
sky_df = pd.read_csv(PROJECT_ROOT + 'data/skytrain_stations/rapid_transit_stations.csv')
print closest_skytrain(lat,lon)
crime_df = pd.read_csv(PROJECT_ROOT+'/data/crime_03_15/crime_latlong.csv')
neighbourhoods = crime_df['NEIGHBOURHOOD'].unique().tolist()
print len(neighbourhoods)
print one_hot_encoding(neighbourhoods[2],neighbourhoods)
a = number_graffiti(lat,lon)
print type(a[0])
'''
data = pd.read_csv(PROJECT_ROOT+'/data/crime_03_15/crime_latlong.csv')
data = data[data['YEAR'] >= 2006].sample(1000)
data = data[['LATITUDE','LONGITUDE', 'NEIGHBOURHOOD']]
data2 = data.apply(lambda row: pd.Series(locate_neighbourhood(row['LATITUDE'], row['LONGITUDE']),
index=['NEIGHBOURHOOD_2']),axis=1)
data = pd.concat([data,data2],axis=1)[['NEIGHBOURHOOD','NEIGHBOURHOOD_2']]
data = data[data['NEIGHBOURHOOD'] != data['NEIGHBOURHOOD_2']][pd.notnull(data['NEIGHBOURHOOD'])]
print data
print data.count()
def avg_closest_properties(lat, lon,year = None, prop_df = None, range_val = 0.0001):
try:
if year is not None:
property_file = PROJECT_ROOT + 'data/property_tax_06_15/latlong_property_tax_' + str(year) + '.csv'
if prop_df is None: prop_df = pd.read_csv(property_file)
# Keep a copy of original df
temp_df = prop_df
# Narrow down options to minimize unnecessary calculations
prop_df = prop_df[prop_df['LATITUDE']< lat+range_val]
prop_df = prop_df[prop_df['LATITUDE']> lat-range_val]
prop_df = prop_df[prop_df['LONGITUDE']< lon+range_val]
prop_df = prop_df[prop_df['LONGITUDE']> lon-range_val]
# If not enough values, start again with a bigger range
if prop_df.count()['VALUE'] < 10:
return avg_closest_properties(lat,lon,prop_df=temp_df,range_val=range_val*10)
# Apply vincenty in the remaining rows
prop_df['DIST_DIF'] = prop_df.apply(lambda row: vincenty((lat,lon),(row['LATITUDE'],row['LONGITUDE'])).m,axis=1)
# Find the top 10 and top 5 closest properties
ten_min_df = prop_df[['VALUE','DIST_DIF']].nsmallest(10,'DIST_DIF')
five_min_df = ten_min_df.nsmallest(5,'DIST_DIF')
        #Return average property value for the top 5 and 10
return [five_min_df['VALUE'].mean(),ten_min_df['VALUE'].mean()]
except:
print "Error in avg_closest_properties"
def closest_skytrain(lat,lon, sky_df = None):
skytrain_file = PROJECT_ROOT + 'data/skytrain_stations/rapid_transit_stations.csv'
if sky_df is None: sky_df = pd.read_csv(skytrain_file)
vector = [0]*(sky_df.count()['STATION']+1)
# Find closest skytrain station
sky_df['DIST_DIF'] = sky_df.apply(lambda row: vincenty((lat,lon),(row['LAT'],row['LONG'])).m,axis=1)
min_df = sky_df.nsmallest(1,'DIST_DIF')
vector[list(min_df.index)[0]] = 1
vector[-1] = min_df.iloc[0]['DIST_DIF']
# returns on-hot encoded vector with distance at the end
return vector
'''
def get_weather(year, month, weatherdf = None):
weather_file = PROJECT_ROOT + 'data/weather/VANCOUVER SEA ISLAND CCG/summarydata.csv'
if weatherdf is None:
weatherdf = pd.read_csv(weather_file)
# basic checking to see if we have reasonable data passed in.
if month > 12:
return False
if year >= 2006 and year <= 2015:
filter_year = weatherdf[(weatherdf.YEAR == year)]
line = filter_year[(filter_year.MONTH == month)].drop('YEAR',axis=1).drop('MONTH',axis=1)
return line
else:
filter_month = weatherdf[(weatherdf.MONTH == month)].drop('YEAR',axis=1).drop('MONTH',axis=1).mean(axis=0).to_frame().transpose()
return filter_month
'''
def one_hot_encoding(label, list_of_labels):
vector = [0]*len(list_of_labels)
vector[list_of_labels.index(label)] = 1
return vector
def number_graffiti(lat,lon, graf_df = None, radius1 = 50, radius2 = 100):
graffiti_file = PROJECT_ROOT + 'data/graffiti/graffiti.csv'
if graf_df is None: graf_df = pd.read_csv(graffiti_file)
# Narrow down options
graf_df = graf_df[graf_df['LAT'] < lat+.001]
graf_df = graf_df[graf_df['LAT'] > lat-.001]
graf_df = graf_df[graf_df['LONG'] < lon+.001]
    graf_df = graf_df[graf_df['LONG'] > lon-.001]
if graf_df['LAT'].count() == 0: return [0,0]
# Apply vincenty for remaining rows
graf_df['DIST_DIF'] = graf_df.apply(lambda row: vincenty((lat,lon),(row['LAT'],row['LONG'])).m,axis=1)
count_2 = graf_df[graf_df['DIST_DIF'] <= radius2]
count_1 = count_2[count_2['DIST_DIF'] <= radius1]
return [count_1['COUNT'].sum(), count_2['COUNT'].sum()]
def number_street_lights(lat,lon,light_df = None, radius = 50):
light_file = PROJECT_ROOT + 'data/street_lightings/street_lighting_poles.csv'
if light_df is None: light_df = pd.read_csv(light_file)
# Narrow down options
light_df = light_df[light_df['LAT'] < lat+.001]
light_df = light_df[light_df['LAT'] > lat-.001]
light_df = light_df[light_df['LONG'] < lon+.001]
    light_df = light_df[light_df['LONG'] > lon-.001]
if light_df['LAT'].count() == 0 : return 0
# Apply vincenty and find number of lights within radius
light_df['DIST_DIF'] = light_df.apply(lambda row: vincenty((lat,lon),(row['LAT'],row['LONG'])).m,axis=1)
min_lights = light_df[light_df['DIST_DIF'] < radius]
return min_lights['DIST_DIF'].count()
def locate_neighbourhood(lat, lon):
with fiona.open(PROJECT_ROOT+'data/neighbourhood_borders/local_area_boundary.shp') as neighbourhoods:
point = geometry.Point(lat,lon)
for n in neighbourhoods:
if n['properties']['NAME'] == 'Arbutus-Ridge': n['properties']['NAME'] = 'Arbutus Ridge'
if n['properties']['NAME'] == 'Downtown': n['properties']['NAME'] = 'Central Business District'
n['geometry']['coordinates'][0] = [utm_to_latlong(x[0],x[1]) for x in n['geometry']['coordinates'][0]]
shape = geometry.asShape(n['geometry'])
if shape.contains(point): return n['properties']['NAME']
return -1
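# Editor's note: hedged sketch, not part of the original script.  locate_neighbourhood
# boils down to shapely's point-in-polygon containment test once a boundary has been
# converted to lat/long; the example below shows that same check on a hand-made square
# so it runs without the shapefile.  All coordinates are illustrative.
def _point_in_polygon_demo():
    square = geometry.Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])
    inside = geometry.Point(0.5, 0.5)
    outside = geometry.Point(2.0, 2.0)
    return square.contains(inside), square.contains(outside)  # (True, False)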
if __name__ == "__main__":
main()
| mit |
bigswitch/snac-nox | src/scripts/buildtest/lookup.py | 1 | 1569 | #!/usr/bin/python
import matplotlib
matplotlib.use('Agg')
import pickle
import pwd
import os
import info
import graph
def create_image(argv):
p = info.Profile()
b = info.Build()
t = info.Test()
r = info.Result()
values = []
for v in argv[1:]:
if v == 'True':
values.append(True)
elif v == 'False':
values.append(False)
elif v == 'None':
values.append(None)
else:
try:
values.append(float(v))
except:
values.append(v)
(p.user, p.machine, p.run_date, \
b.commit, b.last_author, b.build_date, \
t.configuration, t.command, t.packets, t.rules, t.policies, \
r.total, r.user, r.system, ind, dep) = values
if p.user == p.machine == p.run_date:
p = p.user
if b.commit == b.last_author == b.build_date:
b = b.commit
if t.configuration == t.command == t.packets == t.rules == t.policies:
t = t.configuration
if r.total == r.user == r.system:
r = r.total
user = pwd.getpwuid(os.getuid())[0]
input = '/var/www/buildtest/' + user +'/archive/performance.pkl'
raw_points = pickle.load(open(input,'r'))
g = graph.Grapher(raw_points,'librarian')
search = info.RawData(p, b, t, r)
print search
g.graph(ind, dep, search)
if __name__ == "__main__":
import sys
create_image(sys.argv)
| gpl-3.0 |
ekopylova/tcga-1 | python_scripts/parse_kraken_to_biom.py | 2 | 7088 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2016--, Evguenia Kopylova.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Parse output of "kraken translate" and generate single BIOM table.
"""
import click
import pandas as pd
import numpy as np
from biom.table import Table
from biom.util import biom_open
def compute_biom_table(kraken_translate_report_fp,
taxonomic_rank,
taxa_levels,
taxa_levels_idx,
columns,
index):
"""Absolute abundance of number of reads matching a defined taxa level.
Parameters
----------
kraken_translate_report_fp: str
filepath to output of "kraken translate"
taxonomic_rank: str
taxonomy level (e.g., genus or species)
taxa_levels: dict
keys are full name taxonomic ranks and values are abbreviated ranks
taxa_levels_idx: dict
2-way dict storing integer depths for abbreviated taxonomic ranks
columns: list
sample IDs list
index: list
observation IDs (taxonomy strings) list
Returns
-------
biom_table: biom.Table
BIOM table
"""
total_levels = len(taxa_levels)
# columns are sample IDs and rows are taxonomy strings
abundances = pd.DataFrame(columns=columns, index=index)
taxonomic_rank_level_str = taxa_levels[taxonomic_rank]
taxonomic_rank_level_int = taxa_levels_idx[taxonomic_rank_level_str]
if taxonomic_rank_level_int < 6:
split_on_level = taxa_levels_idx[str(taxonomic_rank_level_int + 1)]
else:
# keep full string (to species level)
split_on_level = '\t'
with open(kraken_translate_report_fp) as kraken_translate_report_f:
for line in kraken_translate_report_f:
label, taxonomy = line.strip().split('\t')
# record abundance
if taxonomic_rank_level_str in taxonomy:
# keep taxonomy string up to specified level
taxonomy = taxonomy.split(split_on_level)[0]
sample_id = label.split('_')[0]
value = abundances.at[taxonomy, sample_id]
if np.isnan(value):
abundances.set_value(taxonomy, sample_id, 1.)
else:
abundances.set_value(taxonomy, sample_id, value+1.)
obs_ids = abundances.index.values.tolist()
for i in range(len(obs_ids)):
obs_ids[i] = obs_ids[i].replace("d__", "k__")
obs_ids[i] = obs_ids[i].replace("|", ";")
return Table(abundances.fillna(0.).as_matrix(),
obs_ids,
abundances.columns.values.tolist())
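# Editor's note: hedged sketch, not part of the original script.  The rank handling in
# compute_biom_table keeps a kraken taxonomy string only up to the requested level by
# splitting on the abbreviation of the next-deeper rank; the toy string below is
# illustrative and shows the truncation for the genus level.
def _truncate_taxonomy_example():
    taxonomy = ("d__Bacteria|p__Proteobacteria|c__Gammaproteobacteria|"
                "o__Enterobacterales|f__Enterobacteriaceae|g__Escherichia|s__coli")
    split_on_level = "|s__"  # next-deeper rank after genus
    return taxonomy.split(split_on_level)[0]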
def prepare_dataframe(kraken_translate_report_fp,
taxonomic_rank,
taxa_levels,
taxa_levels_idx):
"""Return sets for sample IDs and taxonomy strings.
Parameters
----------
kraken_translate_report_fp: str
filepath to output of "kraken translate"
taxonomic_rank: str
taxonomy level (e.g., genus or species)
taxa_levels: dict
keys are full name taxonomic ranks and values are abbreviated ranks
taxa_levels_idx: dict
2-way dict storing integer depths for abbreviated taxonomic ranks
Returns
-------
sample_ids: list
all unique sample IDs in file
taxonomies: list
all unique taxonomies in file
"""
total_levels = len(taxa_levels)
taxonomic_rank_level_str = taxa_levels[taxonomic_rank]
taxonomic_rank_level_int = taxa_levels_idx[taxonomic_rank_level_str]
if taxonomic_rank_level_int < 6:
split_on_level = taxa_levels_idx[str(taxonomic_rank_level_int + 1)]
else:
# keep full string (to species level)
split_on_level = '\t'
sample_ids = set()
taxonomies = set()
with open(kraken_translate_report_fp) as kraken_translate_report_f:
for line in kraken_translate_report_f:
label, taxonomy = line.strip().split('\t')
sample_id = label.split('_')[0]
sample_ids.add(sample_id)
# record abundance
if taxonomic_rank_level_str in taxonomy:
# keep taxonomy string up to specified level
taxonomy = taxonomy.split(split_on_level)[0]
taxonomies.add(taxonomy)
return list(sample_ids), list(taxonomies)
def write_biom_table(table, biom_output_fp):
"""Write BIOM table to file.
Parameters
----------
table: biom.Table
an instance of a BIOM table
biom_output_fp: str
filepath to output BIOM table
"""
with biom_open(biom_output_fp, 'w') as f:
table.to_hdf5(h5grp=f, generated_by="tcga-kraken-translate")
@click.command()
@click.option('--kraken-translate-report-fp', required=True,
type=click.Path(resolve_path=True, readable=True, exists=True,
file_okay=True))
@click.option('--taxonomic-rank', type=click.Choice(['genus', 'species',
'family', 'order',
'class', 'phylum',
'domain']),
              required=False, default='genus', show_default=True,
help="Taxonomic rank at which to generate summary")
@click.option('--biom-output-fp', required=True,
type=click.Path(resolve_path=True, readable=True, exists=False,
file_okay=True),
help="Filepath to output BIOM table")
def main(kraken_translate_report_fp,
taxonomic_rank,
biom_output_fp):
taxa_levels = {"domain": "d__",
"phylum": "|p__",
"class": "|c__",
"order": "|o__",
"family": "|f__",
"genus": "|g__",
"species": "|s__"}
taxa_levels_idx = {"d__": 0, "|p__": 1, "|c__": 2,
"|o__": 3, "|f__": 4, "|g__": 5,
"|s__": 6, "6": "|s__", "5": "|g__",
"4": "|f__", "3": "|o__", "2": "|c__",
"1": "|p__", "0": "d__"}
columns, index = prepare_dataframe(
kraken_translate_report_fp=kraken_translate_report_fp,
taxonomic_rank=taxonomic_rank,
taxa_levels=taxa_levels,
taxa_levels_idx=taxa_levels_idx)
biom_table =\
compute_biom_table(
kraken_translate_report_fp=kraken_translate_report_fp,
taxonomic_rank=taxonomic_rank,
taxa_levels=taxa_levels,
taxa_levels_idx=taxa_levels_idx,
columns=columns,
index=index)
# output table
write_biom_table(biom_table, biom_output_fp)
if __name__ == "__main__":
main()
| bsd-3-clause |
shikhardb/scikit-learn | sklearn/datasets/species_distributions.py | 24 | 7871 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
    if nodata != -9999:
        # replace the file's NODATA sentinel with the canonical -9999 marker
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
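# Editor's note: hedged example, not part of the original module.  construct_grids only
# needs the four corner/size attributes of the batch, so a toy Bunch is enough to show
# the call; Nx, Ny and the corner values below are illustrative, not the real dataset's.
def _construct_grids_demo():
    toy = Bunch(x_left_lower_corner=-94.8, y_left_lower_corner=-56.05,
                Nx=4, Ny=3, grid_size=0.05)
    xgrid, ygrid = construct_grids(toy)
    return xgrid, ygrid  # roughly Nx and Ny evenly spaced cell coordinates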
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
| bsd-3-clause |
workflo/dxf2gcode | python_examples/NURBS_fitting_by_Biarc_curves_wx.py | 1 | 51927 | #!/usr/bin/python
# -*- coding: cp1252 -*-
#
#NURBS_fittin_by_Biarc_curves
#Programmer: Christian Kohlöffel
#E-mail: n/A
#
#Copyright 2008 Christian Kohlöffel
#
#Distributed under the terms of the GPL (GNU Public License)
#
#dxf2gcode is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#import matplotlib
#matplotlib see: http://matplotlib.sourceforge.net/ and http://www.scipy.org/Cookbook/Matplotlib/
#numpy see: http://numpy.scipy.org/ and http://sourceforge.net/projects/numpy/
#matplotlib.use('TkAgg')
import matplotlib
# uncomment the following to use wx rather than wxagg
#matplotlib.use('WX')
#from matplotlib.backends.backend_wx import FigureCanvasWx as FigureCanvas
# comment out the following to use wx rather than wxagg
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import wx
from math import sqrt, sin, cos, atan2, radians, degrees, pi, floor, ceil
import sys
class NURBSClass:
def __init__(self,degree=0,Knots=[],Weights=None,CPoints=None):
        self.degree=degree     #Spline degree
        self.Knots=Knots       #Knot vector
        self.CPoints=CPoints   #Control points of the spline [2D]
        self.Weights=Weights   #Weights of the individual points
        #Initialize the derived quantities
        self.HCPts=[]          #Homogeneous point vectors [3D]
        #Convert the control points into homogeneous points
        self.CPts_2_HCPts()
print self
        #Create the BSpline class used to compute the homogeneous points
self.BSpline=BSplineClass(degree=self.degree,\
Knots=self.Knots,\
CPts=self.HCPts)
def __str__(self):
print len(self.Knots)
print len(self.Weights)
print len(self.CPoints)
str='\ndegree: %s' %self.degree
for j in range((self.degree+1)/2):
str=str+'\n'
str=str+'Knots[%s]: %s' % (j,self.Knots[j])
for i in range(len(self.Knots)-j-1):
str=str+'\n'
if i<len(self.CPoints):
str=str+' CPoints[%s]: %s' % (i,self.CPoints[i])
str=str+' Weights[%s]: %s' % (i,self.Weights[i])
str=str+'Knots[%s]: %s' % (i+j+1,self.Knots[i+j+1])
return str
def check_NURBSParameters(self,tol):
        #Check the knot vector
        #Look for repeated knots (count above degree+1 => error?!)
knt_nr=1
knt_vec=[[self.Knots[0]]]
self.knt_m_change=[]
while knt_nr < len(self.Knots):
if self.Knots[knt_nr]==knt_vec[-1][-1]:
knt_vec[-1].append(self.Knots[knt_nr])
else:
knt_vec.append([self.Knots[knt_nr]])
knt_nr+=1
for knt_spts in knt_vec:
if (len(knt_spts)>self.degree+1):
raise ValueError, "Same Knots Nr. bigger then degree+1"
            #For multiple knots, check the tangent difference just before and after the point
elif ((len(knt_spts)>self.degree)and(knt_spts[-1]>0.0)and(knt_spts[-1]<1.0)):
temp, tangent0=self.NURBS_evaluate(n=1,u=knt_spts[0]-1e-12)
temp, tangent1=self.NURBS_evaluate(n=1,u=knt_spts[0])
if abs(tangent0-tangent1)>1e-6:
self.knt_m_change.append(knt_spts[0])
        #Check the control points
        #Look for repeated control points (count above degree+2 => do not evaluate)
ctlpt_nr=0
ctlpt_vec=[[ctlpt_nr]]
while ctlpt_nr < len(self.CPoints)-1:
ctlpt_nr+=1
if self.CPoints[ctlpt_nr].isintol(self.CPoints[ctlpt_vec[-1][-1]],tol):
ctlpt_vec[-1].append(ctlpt_nr)
else:
ctlpt_vec.append([ctlpt_nr])
self.ignor=[]
for same_ctlpt in ctlpt_vec:
if (len(same_ctlpt)>self.degree):
self.ignor.append([self.Knots[same_ctlpt[0]+self.degree/2],\
self.Knots[same_ctlpt[-1]+self.degree/2]])
#raise ValueError, "Same Controlpoints Nr. bigger then degree+1"
#print("Same Controlpoints Nr. bigger then degree+2")
for ignor in self.ignor:
print("Ignoring u's between u: %s and u: %s" %(ignor[0],ignor[1]))
if len(self.knt_m_change):
print("Non steady Angles between Knots: %s" %self.knt_m_change)
    #Evaluate a number of evenly distributed points, up to the first derivative
    def calc_curve(self,n=0, cpts_nr=20):
        #Initial values for step and u
u=0; Points=[]; tang=[]
step=self.Knots[-1]/(cpts_nr-1)
while u<=1.0:
Pt,tangent=self.NURBS_evaluate(n=n,u=u)
Points.append(Pt)
            #For the first derivative the tangent angle is computed
if n>=1:
tang.append(tangent)
u+=step
if n>=1:
return Points, tang
else:
return Points
    #Evaluate one point of the NURBS and its first derivative
    def NURBS_evaluate(self,n=0,u=0):
        #Compute the corrected u's
        #cor_u=self.correct_u(u)
        #Compute the homogeneous points up to the n-th derivative
HPt=self.BSpline.bspline_ders_evaluate(n=n,u=u)
        #Transform the point back into normal coordinates
Point=self.HPt_2_Pt(HPt[0])
        #If n>0, compute the first derivative as a direction vector
dPt=[]
tangent=None
if n>0:
# w(u)*A'(u)-w'(u)*A(u)
#dPt=---------------------
# w(u)^2
for j in range(len(HPt[0])-1):
dPt.append((HPt[0][-1]*HPt[1][j]-HPt[1][-1]*HPt[0][j])/
pow(HPt[0][-1],2))
            #Compute the angle of the vector
tangent=atan2(dPt[1],dPt[0])
return Point, tangent
else:
return Point
    #Convert the NURBS control points and weights into homogeneous vectors
def CPts_2_HCPts(self):
for P_nr in range(len(self.CPoints)):
HCPtVec=[self.CPoints[P_nr].x*self.Weights[P_nr],\
self.CPoints[P_nr].y*self.Weights[P_nr],\
self.Weights[P_nr]]
self.HCPts.append(HCPtVec[:])
    #Convert a homogeneous point vector back into a point
def HPt_2_Pt(self,HPt):
return PointClass(x=HPt[0]/HPt[-1],y=HPt[1]/HPt[-1])
class BSplineClass:
def __init__(self,degree=0,Knots=[],CPts=[]):
self.degree=degree
self.Knots=Knots
self.CPts=CPts
self.Knots_len=len(self.Knots)
self.CPt_len=len(self.CPts[0])
self.CPts_len=len(self.CPts)
        #Input check: make sure the knot count etc. is consistent
if self.Knots_len< self.degree+1:
raise ValueError, "degree greater than number of control points."
if self.Knots_len != (self.CPts_len + self.degree+1):
print ("shall be: %s" %(self.CPts_len + self.degree+1))
print ("is: %s" %self.Knots_len)
raise ValueError, "Knot/Control Point/degree number error."
    #Evaluate a number of evenly distributed points up to the n-th derivative
    def calc_curve(self,n=0,cpts_nr=20):
        #Initial values for step and u
        u=0
        step=float(self.Knots[-1])/(cpts_nr-1)
        Points=[]
        #If the first derivative (or higher) is requested, the first
        #derivative is stored in tang as an angle in radians
        tang=[]
while u<=self.Knots[-1]:
CK=self.bspline_ders_evaluate(n=n,u=u)
            #Store the point in the point list
Points.append(PointClass(x=CK[0][0],y=CK[0][1]))
            #For the first derivative the tangent angle is computed
if n>=1:
tang.append(atan2(CK[1][1],CK[1][0]))
u+=step
return Points, tang
#Modified Version of Algorithm A3.2 from "THE NURBS BOOK" pg.93
def bspline_ders_evaluate(self,n=0,u=0):
        #Determine the knot span containing u
        span=self.findspan(u)
        #Evaluate the basis functions up to the n-th derivative at u
dN=self.ders_basis_functions(span,u,n)
p=self.degree
du=min(n,p)
CK=[]
dPts=[]
for i in range(self.CPt_len):
dPts.append(0.0)
for k in range(n+1):
CK.append(dPts[:])
for k in range(du+1):
for j in range(p+1):
for i in range(self.CPt_len):
CK[k][i]+=dN[k][j]*self.CPts[span-p+j][i]
return CK
#Algorithm A2.1 from "THE NURBS BOOK" pg.68
def findspan(self,u):
        #Special case when the value equals the end knot
        if(u==self.Knots[-1]):
            return self.Knots_len-self.degree-2 #self.Knots_len #-1
        #Start the binary search
        #(the interval from low to high is halved repeatedly until
        #the value lies within the interval Knots[mid:mid+1])
low=self.degree
high=self.Knots_len
mid=(low+high)/2
while ((u <self.Knots[mid])or(u>=self.Knots[mid+1])):
if (u<self.Knots[mid]):
high=mid
else:
low=mid
mid=(low+high)/2
return mid
#Algorithm A2.3 from "THE NURBS BOOK" pg.72
def ders_basis_functions(self,span,u,n):
d=self.degree
#initialisieren der a Matrix
a=[]
zeile=[]
for j in range(d+1):
zeile.append(0.0)
a.append(zeile[:]); a.append(zeile[:])
#initialisieren der ndu Matrix
ndu=[]
zeile=[]
for i in range(d+1):
zeile.append(0.0)
for j in range(d+1):
ndu.append(zeile[:])
#initialisieren der ders Matrix
ders=[]
zeile=[]
for i in range(d+1):
zeile.append(0.0)
for j in range(n+1):
ders.append(zeile[:])
ndu[0][0]=1.0
left=[0]
right=[0]
for j in range(1,d+1):
#print('komisch span:%s, j:%s, u:%s, gesamt: %s' %(span,j,u,span+1-j))
left.append(u-self.Knots[span+1-j])
right.append(self.Knots[span+j]-u)
saved=0.0
for r in range(j):
#Lower Triangle
ndu[j][r]=right[r+1]+left[j-r]
temp=ndu[r][j-1]/ndu[j][r]
#Upper Triangle
ndu[r][j]=saved+right[r+1]*temp
saved=left[j-r]*temp
ndu[j][j]=saved
            #Result from p. 71 of "The NURBS Book"
#print("Ndu: %s" %ndu)
#Load the basis functions
for j in range(d+1):
ders[0][j]=ndu[j][d]
#This section computes the derivatives (Eq. [2.9])
for r in range(d+1): #Loop over function index
s1=0; s2=1 #Alternate rows in array a
a[0][0]=1.0
for k in range(1,n+1):
der=0.0
rk=r-k; pk=d-k
#print("\nrk: %s" %rk), print("pk: %s" %pk), print("s1: %s" %s1)
#print("s2: %s" %s2), print("r: %s" %r) ,print("k: %s" %k)
#print("j: %s" %j)
#wenn r-k>0 (Linker Term) und somit
if(r>=k):
a[s2][0]=a[s1][0]/ndu[pk+1][rk] #2te: a[0][0] 1/
#print("a[%s][0]=a[%s][0](%s)/ndu[%s][%s](%s)=%s" \
# %(s2,s1,a[s1][0],pk+1,rk,ndu[pk+1][rk],a[s2][0]))
der=a[s2][0]*ndu[rk][pk]
if (rk>=-1):
j1=1
else:
j1=-rk
if (r-1<=pk):
j2=k-1
else:
j2=d-r
#Hier geht er bei der ersten Ableitung gar nicht rein
#print("j1:%s j2:%s" %(j1,j2))
for j in range(j1,j2+1):
a[s2][j]=(a[s1][j]-a[s1][j-1])/ndu[pk+1][rk+j]
der+=a[s2][j]*ndu[rk+j][pk]
if(r<=pk):
a[s2][k]=-a[s1][k-1]/ndu[pk+1][r] #1/ u(i+p+1)-u(i+1)
der+=a[s2][k]*ndu[r][pk] #N(i+1)(p-1)
#print("a[%s][%s]=-a[%s][%s](%s)/ndu[%s][%s](%s)=%s" \
# %(s2,k,s1,k-1,a[s1][k-1],pk+1,r,ndu[pk+1][r],a[s2][k]))
#print("ndu[%s][%s]=%s" %(r,pk,ndu[r][pk]))
ders[k][r]=der
#print("ders[%s][%s]=%s" %(k,r,der))
j=s1; s1=s2; s2=j #Switch rows
        #Multiply through by the correct factors
r=d
for k in range(1,n+1):
for j in range(d+1):
ders[k][j] *=r
r*=(d-k)
return ders
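# Editor's note: hedged usage sketch, not part of the original module.  It evaluates a
# degree-2 B-spline over the clamped knot vector [0,0,0,1,1,1] (i.e. a quadratic Bezier
# segment) given as homogeneous [x, y, w] control points; all numbers are illustrative.
def _bspline_demo():
    bsp = BSplineClass(degree=2,
                       Knots=[0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
                       CPts=[[0.0, 0.0, 1.0], [1.0, 2.0, 1.0], [2.0, 0.0, 1.0]])
    # homogeneous point and first derivative of the curve at u=0.5
    return bsp.bspline_ders_evaluate(n=1, u=0.5)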
class BiarcFittingClass:
def __init__(self,degree, CPoints, Weights, Knots):
        #Maximum deviation allowed for the biarc curve
        self.epsilon=0.01
        self.epsilon_high=self.epsilon*0.03
        self.segments=50
        #Initialize the NURBS class
        self.NURBS=NURBSClass(degree=degree,Knots=Knots,CPoints=CPoints,Weights=Weights)
        #Check the NURBS parameters and whether any control points coincide
        #within the given tolerance (=> ignore those spans)
        self.NURBS.check_NURBSParameters(self.epsilon)
        #High accuracy biarc fitting of the NURBS
        BiarcCurves, self.PtsVec=self.calc_high_accurancy_BiarcCurve()
        #Compress the biarcs and the lines
        self.Curve=self.analyse_and_compress(BiarcCurves)
def analyse_and_compress(self,BiarcCurves):
#Compress all to one curve
Curves=[]
for BiarcCurve in BiarcCurves:
Curve=[]
for Biarc in BiarcCurve:
for geo in Biarc.geos:
Curve.append(geo)
#print ("Vor Linie: Elemente: %0.0f" %len(Curve))
Curve=self.compress_lines(Curve)
#print ("Nach Linie: Elemente: %0.0f" %len(Curve))
Curve=self.compress_biarcs(Curve)
#print ("Nach Biarc: Elemente: %0.0f" %len(Curve))
Curves+=Curve
return Curves
def compress_biarcs(self,Curves):
NewCurve=[]
tau=self.epsilon
Pts=[]
        #Loop over all geometry elements
for geo in Curves:
NewCurve.append(geo)
            #Once the curve contains at least 3 elements
if len(NewCurve)>=3:
                #Three arcs in a row: possible spiral
if ((NewCurve[-3].type=="ArcGeo")\
and(NewCurve[-2].type=="ArcGeo")\
and(NewCurve[-1].type=="ArcGeo")):
Pts.append(geo.Pe)
if(NewCurve[-3].r<=NewCurve[-2].r)\
and(NewCurve[-2].r<=NewCurve[-1].r)\
and((NewCurve[-3].ext*NewCurve[-2].ext)>=0.0)\
and((NewCurve[-2].ext*NewCurve[-1].ext)>=0.0):
#print "Increasing"
anz=len(NewCurve)
triarc=NewCurve[anz-3:anz]
Arc0,Arc1= self.fit_triac_by_inc_biarc(triarc,tau)
diff=self.check_diff_to_pts(Pts,Arc0,Arc1)
                    #Check whether it is within tolerance
try:
if max(diff)<self.epsilon:
tau=self.calc_active_tolerance_inc(self.epsilon,triarc,Arc0,Arc1)
del NewCurve[anz-3:anz]
NewCurve.append(Arc0)
NewCurve.append(Arc1)
except:
pass
elif (NewCurve[-3].r>NewCurve[-2].r)\
and(NewCurve[-2].r>NewCurve[-1].r)\
and((NewCurve[-3].ext*NewCurve[-2].ext)>=0.0)\
and((NewCurve[-2].ext*NewCurve[-1].ext)>=0.0):
#print "Decreasing"
anz=len(NewCurve)
triarc=NewCurve[anz-3:anz]
Arc0,Arc1= self.fit_triac_by_dec_biarc(triarc,tau)
diff=self.check_diff_to_pts(Pts,Arc1,Arc0)
try:
if max(diff)<self.epsilon:
tau=self.calc_active_tolerance_dec(self.epsilon,triarc,Arc0,Arc1)
del NewCurve[anz-3:anz]
NewCurve.append(Arc0)
NewCurve.append(Arc1)
except:
pass
else:
Pts=[]
return NewCurve
def calc_active_tolerance_inc(self,tau,arc,Arc0,Arc1):
V0=arc[0].Pa.unit_vector(arc[0].O)
Vb=Arc1.Pa.unit_vector(Arc1.O)
t_=(2*arc[0].r*tau+pow(tau,2))/\
(2*(arc[0].r+(arc[0].r+tau)*V0*Vb))
te=arc[0].r+t_-(Arc0.Pe-(arc[0].O+(t_*V0))).distance()
tm=arc[1].O.distance(Arc0.Pe)-abs(arc[1].r)
if tm<0.0:
tf=tau
else:
tf=tau-tm
#print("tm: %0.3f; te: %0.3f; tau: %0.3f" %(tm,te,tau))
epsilon=min([te,tf,tau])
if epsilon<0.0:
epsilon=0.0
return epsilon
def calc_active_tolerance_dec(self,tau,arc,Arc0,Arc1):
V0=arc[2].Pa.unit_vector(arc[2].O)
Vb=Arc1.Pa.unit_vector(Arc1.O)
t_=(2*arc[2].r*tau+pow(tau,2))/\
(2*(arc[2].r+(arc[2].r+tau)*V0*Vb))
te=arc[2].r+t_-(Arc0.Pe-(arc[2].O+(t_*V0))).distance()
te=tau
tm=-arc[1].O.distance(Arc0.Pe)+abs(arc[1].r)
if tm<0.0:
tf=tau
else:
tf=tau-tm
#print("tm: %0.3f; tf: %0.3f; te: %0.3f; tau: %0.3f" %(tm,tf,te,tau))
epsilon=min([te,tf,tau])
if epsilon<0.0:
epsilon=0.0
return epsilon
def fit_triac_by_inc_biarc(self,arc,eps):
#Errechnen von tb
V0=arc[0].Pa.unit_vector(arc[0].O)
V2=arc[2].Pe.unit_vector(arc[2].O)
        #Compute the helper quantities
t0=(arc[2].r-arc[0].r)
D=(arc[2].O-arc[0].O)
X0=(t0*t0)-(D*D)
X1=2*(D*V0-t0)
Y0=2*(t0-D*V2)
Y1=2*(V0*V2-1)
#Errechnen von tb
tb=(pow((arc[1].r-arc[0].r+eps),2)-((arc[1].O-arc[0].O)*(arc[1].O-arc[0].O)))/\
(2*(arc[1].r-arc[0].r+eps+(arc[1].O-arc[0].O)*V0))
#Errechnen von tc
tc=(pow(t0,2)-(D*D))/(2*(t0-D*V0))
#Auswahl von t
t=min([tb,tc])
#Errechnen von u
u=(X0+X1*t)/(Y0+Y1*t)
        #Compute the new arcs
Oa=arc[0].O+t*V0
ra=arc[0].r+t
Ob=arc[2].O-u*V2
rb=arc[2].r-u
Vn=Ob.unit_vector(Oa)
Pn=Oa+ra*Vn
Arc0=ArcGeo(Pa=arc[0].Pa,Pe=Pn,O=Oa,r=ra,dir=arc[0].ext)
Arc1=ArcGeo(Pa=Pn,Pe=arc[2].Pe,O=Ob,r=rb,dir=arc[2].ext)
## print('\nAlte')
## print arc[0]
## print arc[1]
## print arc[2]
## print("tb: %0.3f; tc: %0.3f; t: %0.3f; u: %0.3f" %(tb,tc,t,u))
## print 'Neue'
## print Arc0
## print Arc1
return Arc0, Arc1
def fit_triac_by_dec_biarc(self,arc,eps):
V0=arc[2].Pe.unit_vector(arc[2].O)
V2=arc[0].Pa.unit_vector(arc[0].O)
        #Compute the helper quantities
t0=(arc[0].r-arc[2].r)
D=(arc[0].O-arc[2].O)
X0=(t0*t0)-(D*D)
X1=2*(D*V0-t0)
Y0=2*(t0-D*V2)
Y1=2*(V0*V2-1)
#Errechnen von tb
tb=(pow((arc[1].r-arc[2].r+eps),2)-((arc[1].O-arc[2].O)*(arc[1].O-arc[2].O)))/\
(2*(arc[1].r-arc[2].r+eps+(arc[1].O-arc[2].O)*V0))
#Errechnen von tc
tc=(pow(t0,2)-(D*D))/(2*(t0-D*V0))
#Auswahl von t
t=min([tb,tc])
#Errechnen von u
u=(X0+X1*t)/(Y0+Y1*t)
        #Compute the new arcs
Oa=arc[0].O-u*V2
ra=arc[0].r-u
Ob=arc[2].O+t*V0
rb=arc[2].r+t
Vn=Oa.unit_vector(Ob)
Pn=Ob+rb*Vn
Arc0=ArcGeo(Pa=arc[0].Pa,Pe=Pn,O=Oa,r=ra,\
s_ang=Oa.norm_angle(arc[0].Pa),e_ang=Oa.norm_angle(Pn),dir=arc[0].ext)
Arc1=ArcGeo(Pa=Pn,Pe=arc[2].Pe,O=Ob,r=rb,\
s_ang=Ob.norm_angle(Pn),e_ang=Ob.norm_angle(arc[2].Pe),dir=arc[2].ext)
return Arc0, Arc1
def check_diff_to_pts(self,Pts,Arc0,Arc1):
diff=[]
for Pt in Pts:
w0=Arc0.O.norm_angle(Pt)
w1=Arc1.O.norm_angle(Pt)
if (w0>=min([Arc0.s_ang,Arc0.e_ang]))and\
(w0<=max([Arc0.s_ang,Arc0.e_ang])):
diff.append(abs(Arc0.O.distance(Pt)-abs(Arc0.r)))
elif (w1>=min([Arc1.s_ang,Arc1.e_ang]))and\
(w1<=max([Arc1.s_ang,Arc1.e_ang])):
diff.append(abs(Arc1.O.distance(Pt)-abs(Arc1.r)))
else:
del Pts[Pts.index(Pt)]
return diff
def compress_lines(self,Curve):
joint=[]
NewCurve=[]
Pts=[]
for geo in Curve:
NewCurve.append(geo)
anz=len(NewCurve)
if anz>=2:
                #If the geo is a line, append it and check the joint line
if (NewCurve[-2].type=="LineGeo") and (NewCurve[-1].type=="LineGeo"):
Pts.append(geo.Pe)
JointLine=LineGeo(NewCurve[-2].Pa,NewCurve[-1].Pe)
                    #Check the deviation
res=[]
for point in Pts:
res.append(JointLine.distance2point(point))
#print res
#Wenn die Abweichung OK ist Vorheriges anhängen
if (max(res)<self.epsilon):
anz=len(NewCurve)
del NewCurve[anz-2:anz]
NewCurve.append(JointLine)
points=[geo.Pe]
#Wenn nicht nicht anhängen und Pts zurücksetzen
else:
Pts=[geo.Pe]
                #If it is any geometry other than a line
else:
Pts=[]
return NewCurve
def calc_high_accurancy_BiarcCurve(self):
        #Determine the separate u sections that have to be computed
u_sections=self.calc_u_sections(self.NURBS.Knots,\
self.NURBS.ignor,\
self.NURBS.knt_m_change[:])
        #Step must be odd, otherwise there is a rounding problem around 1
self.max_step=float(self.NURBS.Knots[-1]/(float(self.segments)))
#Berechnen des ersten Biarcs fürs Fitting
BiarcCurves=[]
PtsVecs=[]
#Schleife für die einzelnen Abschnitte
for u_sect in u_sections:
BiarcCurve, PtsVec=self.calc_Biarc_section(u_sect,self.epsilon_high)
BiarcCurves.append(BiarcCurve)
PtsVecs.append(PtsVec)
return BiarcCurves, PtsVecs
def calc_u_sections(self,Knots,ignor,unsteady):
#Initialisieren
u_sections=[]
#Abfrage ob bereits der Anfang ignoriert wird
u_beg=Knots[0]
u_end=Knots[0]
ig_nr=0
#Schleife bis u_end==Knots[0]
while u_end<Knots[-1]:
u_beg=u_end
#Wenn Ignor == Start dann Start = Ende von Ignor
if len(ignor)>ig_nr:
if u_beg==ignor[ig_nr][0]:
u_beg=ignor[ig_nr][1]
ig_nr+=1
#Löschen der unsteadys bis größer als u_beg
while (len(unsteady)>0)and(unsteady[0]<=u_beg):
del(unsteady[0])
#Wenn Ignor noch mehr beiinhaltet dann Ignor Anfang = Ende
if len(ignor)>ig_nr:
u_end=ignor[ig_nr][0]
else:
u_end=Knots[-1]
if (len(unsteady)>0)and(unsteady[0]<u_end):
u_end=unsteady[0]
del(unsteady[0])
#Solange u_beg nicht das Ende ist anhängen
if not(u_beg==u_end):
u_sections.append([u_beg,u_end])
return u_sections
def calc_Biarc_section(self,u_sect,max_tol):
min_u=1e-9
BiarcCurve=[]
cur_step=self.max_step
u=u_sect[0]+min_u
PtsVec=[self.NURBS.NURBS_evaluate(n=1,u=u)]
step=0
#Berechnen bis alle Biarcs berechnet sind
while(u<u_sect[-1]-min_u):
step+=1
u+=cur_step
#Begrenzung von u auf den Maximalwert
if u>u_sect[-1]:
cur_step=u_sect[-1]-(u-cur_step)-min_u
u=u_sect[-1]-min_u
PtVec=self.NURBS.NURBS_evaluate(n=1,u=u)
            #Compute the next biarc from the last two points
Biarc=BiarcClass(PtsVec[-1][0],PtsVec[-1][1],PtVec[0],PtVec[1],max_tol)
if Biarc.shape=="Zero":
self.cur_step=min([cur_step*2,self.max_step])
elif Biarc.shape=="LineGeo":
BiarcCurve.append(Biarc)
cur_step=min([cur_step*2,self.max_step])
PtsVec.append(PtVec)
else:
if Biarc.check_biarc_fitting_tolerance(self.NURBS,max_tol,u-cur_step,u):
PtsVec.append(PtVec)
BiarcCurve.append(Biarc)
cur_step=min([cur_step/0.7,self.max_step])
else:
u-=cur_step
cur_step*=0.7
if step>1000:
raise ValueError, "Iteraitions above 1000 reduce tolerance"
return BiarcCurve, PtsVec
class BiarcClass:
def __init__(self,Pa=[],tan_a=[],Pb=[],tan_b=[],min_len=1e-5):
        min_alpha=1e-4 #Angle in rad below which a straight line is assumed
        max_r=5e3      #Max radius above which a straight line is assumed (10m)
self.Pa=Pa
self.tan_a=tan_a
self.Pb=Pb
self.tan_b=tan_b
self.l=0.0
self.shape=None
self.geos=[]
self.k=0.0
        #Compute the angles, the length and the shape
norm_angle,self.l=self.calc_normal(self.Pa,self.Pb)
alpha,beta,self.teta,self.shape=self.calc_diff_angles(norm_angle,\
self.tan_a,\
self.tan_b,\
min_alpha)
if(self.l<min_len):
self.shape="Zero"
print "Zero"
pass
elif(self.shape=="LineGeo"):
            #Create the geometry
self.shape="LineGeo"
self.geos.append(LineGeo(self.Pa,self.Pb))
else:
            #Compute the radii, centre points and the intermediate point
r1, r2=self.calc_r1_r2(self.l,alpha,beta,self.teta)
if (abs(r1)>max_r)or(abs(r2)>max_r):
#Erstellen der Geometrie
self.shape="LineGeo"
self.geos.append(LineGeo(self.Pa,self.Pb))
return
O1, O2, k =self.calc_O1_O2_k(r1,r2,self.tan_a,self.teta)
            #Compute the start and end angles for plotting
s_ang1,e_ang1=self.calc_s_e_ang(self.Pa,O1,k)
s_ang2,e_ang2=self.calc_s_e_ang(k,O2,self.Pb)
            #Compute the direction and the extent
dir_ang1=(tan_a-s_ang1)%(-2*pi)
dir_ang1-=ceil(dir_ang1/(pi))*(2*pi)
dir_ang2=(tan_b-e_ang2)%(-2*pi)
dir_ang2-=ceil(dir_ang2/(pi))*(2*pi)
            #Create the geometries
self.geos.append(ArcGeo(Pa=self.Pa,Pe=k,O=O1,r=r1,\
s_ang=s_ang1,e_ang=e_ang1,dir=dir_ang1))
self.geos.append(ArcGeo(Pa=k,Pe=self.Pb,O=O2,r=r2,\
s_ang=s_ang2,e_ang=e_ang2,dir=dir_ang2))
def calc_O1_O2_k(self,r1,r2,tan_a,teta):
#print("r1: %0.3f, r2: %0.3f, tan_a: %0.3f, teta: %0.3f" %(r1,r2,tan_a,teta))
#print("N1: x: %0.3f, y: %0.3f" %(-sin(tan_a), cos(tan_a)))
#print("V: x: %0.3f, y: %0.3f" %(-sin(teta+tan_a),cos(teta+tan_a)))
O1=PointClass(x=self.Pa.x-r1*sin(tan_a),\
y=self.Pa.y+r1*cos(tan_a))
k=PointClass(x=self.Pa.x+r1*(-sin(tan_a)+sin(teta+tan_a)),\
y=self.Pa.y+r1*(cos(tan_a)-cos(tan_a+teta)))
O2=PointClass(x=k.x+r2*(-sin(teta+tan_a)),\
y=k.y+r2*(cos(teta+tan_a)))
return O1, O2, k
def calc_normal(self,Pa,Pb):
norm_angle=Pa.norm_angle(Pb)
l=Pa.distance(Pb)
return norm_angle, l
def calc_diff_angles(self,norm_angle,tan_a,tan_b,min_alpha):
#print("Norm angle: %0.3f, tan_a: %0.3f, tan_b %0.3f" %(norm_angle,tan_a,tan_b))
alpha=(norm_angle-tan_a)
beta=(tan_b-norm_angle)
alpha,beta= self.limit_angles(alpha,beta)
if alpha*beta>0.0:
shape="C-shaped"
teta=alpha
elif abs(alpha-beta)<min_alpha:
shape="LineGeo"
teta=alpha
else:
shape="S-shaped"
teta=(3*alpha-beta)/2
return alpha, beta, teta, shape
def limit_angles(self,alpha,beta):
#print("limit_angles: alpha: %s, beta: %s" %(alpha,beta))
if (alpha<-pi):
alpha += 2*pi
if (alpha>pi):
alpha -= 2*pi
if (beta<-pi):
beta += 2*pi
if (beta>pi):
beta -= 2*pi
while (alpha-beta)>pi:
alpha=alpha-2*pi
while (alpha-beta)<-pi:
alpha=alpha+2*pi
#print(" -->> alpha: %s, beta: %s" %(alpha,beta))
return alpha,beta
def calc_r1_r2(self,l,alpha,beta,teta):
#print("alpha: %s, beta: %s, teta: %s" %(alpha,beta,teta))
r1=(l/(2*sin((alpha+beta)/2))*sin((beta-alpha+teta)/2)/sin(teta/2))
r2=(l/(2*sin((alpha+beta)/2))*sin((2*alpha-teta)/2)/sin((alpha+beta-teta)/2))
return r1, r2
def calc_s_e_ang(self,P1,O,P2):
s_ang=O.norm_angle(P1)
e_ang=O.norm_angle(P2)
return s_ang, e_ang
def check_biarc_fitting_tolerance(self,NURBS,epsilon,u0,u1):
check_step=(u1-u0)/5
check_u=[]
check_Pts=[]
fit_error=[]
for i in range(1,5):
check_u.append(u0+check_step*i)
check_Pts.append(NURBS.NURBS_evaluate(n=0,u=check_u[-1]))
fit_error.append(self.get_biarc_fitting_error(check_Pts[-1]))
if max(fit_error)>=epsilon:
#print self
#print fit_error
#print "Nein"
return 0
else:
#print "Ja"
#print self
return 1
def get_biarc_fitting_error(self,Pt):
        #Determine in which arc segment the point lies:
w1=self.geos[0].O.norm_angle(Pt)
if (w1>=min([self.geos[0].s_ang,self.geos[0].e_ang]))and\
(w1<=max([self.geos[0].s_ang,self.geos[0].e_ang])):
diff=self.geos[0].O.distance(Pt)-abs(self.geos[0].r)
else:
diff=self.geos[1].O.distance(Pt)-abs(self.geos[1].r)
return abs(diff)
def __str__(self):
s= ("\nBiarc Shape: %s" %(self.shape))+\
("\nPa : %s; Tangent: %0.3f" %(self.Pa,self.tan_a))+\
("\nPb : %s; Tangent: %0.3f" %(self.Pb,self.tan_b))+\
("\nteta: %0.3f, l: %0.3f" %(self.teta,self.l))
for geo in self.geos:
s+=str(geo)
return s
class ArcGeo:
def __init__(self,Pa=None,Pe=None,O=None,r=1,s_ang=None,e_ang=None,dir=1):
self.type="ArcGeo"
self.Pa=Pa
self.Pe=Pe
self.O=O
self.r=abs(r)
        #If not supplied, compute the start and end angles
if type(s_ang)==type(None):
s_ang=O.norm_angle(Pa)
if type(e_ang)==type(None):
e_ang=O.norm_angle(Pe)
        #Derive the extent from the sign of dir
self.ext=e_ang-s_ang
if dir>0.0:
self.ext=self.ext%(-2*pi)
self.ext-=floor(self.ext/(2*pi))*(2*pi)
else:
self.ext=self.ext%(-2*pi)
self.ext+=ceil(self.ext/(2*pi))*(2*pi)
self.s_ang=s_ang
self.e_ang=e_ang
self.length=self.r*abs(self.ext)
def plot2plot(self, plot):
#print self
x=[]; y=[]
#Alle 6 Grad ein Linien Segment Drucken
segments=int((abs(degrees(self.ext))//0.01)+1)
for i in range(segments+1):
ang=self.s_ang+i*self.ext/segments
x.append(self.O.x+cos(ang)*abs(self.r))
y.append(self.O.y+sin(ang)*abs(self.r))
plot.plot(x,y,'-g')
#plot.plot([x[0],x[-1]],[y[0],y[-1]],'cd')
plot.plot([self.Pa.x,self.Pe.x],[self.Pa.y,self.Pe.y],'cd')
def __str__(self):
return ("\nARC")+\
("\nPa : %s; s_ang: %0.5f" %(self.Pa,self.s_ang))+\
("\nPe : %s; e_ang: %0.5f" %(self.Pe,self.e_ang))+\
("\nO : %s; r: %0.3f" %(self.O,self.r))+\
("\next : %0.5f; length: %0.5f" %(self.ext,self.length))
class LineGeo:
def __init__(self,Pa,Pe):
self.type="LineGeo"
self.Pa=Pa
self.Pe=Pe
self.length=self.Pa.distance(self.Pe)
def get_start_end_points(self,direction):
if direction==0:
punkt=self.Pa
angle=self.Pe.norm_angle(self.Pa)
elif direction==1:
punkt=self.Pe
angle=self.Pa.norm_angle(self.Pe)
return punkt, angle
def plot2plot(self, plot):
#print self
plot.plot([self.Pa.x,self.Pe.x],[self.Pa.y,self.Pe.y],'-dm')
def distance2point(self,point):
AE=self.Pa.distance(self.Pe)
AP=self.Pa.distance(point)
EP=self.Pe.distance(point)
AEPA=(AE+AP+EP)/2
return abs(2*sqrt(abs(AEPA*(AEPA-AE)*(AEPA-AP)*(AEPA-EP)))/AE)
def __str__(self):
return ("\nLINE")+\
("\nPa : %s" %self.Pa)+\
("\nPe : %s" %self.Pe)+\
("\nlength: %0.5f" %self.length)
class PointClass:
def __init__(self,x=0,y=0):
self.x=x
self.y=y
def __str__(self):
return ('X ->%6.4f Y ->%6.4f' %(self.x,self.y))
def __cmp__(self, other) :
return (self.x == other.x) and (self.y == other.y)
def __neg__(self):
return -1.0*self
def __add__(self, other): # add to another point
return PointClass(self.x+other.x, self.y+other.y)
def __sub__(self, other):
return self + -other
def __rmul__(self, other):
return PointClass(other * self.x, other * self.y)
def __mul__(self, other):
        if type(other)==list:
            #Scale the point
            return PointClass(x=self.x*other[0],y=self.y*other[1])
        else:
            #Compute the scalar (dot) product
            return self.x*other.x + self.y*other.y
def unit_vector(self,Pto=None):
diffVec=Pto-self
l=diffVec.distance()
return PointClass(diffVec.x/l,diffVec.y/l)
def distance(self,other=None):
if type(other)==type(None):
other=PointClass(x=0.0,y=0.0)
return sqrt(pow(self.x-other.x,2)+pow(self.y-other.y,2))
def norm_angle(self,other=None):
if type(other)==type(None):
other=PointClass(x=0.0,y=0.0)
return atan2(other.y-self.y,other.x-self.x)
def isintol(self,other,tol):
return (abs(self.x-other.x)<=tol) & (abs(self.y-other.y)<tol)
def transform_to_Norm_Coord(self,other,alpha):
xt=other.x+self.x*cos(alpha)+self.y*sin(alpha)
yt=other.y+self.x*sin(alpha)+self.y*cos(alpha)
return PointClass(x=xt,y=yt)
def get_arc_point(self,ang=0,r=1):
return PointClass(x=self.x+cos(radians(ang))*r,\
y=self.y+sin(radians(ang))*r)
def triangle_height(self,other1,other2):
        #Compute the three side lengths of the triangle
a=self.distance(other1)
b=other1.distance(other2)
c=self.distance(other2)
return sqrt(pow(b,2)-pow((pow(c,2)+pow(b,2)-pow(a,2))/(2*c),2))
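# Added illustrative sketch (not part of the original code and not called
# anywhere): basic PointClass operations used throughout the biarc fitting
# above; the coordinate values are made up for demonstration.
def _example_pointclass_usage():
    Pa=PointClass(x=0.0,y=0.0)
    Pe=PointClass(x=3.0,y=4.0)
    d=Pa.distance(Pe)        #Euclidean distance, here 5.0
    ang=Pa.norm_angle(Pe)    #angle of Pe as seen from Pa, in radians
    mid=0.5*(Pa+Pe)          #scalar*point and point+point are supported
    return d, ang, mid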
class PlotClass(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,-1,
'CanvasFrame',size=(550,350))
self.SetBackgroundColour(wx.NamedColor("WHITE"))
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.sizer)
self.Fit()
self.add_toolbar() # comment this out for no toolbar
self.axes.set_title("NURBS and B-Spline Algorithms: ")
def add_toolbar(self):
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
# Mac platform (OSX 10.3, MacPython) does not seem to cope with
# having a toolbar in a sizer. This work-around gets the buttons
# back, but at the expense of having the toolbar at the top
self.SetToolBar(self.toolbar)
else:
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
# As noted above, doesn't work for Mac.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
# update the axes menu on the toolbar
self.toolbar.update()
def OnPaint(self, event):
self.canvas.draw()
def make_nurbs_plot(self,CPoints=[],Points=[],Tang=[]):
xC=[]; yC=[]; xP=[]; yP=[]
for Cpt in CPoints:
xC.append(Cpt.x)
yC.append(Cpt.y)
for Pt in Points:
xP.append(Pt.x)
yP.append(Pt.y)
self.axes.plot(xC,yC,'-.xr',xP,yP,'-og')
if len(Tang)>0:
arrow_len=0.3
self.axes.hold(True)
for nr in range(len(Tang)):
self.axes.arrow(Points[nr].x,Points[nr].y,\
cos(Tang[nr])*arrow_len,\
sin(Tang[nr])*arrow_len,\
width=0.02)
self.canvas.show()
def make_nurbs_biarc_plot(self,biarcs):
self.axes.set_title("NURBS, BIARC Fitting Algorithms: ")
arrow_len=0.3
arrow_width=arrow_len*0.05
xP=[]
yP=[]
self.axes.hold(True)
for PtsVec in biarcs.PtsVec:
for Pt in PtsVec:
                xP.append(Pt[0].x)
                yP.append(Pt[0].y)
self.axes.plot([Pt[0].x],[Pt[0].y],'xr')
## self.plot1.arrow(Pt[0].x,Pt[0].y,\
## cos(Pt[1])*arrow_len,\
## sin(Pt[1])*arrow_len,\
## width=arrow_width)
for geo in biarcs.Curve:
geo.plot2plot(self.axes)
self.axes.axis('scaled')
#self.canvas.show()
def _onSize(self, event):
self._resizeflag = True
def _onIdle(self, evt):
if self._resizeflag:
self._resizeflag = False
self._SetSize()
self.draw()
def _SetSize(self, pixels = None):
"""
This method can be called to force the Plot to be a desired size, which defaults to
the ClientSize of the panel
"""
if not pixels:
pixels = self.GetClientSize()
self.canvas.SetSize(pixels)
self.figure.set_figsize_inches(pixels[0]/self.figure.get_dpi(),
pixels[1]/self.figure.get_dpi())
class ExamplesClass:
def __init__(self):
pass
def calc_nurbs_1(self):
        #Initialize the NURBS class
degree, CPoints, Weights, Knots=self.get_nurbs_()
Nurbs=NURBSClass(degree=degree,Knots=Knots,CPoints=CPoints,Weights=Weights)
        #Compute 30 points of the NURBS curve
Points, Tang=Nurbs.calc_curve(n=1,cpts_nr=30)
CPoints=CPoints
return CPoints, Points, Tang
def calc_bspline_1(self):
        #Initialize the B-spline class
        degree, CPts, Knots=self.get_bspline_1()
        BSpline=BSplineClass(degree=degree,Knots=Knots,CPts=CPts)
        #Compute 30 points of the B-spline up to the first derivative
        Points, Tang=BSpline.calc_curve(n=1,cpts_nr=30)
        CPoints=[]
        for CPt in CPts:
            CPoints.append(PointClass(x=CPt[0],y=CPt[1]))
return CPoints, Points, Tang
def get_nurbs_6(self):
degree=3
Knots = [0.0, 0.0, 0.0, 0.0,\
0.10000000000000001, 0.10000000000000001, 0.10000000000000001, 0.10000000000000001,\
0.20000000000000001, 0.20000000000000001, 0.20000000000000001, 0.20000000000000001,\
0.29999999999999999, 0.29999999999999999, 0.29999999999999999, 0.29999999999999999,\
0.40000000000000002, 0.40000000000000002, 0.40000000000000002, 0.40000000000000002,\
0.5, 0.5, 0.5, 0.5,\
0.59999999999999998, 0.59999999999999998, 0.59999999999999998, 0.59999999999999998,\
0.69999999999999996, 0.69999999999999996, 0.69999999999999996, 0.69999999999999996,\
0.79999999999999993, 0.79999999999999993, 0.79999999999999993, 0.79999999999999993,\
0.89999999999999991, 0.89999999999999991, 0.89999999999999991, 0.89999999999999991,\
1.0, 1.0, 1.0, 1.0]
Weights = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
CPoints=[]
CPoints.append(PointClass(x=-69.25000, y=-77.50000))
CPoints.append(PointClass(x=-69.25000, y=-77.50000))
CPoints.append(PointClass(x=-69.24930, y=-77.50000))
CPoints.append(PointClass(x=-69.24930, y=-77.50000))
CPoints.append(PointClass(x=-69.24930, y=-77.50000))
CPoints.append(PointClass(x=-60.97700, y=-77.50020))
CPoints.append(PointClass(x=-53.35560, y=-80.37450))
CPoints.append(PointClass(x=-47.33170, y=-85.17480))
CPoints.append(PointClass(x=-47.33170, y=-85.17480))
CPoints.append(PointClass(x=-47.33170, y=-85.17480))
CPoints.append(PointClass(x=-49.82390, y=-87.66700))
CPoints.append(PointClass(x=-49.82390, y=-87.66700))
CPoints.append(PointClass(x=-49.82390, y=-87.66700))
CPoints.append(PointClass(x=-55.19980, y=-83.49120))
CPoints.append(PointClass(x=-61.94320, y=-81.00010))
CPoints.append(PointClass(x=-69.24930, y=-81.00000))
CPoints.append(PointClass(x=-69.24930, y=-81.00000))
CPoints.append(PointClass(x=-69.24930, y=-81.00000))
CPoints.append(PointClass(x=-69.25000, y=-81.00000))
CPoints.append(PointClass(x=-69.25000, y=-81.00000))
CPoints.append(PointClass(x=-69.25000, y=-81.00000))
CPoints.append(PointClass(x=-69.25000, y=-81.00000))
CPoints.append(PointClass(x=-69.25150, y=-81.00000))
CPoints.append(PointClass(x=-69.25150, y=-81.00000))
CPoints.append(PointClass(x=-69.25150, y=-81.00000))
CPoints.append(PointClass(x=-76.55740, y=-81.00030))
CPoints.append(PointClass(x=-83.30040, y=-83.49120))
CPoints.append(PointClass(x=-88.67610, y=-87.66700))
CPoints.append(PointClass(x=-88.67610, y=-87.66700))
CPoints.append(PointClass(x=-88.67610, y=-87.66700))
CPoints.append(PointClass(x=-91.16830, y=-85.17480))
CPoints.append(PointClass(x=-91.16830, y=-85.17480))
CPoints.append(PointClass(x=-91.16830, y=-85.17480))
CPoints.append(PointClass(x=-85.14470, y=-80.37460))
CPoints.append(PointClass(x=-77.52360, y=-77.50030))
CPoints.append(PointClass(x=-69.25150, y=-77.50000))
CPoints.append(PointClass(x=-69.25150, y=-77.50000))
CPoints.append(PointClass(x=-69.25150, y=-77.50000))
CPoints.append(PointClass(x=-69.25000, y=-77.50000))
CPoints.append(PointClass(x=-69.25000, y=-77.50000))
return degree, CPoints, Weights, Knots
def get_nurbs_61(self):
degree=3
Knots = [0.0, 0.0, 0.0, 0.0,\
0.10000000000000001, 0.10000000000000001, 0.10000000000000001, 0.10000000000000001,\
0.20000000000000001, 0.20000000000000001, 0.20000000000000001, 0.20000000000000001]
Weights = [1, 1, 1, 1, 1, 1, 1, 1]
CPoints=[]
CPoints.append(PointClass(x=-69.25000, y=-77.50000))
CPoints.append(PointClass(x=-69.25000, y=-77.50000))
CPoints.append(PointClass(x=-69.24930, y=-77.50000))
CPoints.append(PointClass(x=-69.24930, y=-77.50000))
CPoints.append(PointClass(x=-69.24930, y=-77.50000))
CPoints.append(PointClass(x=-60.97700, y=-77.50020))
CPoints.append(PointClass(x=-53.35560, y=-80.37450))
CPoints.append(PointClass(x=-47.33170, y=-85.17480))
return degree, CPoints, Weights, Knots
def get_nurbs_62(self):
degree=3
Knots = [0.20000000000000001, 0.20000000000000001, 0.20000000000000001, 0.20000000000000001,\
0.29999999999999999, 0.29999999999999999, 0.29999999999999999, 0.29999999999999999,\
0.40000000000000002, 0.40000000000000002, 0.40000000000000002, 0.40000000000000002,\
0.5, 0.5, 0.5, 0.5,\
0.59999999999999998, 0.59999999999999998, 0.59999999999999998, 0.59999999999999998,\
0.69999999999999996, 0.69999999999999996, 0.69999999999999996, 0.69999999999999996,\
0.79999999999999993, 0.79999999999999993, 0.79999999999999993, 0.79999999999999993,\
0.89999999999999991, 0.89999999999999991, 0.89999999999999991, 0.89999999999999991,\
1.0, 1.0, 1.0, 1.0]
Weights = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
CPoints=[]
CPoints.append(PointClass(x=-47.33170, y=-85.17480))
CPoints.append(PointClass(x=-47.33170, y=-85.17480))
CPoints.append(PointClass(x=-49.82390, y=-87.66700))
CPoints.append(PointClass(x=-49.82390, y=-87.66700))
CPoints.append(PointClass(x=-49.82390, y=-87.66700))
CPoints.append(PointClass(x=-55.19980, y=-83.49120))
CPoints.append(PointClass(x=-61.94320, y=-81.00010))
CPoints.append(PointClass(x=-69.24930, y=-81.00000))
CPoints.append(PointClass(x=-69.24930, y=-81.00000))
CPoints.append(PointClass(x=-69.24930, y=-81.00000))
CPoints.append(PointClass(x=-69.25000, y=-81.00000))
CPoints.append(PointClass(x=-69.25000, y=-81.00000))
CPoints.append(PointClass(x=-69.25000, y=-81.00000))
CPoints.append(PointClass(x=-69.25000, y=-81.00000))
CPoints.append(PointClass(x=-69.25150, y=-81.00000))
CPoints.append(PointClass(x=-69.25150, y=-81.00000))
CPoints.append(PointClass(x=-69.25150, y=-81.00000))
CPoints.append(PointClass(x=-76.55740, y=-81.00030))
CPoints.append(PointClass(x=-83.30040, y=-83.49120))
CPoints.append(PointClass(x=-88.67610, y=-87.66700))
CPoints.append(PointClass(x=-88.67610, y=-87.66700))
CPoints.append(PointClass(x=-88.67610, y=-87.66700))
CPoints.append(PointClass(x=-91.16830, y=-85.17480))
CPoints.append(PointClass(x=-91.16830, y=-85.17480))
CPoints.append(PointClass(x=-91.16830, y=-85.17480))
CPoints.append(PointClass(x=-85.14470, y=-80.37460))
CPoints.append(PointClass(x=-77.52360, y=-77.50030))
CPoints.append(PointClass(x=-69.25150, y=-77.50000))
CPoints.append(PointClass(x=-69.25150, y=-77.50000))
CPoints.append(PointClass(x=-69.25150, y=-77.50000))
CPoints.append(PointClass(x=-69.25000, y=-77.50000))
CPoints.append(PointClass(x=-69.25000, y=-77.50000))
return degree, CPoints, Weights, Knots
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = PlotClass()
if 1:
examples=ExamplesClass()
CPoints, Points, Tang=examples.calc_nurbs_1()
frame.make_nurbs_plot(CPoints,Points,Tang)
if 0:
examples=ExamplesClass()
degree, CPoints, Weights, Knots=examples.get_nurbs_6()
biarcfitting=BiarcFittingClass(degree, CPoints, Weights, Knots)
frame.make_nurbs_biarc_plot(biarcfitting)
# degree, CPoints, Weights, Knots=examples.get_nurbs_62()
# biarcfitting=BiarcFittingClass(degree, CPoints, Weights, Knots)
# frame.make_nurbs_biarc_plot(biarcfitting)
frame.Show(True)
return True
app = App(0)
app.MainLoop()
| gpl-3.0 |
manoharan-lab/structural-color | structcol/phase_func_sphere.py | 1 | 31372 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 8 12:34:06 2018
@author: stephenson
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import gaussian_kde
from scipy.spatial.distance import cdist
import structcol as sc
from . import montecarlo as mc
from scipy.special import factorial
def get_exit_pos(norm_refl, norm_trans, radius):
'''
find the exit points of trajectories sent into a sphere
Parameters
----------
norm_refl: 2d array-like, shape (3, number of trajectories)
array of normal vectors for trajectories at their
reflection exit from the sphere
    norm_trans: 2d array-like, shape (3, number of trajectories)
        array of normal vectors for trajectories at their
        transmission exit from the sphere
radius: float-like
radius of the spherical boundary
Returns
-------
x_inter: array-like
x-coordinates of exit positions of trajectories
y_inter: array-like
y-coordinates of exit positions of trajectories
z_inter:array-like
z-coordinates of exit positions of trajectories
'''
# add the normal vectors for reflection and transmission to get
# normal vectors for all exits
norm = norm_trans + norm_refl
# get the x-coordinate
x_inter = norm[0,:]
x_inter = x_inter[x_inter!=0]*radius
# get the y-coordinate
y_inter = norm[1,:]
y_inter = y_inter[y_inter!=0]*radius
# get the z-coordinate
z_inter = norm[2,:]
z_inter = z_inter[z_inter!=0]*radius
return x_inter, y_inter, z_inter
def calc_pdf(x, y, z, radius, plot = False, phi_dependent = False,
nu_range = np.linspace(0.01, 1, 200),
phi_range = np.linspace(0, 2*np.pi, 300),
kz = None):
'''
Calculates kernel density estimate of probability density function
as a function of nu or nu and phi for a given set of x,y, and z coordinates
x, y, and z are the points on the sphere at which a trajectory exits
the sphere
Parameters
----------
x: 1d array-like
x-coordinate of each trajectory at exit event
y: 1d array-like
y-coordinate of each trajectory at exit event
z: 1d array-like
z-coordinate of each trajectory at exit event
radius: float
radius of sphere boundary
plot: boolean
If set to True, the intermediate and final pdfs will be plotted
phi_dependent: boolean (optional)
If set to True, the returned pdf will require both a nu and a phi
input
nu_range: 1d array (optional)
        the nu values for which to calculate the pdf
phi_range: 1d array (optional)
the phi values for which to calculate the pdf, if the pdf is phi-dependent
kz: 1d array or None (optional)
the kz values at the exit events for all the trajectories
Returns
-------
pdf_array: 1d or 2d array
probability density function values as function of nu if
phi_dependent = False, and as a function of nu and phi if
        phi_dependent = True
Notes
-----
the probability density function is calculated as a function of nu and phi
instead of theta and phi to correct for the inequal areas on the sphere
surface for equal spacing in theta. Theta is related to nu by:
Theta = arccos(2*nu-1)
see http://mathworld.wolfram.com/SpherePointPicking.html for more details
'''
# calculate thetas for each exit point
# If optional parameter kz is specified, we calculate theta based on kz.
# If not, we calculate theta based on the z exit position
if kz is not None:
theta = np.arccos(kz)
else:
theta = np.arccos(z/radius)
# convert thetas to nus
nu = (np.cos(theta) + 1) / 2
# add reflections of data on to ends to prevent dips in distribution
# due to edges
nu_edge_correct = np.hstack((-nu, nu, -nu + 2))
if not phi_dependent:
# calculate the pdf kernel density estimate
pdf = gaussian_kde(nu_edge_correct)
# calculate the pdf for specific nu values
theta = np.linspace(0.01, np.pi, 200)
nu = (np.cos(theta)+1)/2
pdf_array = pdf(nu)
if plot == True:
# plot the distribution from data, with edge correction, and kde
plot_dist_1d(nu_range, nu, nu_edge_correct, pdf(nu_range))
plt.xlabel(r'$\nu$')
else:
# calculate phi for each exit point
phi = np.arctan2(y,x) + np.pi
# add reflections of data to ends to prevent dips in distribution
# due to edges
phi_edge_correct = np.tile(np.hstack((-phi, phi, -phi + 4*np.pi)),3)
nu_edge_correct = np.hstack((np.tile(-nu,3), np.tile(nu,3), np.tile(-nu+2,3)))
# calculate the pdf kernel density estimate
pdf = gaussian_kde(np.vstack([nu_edge_correct,phi_edge_correct]))
# calculate the pdf for specific nu and phi values
theta = np.linspace(0.01, np.pi, 200)
nu = (np.cos(theta)+1)/2
        nu_2d_eval, phi_2d_eval = np.meshgrid(nu, phi_range)
        pdf_array = np.reshape(pdf(np.vstack([nu_2d_eval.ravel(), phi_2d_eval.ravel()])), nu_2d_eval.shape)
if plot == True:
# plot the the calculated kernel density estimate in phi
nu_2d, phi_2d = np.meshgrid(nu_range, phi_range)
angle_range = np.vstack([nu_2d.ravel(), phi_2d.ravel()])
pdf_vals = np.reshape(pdf(angle_range), nu_2d.shape)
pdf_marg_nu = np.sum(pdf_vals, axis = 0)
# plot the nu distribution from data, with edge correction, and kde
plot_dist_1d(nu_range, nu, nu_edge_correct, pdf_marg_nu)
plt.xlabel(r'$\nu$')
# plot the phi distribution from data, with edge correction, and kde
pdf_marg_phi = np.sum(pdf_vals, axis = 1)
plot_dist_1d(phi_range, phi, phi_edge_correct, pdf_marg_phi)
plt.xlabel(r'$\phi$')
return pdf_array
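# Added illustrative sketch (not called anywhere): typical use of get_exit_pos
# and calc_pdf together. norm_refl, norm_trans, and radius are placeholders
# assumed to come from a Monte Carlo run with a spherical boundary (e.g.
# montecarlo.calc_refl_trans); they are not defined in this module.
def _example_exit_pdf_usage(norm_refl, norm_trans, radius):
    x_exit, y_exit, z_exit = get_exit_pos(norm_refl, norm_trans, radius)
    # probability density evaluated as a function of nu only
    pdf_nu = calc_pdf(x_exit, y_exit, z_exit, radius)
    # probability density evaluated as a function of nu and phi
    pdf_nu_phi = calc_pdf(x_exit, y_exit, z_exit, radius, phi_dependent=True)
    return pdf_nu, pdf_nu_phi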
def plot_phase_func(pdf, nu=np.linspace(0, 1, 200), phi=None, save=False):
'''
plots a given probability density function (pdf)
if the provided probability density is a function of only nu,
then the pdf is plotted against theta. We convert nu to theta because theta
is the more commonly used physical parameter in spherical coordinates.
if the provided probability density is a function of nu and phi,
then the pdf is plotted against theta and phi as a heatmap.
Parameters
----------
pdf: function, 1 or 2 arguments
probability density function that requires an input of nu values
or nu and phi values
    nu: 1d array-like (optional)
        nu values at which to evaluate the pdf
    phi: None or 1d array-like (optional)
        phi values at which to evaluate the pdf, if the pdf is phi-dependent
save: boolean (optional)
tells whether or not to save the plot
Notes
-----
see http://mathworld.wolfram.com/SpherePointPicking.html for more details
on conversion between theta and nu
'''
# convert nu to theta
theta = np.arccos(2*nu-1)
if phi is None:
# calculate the phase function for theta points
phase_func = pdf(nu)/np.sum(pdf(nu)*np.diff(nu)[0])
# make polar plot in theta
plt.figure()
ax = plt.subplot(111,projection = 'polar')
ax.set_title(r'phase function in $\theta$')
ax.plot(theta, phase_func, linewidth =3, color = [0.45, 0.53, 0.9])
ax.plot(-theta, phase_func, linewidth =3, color = [0.45, 0.53, 0.9])
else:
# calculate the phase function for points in theta and phi
theta_2d, phi_2d = np.meshgrid(theta, phi)
nu_2d = (np.cos(theta_2d) + 1)/2
angles = np.vstack([nu_2d.ravel(), phi_2d.ravel()])
pdf_vals = np.reshape(pdf(angles), theta_2d.shape)
phase_func = pdf_vals/np.sum(pdf_vals*np.diff(phi)[0]*np.diff(theta)[0])
# make heatmap
fig, ax = plt.subplots()
cax = ax.imshow(phase_func, cmap = plt.cm.gist_earth_r, extent = [theta[0], theta[-1], phi[0], phi[-1]])
ax.set_xlabel('theta')
ax.set_ylabel('phi')
ax.set_xlim([theta[0], theta[-1]])
ax.set_ylim([phi[0], phi[-1]])
fig.colorbar(cax)
if save==True:
plt.savefig('phase_fun.pdf')
np.save('phase_function_data',phase_func)
def plot_dist_1d(var_range, var_data, var_data_edge_correct, pdf_var_vals):
'''
plots the probability distribution of a variable of interest
Parameters
----------
var_range: 1d array
array of values of variable whose pdf you want to find. Should sweep
whole range of interest.
var_data: 1d array-like
values of the variable of interest from data.
var_data_edge_correct: 1d array-like
values of the variable of interest corrected for edge effects in the
probability distribution
pdf_var_vals: 1d array
probability density values for variable values of var_range
if pdf is 2d, this array is marginalized over the other variable
'''
plt.figure()
# plot the kde using seaborn, from the raw data
sns.distplot(var_data, rug = True, hist = False,
label = 'distribution from data')
# plot the kde using seaborn, from edge corrected data
sns.distplot(var_data_edge_correct, rug = True, hist = False,
label = 'distribution with edge correction')
# renormalize the pdf
pdf_norm = pdf_var_vals/np.sum(pdf_var_vals*np.diff(var_range)[0])
# plot
plt.plot(var_range, pdf_norm,
label = 'kernel density estimate, correctly normalized')
plt.legend()
plt.xlim([var_range[0],var_range[-1]])
plt.ylabel('probability density')
def calc_directions(theta_sample, phi_sample, x_inter,y_inter, z_inter, k1, radius):
'''
calculates directions of exit trajectories
Parameters
    ----------
theta_sample: 1d array
sampled thetas of exit trajectories
phi_sample: 1d array
sampled phis of exit trajectories
x_inter: 1d array-like
x-coordinate of each trajectory at exit event
y_inter: 1d array-like
y-coordinate of each trajectory at exit event
z_inter: 1d array-like
z-coordinate of each trajectory at exit event
k1: 2d array
direction vector for trajectories
radius: float-like
radius of sphere boundary
Returns
-------
k1: 2d array
direction vector for exit trajectories
'''
z_sample = radius*np.cos(theta_sample)
y_sample = radius*np.sin(phi_sample)*np.sin(theta_sample)
x_sample = radius*np.cos(phi_sample)*np.sin(theta_sample)
xa = np.vstack((x_sample,y_sample,z_sample)).T
xb = np.vstack((x_inter,y_inter, z_inter)).T
distances = cdist(xa,xb)
ind = np.argmin(distances, axis=1)
return k1[:,ind]
def plot_exit_points(x, y, z, radius, plot_dimension = '3d'):
'''
plots data corresponding to the x,y,z and radius inputs
the main use of this function is to plot data points over the heatmap
of the pdf for validation of the kernel density estimate
Parameters
----------
x: 1d array-like
x-coordinate of each trajectory at exit event
y: 1d array-like
y-coordinate of each trajectory at exit event
z: 1d array-like
z-coordinate of each trajectory at exit event
radius: float-like
radius of sphere boundary
plot_dimension: string (optional)
If set to '3d' the plot is a 3d plot on a sphere. If set to '2d', plots
on a 2d plot theta vs phi
'''
unit = '' # initialize unit to empty string
if isinstance(x, sc.Quantity):
unit = x.units # save unit for later use
x = x.to('um').magnitude
if isinstance(y, sc.Quantity):
y = y.to('um').magnitude
if isinstance(z, sc.Quantity):
z = z.to('um').magnitude
if isinstance(radius, sc.Quantity):
radius = radius.to('um').magnitude
if plot_dimension == '2d':
# calculate thetas for each exit point
theta = np.arccos(z/radius)
# calculate phi for each exit point
phi = np.arctan2(y,x) + np.pi
# plot
plt.plot(theta, phi, '.')
plt.xlim([0, np.pi])
plt.ylim([0, 2*np.pi])
if plot_dimension == '3d':
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('x ' + str(unit))
ax.set_ylabel('y ' + str(unit))
ax.set_zlabel('z ' + str(unit))
ax.set_title('exit positions')
ax.view_init(-164,-155)
ax.plot(x, y, z, '.')
def calc_d_avg(volume_fraction, radius):
'''
calculates the average spacing between structured spheres in a bulk film,
given their volume fraction
Parameters
----------
volume_fraction: float-like
volume fraction of structured spheres in a bulk film
radius: float-like
radius of structured spheres in a bulk film
Returns
-------
d_avg: float-like
average spacing between structured spheres in a bulk film
'''
# calculate the number density
number_density = volume_fraction/(4/3*np.pi*radius**3)
# calculate the average interparticle spacing
d_avg = 2*(3/(4*np.pi*number_density))**(1/3)
return d_avg
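# Added worked example (comments only): for structured spheres of radius 5 um
# at a volume fraction of 0.5,
#   number_density = 0.5/(4/3*np.pi*5**3) ~ 9.5e-4 um^-3
#   d_avg = 2*(3/(4*np.pi*number_density))**(1/3) ~ 12.6 um
# so calc_d_avg(0.5, 5.0) returns roughly 12.6 in the units of the radius.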
def calc_mu_scat_abs(refl_per_traj, trans_per_traj, trans_indices,
volume_fraction, radius, n_sample, wavelength):
'''
Calculates scattering coefficient and absorption coefficient using results
of the Monte Carlo calc_refl_trans() function
calculates the scattering length from the formula:
mu_scat = number density * total scattering cross section
where the scattering length is the inverse of the above expression:
lscat = 1/(number density * total scattering cross section)
and the total scattering cross section is found by integrating the
fraction of scattered light and multiplying by the initial area:
total scattering cross section = power scattered / incident intensity
= power scattered / (incident power / incident area)
= power scattered / incident power * 2*pi*radius**2
= (scattered fraction)*2*pi*radius**2
calculates the absorption length from the formula:
mu_abs = number density * total absorption cross section
where the absorption length is the inverse of the above expression:
l_abs = 1/(number density * total absorption cross section)
    and the total absorption cross section is found from the fraction of
    absorbed light multiplied by the initial area:
    total absorption cross section = power absorbed / incident intensity
                                   = power absorbed / (incident power / incident area)
                                   = power absorbed / incident power *2*pi*radius**2
                                   = (absorbed fraction)*2*pi*radius**2
Parameters
----------
refl_per_traj: 1d array
array of trajectory weights that exit through reflection, normalized
by the total number of trajectories
trans_per_traj: 1d array
array of trajectory weights that exit through transmission, normalized
by the total number of trajectories
trans_indices: 1d array
array of event indices at which trajectories exit structured sphere
through transmission
volume_fraction: float-like
volume fraction of structured spheres in a bulk film
radius: float-like
radius of structured spheres in a bulk film
n_sample: float-like
        refractive index of the material surrounding the sphere, often referred
to as the bulk matrix
wavelength: float-like
source light wavelength
Returns
-------
mu_scat: float-like
scattering coefficient for bulk film of structured spheres
mu_abs: float-like
absorption coefficient for bulk film of structured spheres
'''
# calculate the number density
number_density = volume_fraction/(4/3*np.pi*radius**3)
# calculate the total absorption cross section
    # assumes no stuck trajectories
tot_abs_cross_section = (1 - np.sum(refl_per_traj + trans_per_traj))*2*np.pi*radius**2
# remove transmission contribution from trajectories that did not scatter
trans_per_traj[trans_indices == 1] = 0
# calculate the total scattering cross section
tot_scat_cross_section = np.sum(refl_per_traj + trans_per_traj)*2*np.pi*radius**2
# calculate mu_scat, mu_abs using the sphere
mu_scat = number_density*tot_scat_cross_section
mu_abs_sphere = number_density*tot_abs_cross_section
# don't need to include volume fraction for mu_abs_sphere component
# because already included in number_density
mu_abs_matrix = 4*np.pi*np.imag(n_sample)/wavelength
mu_abs = mu_abs_sphere + mu_abs_matrix*(1-volume_fraction)
return mu_scat, mu_abs
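# Added illustrative sketch (not called anywhere): made-up weights for three
# trajectories exiting a structured sphere of radius 5 (lengths assumed in um)
# at 50% volume fraction in a non-absorbing matrix at wavelength 0.5. One
# trajectory reflects, one transmits after scattering, and one transmits
# without scattering (exit event index 1), so its weight is excluded from the
# scattering cross section.
def _example_mu_scat_abs_usage():
    refl_per_traj = np.array([0.2, 0.0, 0.0])
    trans_per_traj = np.array([0.0, 0.3, 0.3])
    trans_indices = np.array([3, 2, 1])
    return calc_mu_scat_abs(refl_per_traj, trans_per_traj, trans_indices,
                            volume_fraction=0.5, radius=5.0,
                            n_sample=1.33+0j, wavelength=0.5)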
def calc_scat_bulk(refl_per_traj, trans_per_traj, trans_indices, norm_refl,
norm_trans, volume_fraction, diameter,
n_sample, wavelength,
plot=False, phi_dependent=False,
nu_range = np.linspace(0.01, 1, 200),
phi_range = np.linspace(0, 2*np.pi, 300),
kz=None):
'''
Parameters
----------
refl_per_traj: 1d array
array of trajectory weights that exit through reflection, normalized
by the total number of trajectories
trans_per_traj: 1d array
array of trajectory weights that exit through transmission, normalized
by the total number of trajectories
trans_indices: 1d array
array of event indices at which trajectories exit structured sphere
through transmission
norm_refl: 2d array-like, shape (3, number of trajectories)
array of normal vectors for trajectories at their
reflection exit from the sphere
norm_trans: 2d array-like, shape (3, number of trajectoires)
array of normal vectors for trajectories at their
transmission exit from the sphere
norm_trans
volume_fraction: float-like
volume fraction of structured spheres in a bulk film
diameter: float-like
diameter of structured spheres in a bulk film
n_sample: float-like
        refractive index of the material surrounding the sphere, often referred
to as the bulk matrix
wavelength: float-like
source light wavelength
plot: boolean (optional)
If set to True, the intermediate and final pdfs will be plotted
phi_dependent: boolean (optional)
If set to True, the returned pdf will require both a nu and a phi
input
nu_range: 1d array (optional)
        the nu values for which to calculate the pdf
phi_range: 1d array (optional)
the phi values for which to calculate the pdf, if the pdf is phi-dependent
kz: None or 1d array (optional)
the kz values at the exit events of the trajectories
Returns
-------
p: 1d array
phase function for bulk film
mu_scat: float-like
scattering coefficient for bulk film
mu_abs: float-like
absorption coefficient for bulk film
'''
# get radius from diameter
radius = diameter/2
# calculate the lscat of the microsphere for use in the bulk simulation
mu_scat, mu_abs = calc_mu_scat_abs(refl_per_traj, trans_per_traj, trans_indices,
volume_fraction, radius, n_sample, wavelength)
# find the points on the sphere where trajectories exit
x_inter, y_inter, z_inter = get_exit_pos(norm_refl, norm_trans, radius)
# calculate the probability density function as a function of nu, which depends on the scattering angle
p = calc_pdf(x_inter, y_inter, z_inter, radius,
plot = plot,
phi_dependent = phi_dependent,
nu_range = nu_range,
phi_range = phi_range,
kz=kz)
return p, mu_scat, mu_abs
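# Added note (comments only): calc_scat_bulk links the sphere-level Monte Carlo
# run to the bulk-film run; its outputs feed sample_concentration and
# sample_angles_step_poly below, e.g.
#   p, mu_scat, mu_abs = calc_scat_bulk(refl_per_traj, trans_per_traj,
#                                       trans_indices, norm_refl, norm_trans,
#                                       volume_fraction, diameter, n_sample,
#                                       wavelength)
# where the inputs are placeholders from montecarlo.calc_refl_trans with
# spherical boundary conditions, not values defined in this module.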
def size_distribution(diameter_range, mean, t):
'''
    Deprecated in Mie-separated branch. A nearly identical function exists in
model.py. The only difference is that this function is not normalized
Parameters
----------
diameter_range: array
Range of diameters of the distribution.
mean: 1-element array
Mean diameter of the distribution.
t: 1-element array
'Width' of the distribution. t = (1 - p**2) / p**2, where p is the
polydispersity index.
Returns
-------
distr: array (same length as diameter_range)
        Schulz distribution as a function of diameter.
'''
if t <= 100:
schulz = ((t+1)/mean)**(t+1) * diameter_range**t / factorial(t) * np.exp(-diameter_range/mean*(t+1))
distr = schulz
else:
std_dev = diameter_range / np.sqrt(t+1)
distr = np.exp(-(diameter_range - mean)**2 / (2 * std_dev**2)) / np.sqrt(2*np.pi*std_dev**2)
#distr = distr/np.sum(distr)
return(distr)
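# Added illustrative sketch (not called anywhere): evaluate the (unnormalized)
# Schulz distribution over a range of diameters for a made-up mean diameter of
# 200 (same length units as the range) and a polydispersity index of 0.05.
def _example_size_distribution_usage():
    diam_range = np.linspace(100., 300., 200)
    pdi = 0.05
    t = (1 - pdi**2)/pdi**2
    return size_distribution(diam_range, 200., t)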
def calc_diam_list(num_diam, diameter_mean, pdi, equal_spacing = False, plot = True, num_pdf_points = 600):
'''
Calculate the list of radii to sample from for a given polydispersity and number of radii.
This function is used specifically to calculate a list of radii to sample
in the polydisperse bulk Monte Carlo model.
Parameters
----------
num_diam: int
number of diameters
    diameter_mean: float, sc.Quantity
        mean diameter of the distribution
pdi: float
polydispersity index of the distribution
equal_spacing: boolean
If True, the calculated list of radii is equally spaced, instead of
choosing points based on FWHM
plot: boolean
if True, the probability density function is plotted as a function of
radius, as well as the list of radii points
num_pdf_points: int
number of points at which to calculate the probability density function
should not need to change this value
Returns
-------
diam_list: 1d numpy array
list of diameters from which to sample in polydisperse bulk Monte Carlo
'''
# get radius from diameter
radius_mean = diameter_mean/2
# calculate the range of diameters at which to calculate the pdf
diam_range = np.linspace(1,4*radius_mean.magnitude, num_pdf_points)*radius_mean.units
    # calculate the radii at equal spacings
if equal_spacing == True:
rad_mean = radius_mean.magnitude
num_half = int(np.round((num_diam +1)/2))
rad_list = np.unique(np.hstack((np.linspace(rad_mean/100, rad_mean, num_half),
np.linspace(rad_mean, 2*rad_mean, num_half))))*radius_mean.units
# calculate the radii based on FWHM
else:
# calculate pdf
t = (1-pdi**2)/pdi**2
pdf_range = size_distribution(diam_range, 2*radius_mean,t).magnitude
rad_range = diam_range.magnitude/2
# find radius at maximum of pdf
max_rad_ind = np.argmax(pdf_range)
max_rad = rad_range[max_rad_ind]
# calculate the list of radii
# This algorithm starts by finding the radius at the FWHM on either side
# of the maximum. Then is finds the radius at the FW(3/4)M, then FW(1/4)M,
# then FW(7/8)M, then FW(5/8)M, then FW(3/8)M...
rad_list = [max_rad]
num = 1
denom = 2
for i in range(0,num_diam-1,2):
rad_ind_1 = np.argmin(np.abs(pdf_range[0:int(num_pdf_points/2)]-(num/denom)*np.max(pdf_range)))
rad_ind_2 = np.argmin(np.abs(pdf_range[int(num_pdf_points/2):]-(num/denom)*np.max(pdf_range)))
rad_list.append(rad_range[rad_ind_1])
            rad_list.append(rad_range[int(num_pdf_points/2) + rad_ind_2])
if num==1:
denom = 2*denom
num = denom-1
else:
num = num-2
# put the list in order and make it into a numpy array
rad_list.sort()
rad_list = np.array(rad_list)*radius_mean.units
# plot the radii over the pdf
if plot == True:
        # calculate t for distribution
t = (1-pdi**2)/pdi**2
# calculate pdf
pdf = size_distribution(2*rad_list, 2*radius_mean, t)
if equal_spacing == True:
pdf_range = size_distribution(diam_range, 2*radius_mean,t)
plt.figure()
plt.scatter(2*rad_list, pdf, s = 45, color = [0.8,0.3,0.3])
plt.plot(diam_range, pdf_range, linewidth = 2.5)
plt.xlabel('diameter (' + str(radius_mean.units) + ')')
plt.ylabel('probability density')
# calc diameter from radius
diam_list = 2*rad_list
return diam_list
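# Added illustrative sketch (not called anywhere): build the diameter list used
# for polydisperse bulk Monte Carlo sampling. The mean diameter must carry
# units (structcol uses pint Quantities); 0.2 um and pdi = 0.01 are made-up
# values chosen only for illustration.
def _example_calc_diam_list_usage():
    diam_mean = sc.Quantity('0.2 um')
    return calc_diam_list(5, diam_mean, 0.01, plot=False)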
def sample_diams(pdi, diam_list, diam_mean, ntrajectories_bulk, nevents_bulk):
'''
    Sample the diameters to simulate polydispersity in the bulk Monte Carlo simulation
Parameters
----------
pdi: float
polydispersity index of the distribution
diam_list: 1d numpy array
        list of diameters from which to sample in polydisperse bulk Monte Carlo
diam_mean: float, sc.Quantity
mean diameter of the distribution
ntrajectories_bulk: int
number of trajectories in the bulk Monte Carlo simulation
nevents_bulk: int
        number of events in the bulk Monte Carlo simulation
Returns
-------
diams_sampled: 2d array (shape nevents_bulk, ntrajectories_bulk)
array of the samples microsphere diameters for polydisperity in the bulk
Monte Carlo calculations
'''
    # calculate t for distribution
t = (1-pdi**2)/pdi**2
# calculate pdf
pdf = size_distribution(diam_list, diam_mean, t)
pdf_norm = pdf/np.sum(pdf)
# sample diameter distribution
diams_sampled = np.reshape(np.random.choice(diam_list.magnitude,
ntrajectories_bulk*nevents_bulk,
p = pdf_norm),
(nevents_bulk,ntrajectories_bulk))
return diams_sampled
def sample_concentration(p, ntrajectories_bulk, nevents_bulk):
'''
    Sample parameter values (e.g. radii or diameters) to simulate polydispersity in the bulk Monte Carlo simulation
using pre-calculated probabilities
Parameters
----------
p: 1d numpy array
        probability distribution over the parameter values to sample (one entry per sphere type)
ntrajectories_bulk: int
number of trajectories in the bulk Monte Carlo simulation
nevents_bulk: int
        number of events in the bulk Monte Carlo simulation
Returns
-------
params_sampled: 2d array (shape nevents_bulk, ntrajectories_bulk)
array of the sample parameter for polydisperity in the bulk
Monte Carlo calculations
'''
# sample distribution
param_list = np.arange(np.size(p))+1
params_sampled = np.reshape(np.random.choice(param_list,
ntrajectories_bulk*nevents_bulk, p = p),
(nevents_bulk,ntrajectories_bulk))
return params_sampled
def sample_angles_step_poly(nevents_bulk, ntrajectories_bulk, p_sphere,
params_sampled, mu_scat_bulk, param_list=None):
'''
    Sample the scattering angles, azimuthal angles, and step sizes for the bulk
    Monte Carlo simulation, using the sphere phase function and scattering
    coefficient corresponding to each sampled sphere parameter.
Parameters
----------
ntrajectories_bulk: int
number of trajectories in the bulk Monte Carlo simulation
nevents_bulk: int
        number of events in the bulk Monte Carlo simulation
p_sphere: 2d array (shape number of sphere types, number of angles)
phase function for a sphere, found from a Monte Carlo simulation
with spherical boundary conditions
params_sampled: 2d array (shape nevents_bulk, ntrajectories_bulk)
array of the sampled microsphere parameters (could be radius or diameter)
for polydisperity in the bulk Monte Carlo calculations
mu_scat_bulk: 1d array (sc.Quantity, length number of sphere types)
scattering coefficient for a sphere, calculated using Monte Carlo
simulation with spherical boundary conditions
param_list: 1d numpy array
list of parameters (usually radius or diameter) from which to sample
in polydisperse bulk Monte Carlo
Returns
-------
sintheta, costheta, sinphi, cosphi: ndarray
Sampled scattering and azimuthal angles sines and cosines.
step:ndarray
Sampled step sizes for all trajectories and scattering events
theta, phi: ndarray
Sampled scattering and azimuthal angles
'''
# get param_list
if param_list is None:
param_list = np.arange(p_sphere.shape[0])+1
elif isinstance(param_list, sc.Quantity):
param_list = param_list.magnitude
# get scattering length from scattering coefficient
lscat = 1/mu_scat_bulk
# Sample phi angles
rand = np.random.random((nevents_bulk,ntrajectories_bulk))
phi = 2*np.pi*rand
sinphi = np.sin(phi)
cosphi = np.cos(phi)
# Sample theta angles and calculate step size based on sampled radii
theta = np.zeros((nevents_bulk, ntrajectories_bulk))
lscat_rad_samp = np.zeros((nevents_bulk, ntrajectories_bulk))
angles = np.linspace(0.01,np.pi, p_sphere.shape[1])
# loop through all the radii, finding the positions of each radius
# in the sampled radii array, and assigning the appropriate phase fun and
# lscat for each one
for j in range(p_sphere.shape[0]):
ind_ev, ind_tr = np.where(params_sampled == param_list[j])
if ind_ev.size==0:
continue
prob = p_sphere[j,:]*np.sin(angles)*2*np.pi
prob_norm = prob/np.sum(prob)
# sample step sizes
rand = np.random.random(ind_ev.size)
lscat_rad_samp[ind_ev, ind_tr] = -np.log(1.0-rand)*lscat[j].magnitude
# sample angles
theta[ind_ev, ind_tr] = np.random.choice(angles, ind_ev.size,
p = prob_norm)
# calculate sines, cosines, and step
sintheta = np.sin(theta)
costheta = np.cos(theta)
step = lscat_rad_samp*np.ones((nevents_bulk, ntrajectories_bulk))*lscat.units
return sintheta, costheta, sinphi, cosphi, step, theta, phi
| gpl-3.0 |
GoogleCloudPlatform/plspm-python | tests/test_util.py | 1 | 1308 | #!/usr/bin/python3
#
# Copyright (C) 2019 Google Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd, numpy as np, numpy.testing as npt, plspm.util as util
def test_impute_missing_values():
input = pd.DataFrame(
{"a": [1, 2, np.NaN, 3, np.NaN],
"b": [1, 2, 3, 4, 5],
"c": [1, np.NaN, 3, 0, 4]})
expected_output = pd.DataFrame(
{"a": [1, 2, 2, 3, 2],
"b": [1, 2, 3, 4, 5],
"c": [1, 2, 3, 0, 4]})
npt.assert_array_equal(expected_output, util.impute(input))
def test_ranking():
data = pd.Series([0.75, -1.5, 3, -1.5, 15])
expected_rank = pd.Series([2, 1, 3, 1, 4])
assert util.rank(data).astype(int).equals(expected_rank)
| gpl-3.0 |
thesgc/chembiohub_ws | cbh_core_api/parser.py | 1 | 2248 | """Generic functions for processing SDF and XLSX files"""
from pandas import ExcelFile
import xlrd
from copy import copy
def get_widths(df):
'''Returns a list of the maximum string widths of a the columns in a dataframe in characters'''
widths = []
for col in df.columns.tolist():
col = unicode(col)
titlewidth = len(col)
try:
            w = df[col].astype(unicode).str.len().max()
if w > titlewidth:
widths.append(int(w*1.2))
else:
widths.append(int(titlewidth * 1.2))
except:
widths.append(int(titlewidth * 1.2))
return widths
def is_true(item):
"""probably deprecated"""
if str(item).lower() in ["y", "true", "yes"]:
return True
else:
return False
def get_custom_field_config(filename, sheetname):
'''Early example of the import of a custom field config based upon a list of field names'''
xls = ExcelFile(filename)
data = xls.parse(sheetname, index_col=None, na_values=[''])
data.columns = ["name", "required", "description"]
data["required"] = data["required"].apply(is_true)
data = data.fillna('')
mydata = [{key: unicode(value) for key, value in point.items()} for point in data.T.to_dict().values()]
return mydata
def get_key_from_field_name(name):
'''Returns the field names used by elasticsearch'''
return unicode(name).replace(u" ", u"__space__")
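# Added illustrative example (comments only): the elasticsearch-safe key simply
# replaces spaces, e.g. get_key_from_field_name(u"Molecular Weight") returns
# u"Molecular__space__Weight".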
def get_sheetnames(filename):
'''Returns a list of the sheet names in an Excel file'''
xls = xlrd.open_workbook(filename, on_demand=True)
return xls.sheet_names()
def get_sheet(filename, sheetname):
'''Extracts a list of dicts from a worksheet of an Excel file along with the
column names, data types and maximum widths'''
xls = ExcelFile(filename)
data = xls.parse(sheetname, index_col=None, na_values=[''])
data = data.fillna('')
orig_cols = tuple(data.columns)
replace = [get_key_from_field_name(column) for column in data.columns]
data.columns = replace
types = copy(data.dtypes)
for col in replace:
data[col] = data[col].values.astype(unicode)
return (data.T.to_dict().values(), orig_cols, types, get_widths(data))
| gpl-3.0 |
NeuroDataDesign/seelviz | Tony/clviz_web_tony_edits/atlasregiongraph.py | 1 | 3181 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from __future__ import print_function
__author__ = 'seelviz'
from plotly.offline import download_plotlyjs
from plotly.graph_objs import *
from plotly import tools
import plotly
import os
#os.chdir('C:/Users/L/Documents/Homework/BME/Neuro Data I/Data/')
import csv,gc # garbage memory collection :)
import numpy as np
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import axes3d
# from mpl_toolkits.mplot3d import axes3d
# from collections import namedtuple
import csv
import re
import matplotlib
import time
import seaborn as sns
from collections import OrderedDict
class atlasregiongraph(object):
"""Class for generating the color coded atlas region graphs"""
def __init__(self, token, path=None):
self._token = token
self._path = path
data_txt = ""
if path == None:
data_txt = token + '/' + token + '.csv'
else:
data_txt = path + '/' + token + '.csv'
self._data = np.genfromtxt(data_txt, delimiter=',', dtype='int', usecols = (0,1,2,4), names=['x','y','z','region'])
def generate_atlas_region_graph(self, path=None, numRegions = 10):
font = {'weight' : 'bold',
'size' : 18}
matplotlib.rc('font', **font)
thedata = self._data
if path == None:
thedata = self._data
else:
### load data
thedata = np.genfromtxt(self._token + '/' + self._token + '.csv', delimiter=',', dtype='int', usecols = (0,1,2,4), names=['x','y','z','region'])
region_dict = OrderedDict()
for l in thedata:
trace = 'trace' + str(l[3])
if trace not in region_dict:
region_dict[trace] = np.array([[l[0], l[1], l[2], l[3]]])
else:
tmp = np.array([[l[0], l[1], l[2], l[3]]])
region_dict[trace] = np.concatenate((region_dict.get(trace, np.zeros((1,4))), tmp), axis=0)
current_palette = sns.color_palette("husl", numRegions)
# print current_palette
data = []
for i, key in enumerate(region_dict):
trace = region_dict[key]
tmp_col = current_palette[i]
tmp_col_lit = 'rgb' + str(tmp_col)
trace_scatter = Scatter3d(
x = trace[:,0],
y = trace[:,1],
z = trace[:,2],
mode='markers',
marker=dict(
size=1.2,
color=tmp_col_lit, #'purple', # set color to an array/list of desired values
colorscale='Viridis', # choose a colorscale
opacity=0.15
)
)
data.append(trace_scatter)
layout = Layout(
margin=dict(
l=0,
r=0,
b=0,
t=0
),
paper_bgcolor='rgb(0,0,0)',
plot_bgcolor='rgb(0,0,0)'
)
fig = Figure(data=data, layout=layout)
plotly.offline.plot(fig, filename= self._path + '/' + self._token + "_region_color.html")
| apache-2.0 |
low-sky/pyspeckit | pyspeckit/spectrum/toolbar.py | 7 | 4003 | from __future__ import print_function
from matplotlib.backend_bases import NavigationToolbar2
class NavigationToolbar3(NavigationToolbar2):
def press_pan(self, event):
print("pan pressed")
        super(NavigationToolbar3, self).press_pan(event)
class MyNavToolbar(NavigationToolbar2):
"""wx/mpl NavToolbar hack with an additional tools user interaction.
This class is necessary because simply adding a new togglable tool to the
toolbar won't (1) radio-toggle between the new tool and the pan/zoom tools.
(2) disable the pan/zoom tool modes in the associated subplot(s).
"""
def __init__(self, canvas):
super(MyNavToolbar, self).__init__(canvas)
self.pan_tool = self.FindById(self._NTB2_PAN)
self.zoom_tool = self.FindById(self._NTB2_ZOOM)
self.Bind(wx.EVT_TOOL, self.on_toggle_pan_zoom, self.zoom_tool)
self.Bind(wx.EVT_TOOL, self.on_toggle_pan_zoom, self.pan_tool)
self.user_tools = {} # user_tools['tool_mode'] : wx.ToolBarToolBase
self.InsertSeparator(5)
self.add_user_tool('lasso', 6, icons.lasso_tool.ConvertToBitmap(), True, 'Lasso')
self.add_user_tool('gate', 7, icons.gate_tool.ConvertToBitmap(), True, 'Gate')
def add_user_tool(self, mode, pos, bmp, istoggle=True, shortHelp=''):
"""Adds a new user-defined tool to the toolbar.
mode -- the value that MyNavToolbar.get_mode() will return if this tool
is toggled on
pos -- the position in the toolbar to add the icon
bmp -- a wx.Bitmap of the icon to use in the toolbar
isToggle -- whether or not the new tool toggles on/off with the other
togglable tools
shortHelp -- the tooltip shown to the user for the new tool
"""
tool_id = wx.NewId()
self.user_tools[mode] = self.InsertSimpleTool(pos, tool_id, bmp,
isToggle=istoggle, shortHelpString=shortHelp)
self.Bind(wx.EVT_TOOL, self.on_toggle_user_tool, self.user_tools[mode])
def get_mode(self):
"""Use this rather than navtoolbar.mode
"""
for mode, tool in self.user_tools.items():
if tool.IsToggled():
return mode
return self.mode
def untoggle_mpl_tools(self):
"""Hack city: Since I can't figure out how to change the way the
associated subplot(s) handles mouse events: I generate events to turn
off whichever tool mode is enabled (if any).
This function needs to be called whenever any user-defined tool
(eg: lasso) is clicked.
"""
if self.pan_tool.IsToggled():
wx.PostEvent(
self.GetEventHandler(),
wx.CommandEvent(wx.EVT_TOOL.typeId, self._NTB2_PAN)
)
self.ToggleTool(self._NTB2_PAN, False)
elif self.zoom_tool.IsToggled():
wx.PostEvent(
self.GetEventHandler(),
wx.CommandEvent(wx.EVT_TOOL.typeId, self._NTB2_ZOOM)
)
self.ToggleTool(self._NTB2_ZOOM, False)
def on_toggle_user_tool(self, evt):
"""user tool click handler.
"""
if evt.Checked():
self.untoggle_mpl_tools()
#untoggle other user tools
for tool in self.user_tools.values():
if tool.Id != evt.Id:
self.ToggleTool(tool.Id, False)
def on_toggle_pan_zoom(self, evt):
"""Called when pan or zoom is toggled.
We need to manually untoggle user-defined tools.
"""
if evt.Checked():
for tool in self.user_tools.values():
self.ToggleTool(tool.Id, False)
# Make sure the regular pan/zoom handlers get the event
evt.Skip()
def reset_history(self):
"""More hacky junk to clear/reset the toolbar history.
"""
self._views.clear()
self._positions.clear()
self.push_current()
| mit |
jorik041/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
iarroyof/nlp-pipeline | nn_ancient.py | 1 | 2603 | from sknn.platform import cpu64, threading
from scipy import stats
from sklearn.grid_search import RandomizedSearchCV
from sknn.mlp import Regressor, Layer
from numpy import sqrt, loadtxt, savetxt, array
import sys
if len(sys.argv) < 4:
print "Usage: python nn.py train_vectors scores test_vectors searches"
exit()
hidden0 = "Tanh"#"Sigmoid"
hidden1 = "Tanh"
hidden2 = "Tanh"
output = "Sigmoid"
lrate = 0.001
niter = 100
units1 = 30
units2 = 10
X_train = loadtxt(sys.argv[1])
#print type(X_train)
y_train = loadtxt(sys.argv[2])
#print type(y_train)
X_valid = loadtxt(sys.argv[3])
#print type(X_valid)
try:
N = int(sys.argv[4]) # The number of searches
except IndexError:
N = 1
#print "X_train: %s"%(str(X_train.shape))
#print "y_train: %s"%(str(y_train.shape))
#print "X_test: %s"%(str(X_valid.shape))
nn = Regressor(
layers=[
Layer(hidden2, units=1),
Layer(hidden1, units=1),
Layer(hidden0, units=1),
Layer(output)],
learning_rate=lrate,
n_iter=niter)
rs = RandomizedSearchCV(nn, n_iter = 10, n_jobs = 4, param_distributions={
'learning_momentum': stats.uniform(0.1, 1.5),
'learning_rate': stats.uniform(0.009, 0.1),
'learning_rule': ['sgd'], #'momentum', 'nesterov', 'adadelta', 'adagrad', 'rmsprop'],
'regularize': ["L1", "L2", None],
'hidden0__units': stats.randint(2, 300),
'hidden0__type': ["Rectifier", "Sigmoid", "Tanh"],
'hidden1__units': stats.randint(2, 300),
'hidden1__type': ["Rectifier", "Sigmoid", "Tanh"],
'hidden2__units': stats.randint(4, 300),
'hidden2__type': ["Rectifier", "Sigmoid", "Tanh"],
'output__type': ["Linear", "Softmax"]})
#rs.fit(a_in, a_out)
if len(X_train) != len(y_train):
sys.stderr.write("Number of samples and number of labels do not match.")
exit()
for t in xrange(N):
crash = True
while(crash):
try:
rs.fit(X_train, y_train)
crash = False
except RuntimeError:
sys.stderr.write("--------------------- [Crashed by RunTimeERROR. restarting] --------------------- \n")
crash = True
sys.stderr.write("Best Parameters: %s, score: %s\n" % (str(rs.best_params_), str(rs.best_score_)))
y_ = rs.predict(X_valid)
y = []
for o in y_:
y.append(o[0])
input = sys.argv[3].split("/")[-1].split(".")[0]
y_out = {}
y_out['estimated_output'] = y
y_out['best_params'] = rs.best_params_
y_out['best_score'] = rs.best_score_
with open("nn_output_headlines_30_d2v_conv_300_m5.txt", "a") as f:
f.write(str(y_out)+'\n')
| gpl-3.0 |
queirozfcom/vector_space_retrieval | vsr/modules/metrics/measures/precision.py | 1 | 5448 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from collections import OrderedDict
# from sklearn.metrics import precision_recall_curve
import pylab as pl
import sys
# precision is the fraction of retrieved (actual) documents that are relevant (expected).
# takes two dicts; one containing expected results and the other containing actual results
# returns a dict { q_id => precision[q_id]}
def calculate(expected_results, actual_results, threshold = None):
# the query identifiers must match exactly
assert( sorted(actual_results.keys()) == sorted(expected_results.keys()) )
precisions = OrderedDict()
for query_id,expected_document_ids in expected_results.iteritems():
actual_document_ids = actual_results[query_id]
if threshold is not None:
# then i'll interpret is as a threshold for retrieved documents
# in order words, P@n
actual_document_ids = actual_document_ids[:threshold]
relevant_and_retrieved = filter(lambda el: el in expected_document_ids ,actual_document_ids)
precision = len(relevant_and_retrieved) / len(actual_document_ids)
precisions[query_id] = round(precision,3)
# make sure all queries have been accounted for
assert(sorted(actual_results.keys()) == sorted(expected_results.keys()) == sorted(precisions.keys()) )
return(precisions)
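# Added illustrative sketch (not called anywhere): toy expected/actual results
# for a single query. Retrieving [d1, d4] when [d1, d2, d3] are relevant gives
# precision 1/2; restricting to the top result (threshold=1, i.e. P@1) gives 1.
def _example_calculate_usage():
    expected = OrderedDict([("q1", ["d1", "d2", "d3"])])
    actual = OrderedDict([("q1", ["d1", "d4"])])
    return calculate(expected, actual), calculate(expected, actual, threshold=1)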
# return pairs (recall,precision), one for each recall point provided
def calculate_points(sorted_expected_results,sorted_actual_results):
    recall_points = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
# list of pairs
pairs = list()
for recall_point in recall_points:
        # what precision does the algorithm achieve if we only consider
        # the given proportion of the returned relevant documents?
new_pair = [recall_point]
precisions_at_recall_point = list()
for query_id,expected_document_ids in sorted_expected_results.iteritems():
expected_results_for_query = expected_document_ids
actual_results_for_query = sorted_actual_results[query_id]
precision = _precision_at_recall_point(
expected_results_for_query,
actual_results_for_query,
recall_point)
precisions_at_recall_point.append(precision)
# the y-value is the average precision at point recall_point
# over all queries
average_precisions = sum(precisions_at_recall_point)/len(precisions_at_recall_point)
new_pair.append(average_precisions)
pairs.append(new_pair)
return(pairs)
# given a list of expected ids and a list of retrieved ids, returns the precision
# when only the given recall_point is considered. If recall_point is 1.0, the result
# is the ordinary precision
def _precision_at_recall_point(sorted_expected_doc_ids,sorted_actual_doc_ids,recall_point):
assert( isinstance(recall_point,float) )
# recall_point is given as a normalized proportion (from 0.0 to 1.0)
recall_threshold = int(len(sorted_expected_doc_ids)*recall_point)
# this is a little hack so as not to get division by zeros
# this is the minimum recall, so maximum precision
if recall_threshold == 0:
recall_threshold = 1
# these are the expected documents when only looking up to
# given recall threshold
# ?
# sorted_expected_doc_ids = sorted_expected_doc_ids[:recall_threshold]
# similarly, we only consider relevant the retrieved documents in that same threshold
# document_threshold = int(len(sorted_actual_doc_ids)*recall_point)
# sorted_actual_doc_ids = sorted_actual_doc_ids
# relevant_and_retrieved = _get_precision_at_absolute_recall(sorted_actual_doc_ids,sorted_actual_doc_ids,recall_threshold)
# filter(lambda el: el in sorted_expected_doc_ids ,sorted_actual_doc_ids)
# if len(sorted_actual_doc_ids) == 0:
# # this means we're the first recall point
# assert(document_threshold == recall_threshold == 0)
# # this is needed for the algorithm to work
# # see http://nlp.stanford.edu/IR-book/html/htmledition/evaluation-of-ranked-retrieval-results-1.html
# precision = 1.0
# else:
# precision = len(relevant_and_retrieved) / len(sorted_actual_doc_ids)
# what precision do I get if we only consider the first <recall_threshold>
# expected results?
precision = _get_precision_at_absolute_recall(
sorted_expected_doc_ids,
sorted_actual_doc_ids,
recall_threshold)
return(precision)
def _get_precision_at_absolute_recall(expected_results,actual_results,absolute_threshold):
hits = 0
total = 0
for idx,actual_id in enumerate(actual_results):
if hits >= absolute_threshold:
return(hits/total)
# if idx > absolute_threshold:
# # this will cause a threshold of 0 to return 1.0, which makes sense
# # because minimum recall equals maximum precision, which is 1.0
# return(running_precision)
if actual_id in expected_results:
hits += 1
total += 1
else:
total += 1
return(hits/total) | mit |
iABC2XYZ/abc | PIC/testParticles.py | 1 | 7077 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 17 15:17:47 2017
@author: A
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
plt.close('all')
numPart=np.int64(1e5)
xGridLog,yGridLog,zGridLog=4,4,6
xGrid,yGrid,zGrid=2**xGridLog,2**yGridLog,2**zGridLog
xMin=-14.
xMax=14.
yMin=-18.
yMax=18.
xyMean=np.array([0.,0.,0.,0.])
xEmit,yEmit=2.,2.
xBeta,yBeta=3.,3.
xAlpha,yAlpha=1.,1.
def GammaT(alphaT,betaT):
gammaT=(1.+alphaT**2)/betaT
return gammaT
def SigmaT(alphaT,betaT):
gammaT=GammaT(alphaT,betaT)
sigmaT=np.array([[betaT,-alphaT],[-alphaT,gammaT]])
return sigmaT
xSigmaT,ySigmaT= SigmaT(xAlpha,xBeta), SigmaT(yAlpha,yBeta)
O2=np.zeros((2,2))
xySigmaT=np.append(np.append(xSigmaT,O2,axis=0),np.append(O2,ySigmaT,axis=0),axis=1)
x,xp,y,yp=np.random.multivariate_normal(xyMean,xySigmaT,numPart).T
'''
plt.figure(1)
plt.plot(x,xp,'.')
plt.figure(2)
plt.plot(y,yp,'.')
'''
'''
dX=(xMax-xMin)/(xGrid-1.)
dY=(yMax-yMin)/(yGrid-1.)
partFlag=(x>=xMin) * (y>=yMin) * (x<xMax) * (y<yMax)
xUse=x[partFlag]
xpUse=xp[partFlag]
yUse=y[partFlag]
ypUse=yp[partFlag]
def InfXY(x,xMin,dX):
infX=(x-xMin)/dX
intX=np.floor(infX)
fracX=infX-intX
return intX,fracX
intX,fracX=InfXY(xUse,xMin,dX)
intY,fracY=InfXY(yUse,yMin,dY)
partGrid=np.zeros((xGrid,yGrid))
for iPart in range(len(intX)):
partGrid[intX,int
'''
rhoGrid,xArray,yArray=np.histogram2d(x,y,bins=(xGrid,yGrid),range=[[xMin,xMax],[yMin,yMax]])
dx=xArray[1]-xArray[0]
dy=yArray[1]-yArray[0]
rhoGrid/=(dx*dy)
xMid=(xArray[0:-1:]+xArray[1::])/2.
yMid=(yArray[0:-1:]+yArray[1::])/2.
yMidGrid,xMidGrid=np.meshgrid(yMid,xMid)
fig3=plt.figure(3)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,rhoGrid, cmap=cm.coolwarm,linewidth=0, antialiased=False)
plt.figure(4)
plt.plot(x,y,'.')
plt.show()
def CosK(N):
k=np.linspace(0.5,N-0.5,N)
m=k
K,M=np.meshgrid(k,m)
KM=K*M
cosKM=np.cos(np.pi*KM/N)
cosKM*=np.sqrt(2./N)
return cosKM
def SinK(N):
k=np.linspace(1,N,N)
m=k
K,M=np.meshgrid(k,m)
KM=K*M
sinKM=np.sin(np.pi*KM/(N+1))
sinKM*=np.sqrt(2./N)
return sinKM
def SinK_I(N):
sinKM=SinK(N)
sinKM*=1./(2.*(N+1))
return sinKM
def EigDirichlet(N,dx):
K=np.linspace(1,N,N)[:,np.newaxis]
Ek=-((2.*np.sin(np.pi*K/(2.*(N+1))))/dx)**2
return Ek
def EigNeumann(N,dx):
K=np.linspace(0,N-1,N)[:,np.newaxis]
Ek=-((2.*np.sin(np.pi*K/(2.*N)))/dx)**2
return Ek
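# Spectral Poisson solve with homogeneous Dirichlet boundaries in x and y
# (descriptive comments added for clarity): forward sine transforms of rhoGrid
# along both axes, division by the summed 1-D Laplacian eigenvalues kXYGrid,
# then the inverse sine transforms give the potential u. The u22 check below
# rebuilds a finite-difference Laplacian of u and compares its sum against rhoGrid.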
sinKX=SinK(xGrid)
sinKY=SinK(yGrid)
Kx=EigDirichlet(xGrid,dx)
Ky=EigDirichlet(yGrid,dy)
kxGrid,kyGrid=np.meshgrid(Ky,Kx)
kXYGrid=kxGrid+kyGrid
rhoDst=np.matmul(sinKX,rhoGrid)
rhoDstDst=np.matmul(sinKY,rhoDst.T).T
uDstDst=rhoDstDst/kXYGrid
uDst=np.matmul(sinKY,uDstDst.T).T
u=np.matmul(sinKX,uDst)
u22=np.gradient(np.gradient(u,axis=0),axis=0)+np.gradient(np.gradient(u,axis=1),axis=1)
u22/=(dx*dy)
fig3=plt.figure(20)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,u, cmap=cm.coolwarm,linewidth=0, antialiased=False)
fig3=plt.figure(31)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,u22, cmap=cm.coolwarm,linewidth=0, antialiased=False)
print np.sum(u22),np.sum(rhoGrid),(np.sum(u22)-np.sum(rhoGrid))/np.sum(rhoGrid)
#_______________________________________________________________________________
plt.close('all')
def ExpK(N):
k=np.linspace(0,N-1,N)
m=k
K,M=np.meshgrid(k,m)
KM=K*M
cosKM=np.cos(2.*np.pi*KM/N)
sinKM=np.sin(2.*np.pi*KM/N)
cosKM/=np.sqrt(N)
sinKM/=np.sqrt(N)
return cosKM,sinKM
def EigPeriodic(N,dx):
K=np.linspace(0,N-1,N)[:,np.newaxis]
Ek=-((2.*np.sin(np.pi*K/N))/dx)**2
return Ek
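# Same spectral solve, but periodic along y (descriptive comments added for
# clarity): the y direction uses the full Fourier basis (cosine and sine parts
# from ExpK) with EigPeriodic eigenvalues, while x keeps the Dirichlet sine basis.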
sinKX=SinK(xGrid)
cosKY,sinKY=ExpK(yGrid)
Kx=EigDirichlet(xGrid,dx)
Ky=EigPeriodic(yGrid,dy)
kxGrid,kyGrid=np.meshgrid(Ky,Kx)
kXYGrid=kxGrid+kyGrid
rhoDst=np.matmul(sinKX,rhoGrid)
rhoDstC=np.matmul(cosKY,rhoDst.T).T
rhoDstS=np.matmul(sinKY,rhoDst.T).T
uDstC=rhoDstC/kXYGrid
uDstS=rhoDstS/kXYGrid
uDst=np.matmul(cosKY,uDstC.T).T+np.matmul(sinKY,uDstS.T).T
u=np.matmul(sinKX,uDst)
u22=np.gradient(np.gradient(u,axis=0),axis=0)+np.gradient(np.gradient(u,axis=1),axis=1)
u22/=(dx*dy)
fig3=plt.figure(3)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,rhoGrid, cmap=cm.coolwarm,linewidth=0, antialiased=False)
fig3=plt.figure(20)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,u, cmap=cm.coolwarm,linewidth=0, antialiased=False)
fig3=plt.figure(31)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,u22, cmap=cm.coolwarm,linewidth=0, antialiased=False)
print np.sum(u22),np.sum(rhoGrid),(np.sum(u22)-np.sum(rhoGrid))/np.sum(rhoGrid)
"""
#___________________________________
plt.close('all')
sinKX=SinK(xGrid)
sinKY=SinK(yGrid)
Kx=EigDirichlet(xGrid,dx)
Ky=EigDirichlet(yGrid,dy)
kxGrid,kyGrid=np.meshgrid(Ky,Kx)
kXYGrid=kxGrid+kyGrid
A=sinKX
B=sinKY
E=np.diag(Kx[:,0])
F=np.diag(Ky[:,0])
X=rhoGrid
ABT=np.matmul(A,B.T)
AXBF=np.matmul(np.matmul(np.matmul(A,X),B.T),F.T)
AEXB=np.matmul(np.matmul(np.matmul(A,E),X),B.T)
u=np.matmul(ABT,AXBF+AEXB)
u22=np.gradient(np.gradient(u,axis=0),axis=0)+np.gradient(np.gradient(u,axis=1),axis=1)
u22/=(dx*dy)
fig3=plt.figure(3)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,rhoGrid, cmap=cm.coolwarm,linewidth=0, antialiased=False)
fig3=plt.figure(20)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,u, cmap=cm.coolwarm,linewidth=0, antialiased=False)
fig3=plt.figure(31)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,u22, cmap=cm.coolwarm,linewidth=0, antialiased=False)
print np.sum(u22),np.sum(rhoGrid),(np.sum(u22)-np.sum(rhoGrid))/np.sum(rhoGrid)
'''
sinKX=SinK(xGrid)
cosKY,sinKY=ExpK(yGrid)
Kx=EigDirichlet(xGrid,dx)
Ky=EigPeriodic(yGrid,dy)
kxGrid,kyGrid=np.meshgrid(Ky,Kx)
kXYGrid=kxGrid+kyGrid
F=kXYGrid
A=sinKX
AA=np.matmul(A,A)
B=cosKY
BB=np.matmul(B.T,B.T)
C=sinKY
CC=np.matmul(C.T,C.T)
X=rhoGrid
u=np.matmul(np.matmul(AA,X),(BB+CC))/F
u22=np.gradient(np.gradient(u,axis=0),axis=0)+np.gradient(np.gradient(u,axis=1),axis=1)
u22/=(dx*dy)
fig3=plt.figure(3)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,rhoGrid, cmap=cm.coolwarm,linewidth=0, antialiased=False)
fig3=plt.figure(20)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,u, cmap=cm.coolwarm,linewidth=0, antialiased=False)
fig3=plt.figure(31)
ax = fig3.gca(projection='3d')
surf = ax.plot_surface(xMidGrid,yMidGrid,u22, cmap=cm.coolwarm,linewidth=0, antialiased=False)
print
'''
print(np.diag(Kx[:,0]))
"""
| gpl-3.0 |
giacbrd/CipCipPy | src/CipCipPy/classification/__init__.py | 1 | 8734 | # CipCipPy - Twitter IR system for the TREC Microblog track.
# Copyright (C) <2011-2015> Giacomo Berardi, Andrea Esuli, Diego Marcheggiani
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
CipCipPy
Classification with scikit-learn
"""
__version__ = "0.2"
__authors__ = ["Giacomo Berardi <[email protected]>",
"Andrea Esuli <[email protected]>",
"Diego Marcheggiani <[email protected]>"]
import scipy.spatial.distance
import numpy as np
from scipy.sparse import issparse
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.neighbors import NearestCentroid, KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import RidgeClassifier, LogisticRegression
class TrainingSet():
def __init__(self, rawTweets, tweetsToPop):
"""rawTweets is the initial training set, the first tweetsToPop tweets can be successively removed to refine
the training set, for example after a new sample is added to it"""
self.tweetId = []
self.tweetTarget = []
self.features = []
self.tweetsToPop = tweetsToPop
for tweet in rawTweets:
self.tweetId.append(tweet[0])
self.tweetTarget.append(1 if tweet[1] else 0)
self.features.append(tweet[2])
self.tfidf_vect = TfidfVectorizer(lowercase=False, min_df=1, binary=False, analyzer=lambda x: x)
self.matrix = None
def vectorize(self):
"""Compute vectors"""
self.matrix = self.tfidf_vect.fit_transform(self.features)
def vectorizeTest(self, testTweet):
"""Vectorize a tweet with idf"""
return self.tfidf_vect.transform([testTweet[2]])
def addExample(self, rawTweet):
"""Add a new example for retraining"""
self.tweetId.append(rawTweet[0])
self.tweetTarget.append(1 if rawTweet[1] else 0)
self.features.append(rawTweet[2])
def popOldExample(self):
"""Pop out the first example inserted"""
if self.tweetsToPop > 0:
self.tweetId.pop(0)
self.tweetTarget.pop(0)
self.features.pop(0)
self.tweetsToPop -= 1
class Classifier(object):
def retrain(self, vectorFeature, vectorTarget):
self.cl.fit(vectorFeature, vectorTarget)
def classify(self, vectorizedTest):
return self.cl.predict(vectorizedTest)[0]
class ProbClassifier(object):
def getProb(self, vectorizedTest):
return self.cl.predict_proba(vectorizedTest)[0][1]
# Implemented classifiers:
class NBClassifier(Classifier, ProbClassifier):
def __init__(self):
self.cl = MultinomialNB()
class SVMClassifier(Classifier):
def __init__(self):
self.cl = svm.SVC()
class OneClassClassifier(Classifier):
def __init__(self, nu=0.5):
self.cl = svm.OneClassSVM(nu=nu)
def retrain(self, vectorFeature, vectorTarget):
assert(vectorFeature.shape[0] == len(vectorTarget))
trueRows = [i for i, t in enumerate(vectorTarget) if t]
# get the rows with target==1 (positive samples)
vectors = vectorFeature[trueRows, :]
self.cl.fit(vectors)
class KNNClassifier(Classifier, ProbClassifier):
def __init__(self, neighbors=2):
self.cl = KNeighborsClassifier(n_neighbors=neighbors)
class ADAClassifier(Classifier, ProbClassifier):
def __init__(self, maxTreeDepth=1, estimators=50, learningRate=1.):
self.cl = AdaBoostClassifier(n_estimators=estimators, learning_rate=learningRate,
base_estimator=DecisionTreeClassifier(max_depth=maxTreeDepth))
def retrain(self, vectorFeature, vectorTarget):
# self.cl.fit([v.toarray()[0] for v in vectorFeature], vectorTarget)
self.cl.fit(vectorFeature, vectorTarget)
def classify(self, vectorizedTest):
# return self.cl.predict(vectorizedTest.toarray()[0])[0]
return self.cl.predict(vectorizedTest)[0]
def getProb(self, vectorizedTest):
# return self.cl.predict_proba(vectorizedTest.toarray()[0])[0][1]
return self.cl.predict_proba(vectorizedTest)[0][1]
class RClassifier(Classifier):
"""Ridge classifier"""
def __init__(self, alpha=1.):
self.cl = RidgeClassifier(alpha=alpha)
class LClassifier(Classifier):
def __init__(self, C=1.):
self.cl = LogisticRegression(C=C, class_weight='auto', penalty='l2')
class NCClassifier(Classifier):
"""Rocchio classifier"""
def __init__(self, shrink=None):
self.cl = NearestCentroid(shrink_threshold=shrink)
self.shrink = shrink
def retrain(self, vectorFeature, vectorTarget):
        if self.shrink is not None:
self.cl.fit([v.toarray()[0] for v in vectorFeature], vectorTarget)
else:
super(NCClassifier, self).retrain(vectorFeature, vectorTarget)
def classify(self, vectorizedTest):
        if self.shrink is not None:
return self.cl.predict(vectorizedTest.toarray()[0])[0]
else:
return super(NCClassifier, self).classify(vectorizedTest)
class RocchioClassifier(Classifier):
"""Rocchio classifier"""
def __init__(self, threshold = 0.5, distance_func = scipy.spatial.distance.cosine):
self.threshold = threshold
self.distance_func = distance_func
def retrain(self, vectorFeature, vectorTarget):
        #FIXME optimize! (e.g. don't compute centroids if trueRows are the same). Also optimize the training set computations
assert(vectorFeature.shape[0] == len(vectorTarget))
trueRows = [i for i, t in enumerate(vectorTarget) if t]
# get the rows with target==1 (positive samples)
vectors = vectorFeature[trueRows, :]
#self.centroid = csr_matrix(vectors.mean(0))
self.centroid = vectors.mean(0)
def classify(self, vectorizedTest):
if issparse(vectorizedTest):
vectorizedTest = vectorizedTest.toarray()
if self.distance_func(vectorizedTest, self.centroid) < self.threshold:
return 1
else:
return 0
class NegativeRocchioClassifier(Classifier):
"""Rocchio classifier"""
def __init__(self, threshold = 0.5, distance_func = scipy.spatial.distance.cosine):
self.threshold = threshold
self.distance_func = distance_func
self.neg_centroid = np.zeros(1)
def retrain(self, vectorFeature, vectorTarget):
assert(vectorFeature.shape[0] == len(vectorTarget))
#FIXME operations on sparse matrices
pos_rows = [i for i, t in enumerate(vectorTarget) if t]
neg_rows = [i for i, t in enumerate(vectorTarget) if not t]
# get the rows with target==1 (positive samples)
pos_vectors = vectorFeature[pos_rows, :]
neg_vectors = [v.toarray() for v in vectorFeature[neg_rows, :] if v.getnnz()]
self.pos_centroid = pos_vectors.mean(0)
if not self.neg_centroid.any() and self.neg_centroid.shape[0] != self.pos_centroid.shape[0]:
self.neg_centroid = np.zeros(self.pos_centroid.shape[0])
else:
#FIXME efficiency!
self.neg_centroid = np.array([v for v in neg_vectors if self.distance_func(v, self.pos_centroid) > self.threshold]).mean(0)
def classify(self, vectorizedTest):
if issparse(vectorizedTest):
vectorizedTest = vectorizedTest.toarray()
        #FIXME optimize!
pos_dist = self.distance_func(vectorizedTest, self.pos_centroid)
neg_dist = self.distance_func(vectorizedTest, self.neg_centroid)
return 1 if pos_dist < neg_dist else 0
class DTClassifier(Classifier):
"""Decision Tree classifier"""
def __init__(self):
self.cl = DecisionTreeClassifier(random_state=0)
class RFClassifier(Classifier):
"""Random forest classifier"""
def __init__(self):
self.cl = RandomForestClassifier(n_jobs=2)
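# Usage sketch (illustrative only; the tweet tuples and tokens below are made up):
#   training = TrainingSet([(1, True, ['obama', 'speech']),
#                           (2, False, ['pizza', 'lunch'])], tweetsToPop=0)
#   training.vectorize()
#   clf = NBClassifier()
#   clf.retrain(training.matrix, training.tweetTarget)
#   test_tweet = (3, None, ['obama', 'lunch'])
#   label = clf.classify(training.vectorizeTest(test_tweet))
#   prob = clf.getProb(training.vectorizeTest(test_tweet))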
| gpl-3.0 |
MJuddBooth/pandas | asv_bench/benchmarks/timestamp.py | 2 | 3354 | import datetime
import dateutil
import pytz
from pandas import Timestamp
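# airspeed velocity (asv) benchmark conventions: ``setup`` runs before timing,
# methods prefixed with ``time_`` are the benchmarks, and ``params`` /
# ``param_names`` define the grid of arguments each benchmark runs over.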
class TimestampConstruction(object):
def time_parse_iso8601_no_tz(self):
Timestamp('2017-08-25 08:16:14')
def time_parse_iso8601_tz(self):
Timestamp('2017-08-25 08:16:14-0500')
def time_parse_dateutil(self):
Timestamp('2017/08/25 08:16:14 AM')
def time_parse_today(self):
Timestamp('today')
def time_parse_now(self):
Timestamp('now')
def time_fromordinal(self):
Timestamp.fromordinal(730120)
def time_fromtimestamp(self):
Timestamp.fromtimestamp(1515448538)
class TimestampProperties(object):
_tzs = [None, pytz.timezone('Europe/Amsterdam'), pytz.UTC,
dateutil.tz.tzutc()]
_freqs = [None, 'B']
params = [_tzs, _freqs]
param_names = ['tz', 'freq']
def setup(self, tz, freq):
self.ts = Timestamp('2017-08-25 08:16:14', tzinfo=tz, freq=freq)
def time_tz(self, tz, freq):
self.ts.tz
def time_dayofweek(self, tz, freq):
self.ts.dayofweek
def time_weekday_name(self, tz, freq):
        self.ts.day_name()
def time_dayofyear(self, tz, freq):
self.ts.dayofyear
def time_week(self, tz, freq):
self.ts.week
def time_quarter(self, tz, freq):
self.ts.quarter
def time_days_in_month(self, tz, freq):
self.ts.days_in_month
def time_freqstr(self, tz, freq):
self.ts.freqstr
def time_is_month_start(self, tz, freq):
self.ts.is_month_start
def time_is_month_end(self, tz, freq):
self.ts.is_month_end
def time_is_quarter_start(self, tz, freq):
self.ts.is_quarter_start
def time_is_quarter_end(self, tz, freq):
self.ts.is_quarter_end
def time_is_year_start(self, tz, freq):
self.ts.is_year_start
def time_is_year_end(self, tz, freq):
self.ts.is_year_end
def time_is_leap_year(self, tz, freq):
self.ts.is_leap_year
def time_microsecond(self, tz, freq):
self.ts.microsecond
def time_month_name(self, tz, freq):
self.ts.month_name()
class TimestampOps(object):
params = [None, 'US/Eastern', pytz.UTC,
dateutil.tz.tzutc()]
param_names = ['tz']
def setup(self, tz):
self.ts = Timestamp('2017-08-25 08:16:14', tz=tz)
def time_replace_tz(self, tz):
self.ts.replace(tzinfo=pytz.timezone('US/Eastern'))
def time_replace_None(self, tz):
self.ts.replace(tzinfo=None)
def time_to_pydatetime(self, tz):
self.ts.to_pydatetime()
def time_normalize(self, tz):
self.ts.normalize()
def time_tz_convert(self, tz):
if self.ts.tz is not None:
self.ts.tz_convert(tz)
def time_tz_localize(self, tz):
if self.ts.tz is None:
self.ts.tz_localize(tz)
def time_to_julian_date(self, tz):
self.ts.to_julian_date()
def time_floor(self, tz):
self.ts.floor('5T')
def time_ceil(self, tz):
self.ts.ceil('5T')
class TimestampAcrossDst(object):
def setup(self):
dt = datetime.datetime(2016, 3, 27, 1)
self.tzinfo = pytz.timezone('CET').localize(dt, is_dst=False).tzinfo
self.ts2 = Timestamp(dt)
def time_replace_across_dst(self):
self.ts2.replace(tzinfo=self.tzinfo)
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/linear_model/setup.py | 83 | 1719 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
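    # The cd_fast and sgd_fast extensions below are Cython code linked against
    # CBLAS; their include dirs point at scikit-learn's bundled cblas headers
    # plus NumPy's headers, while sag_fast only needs the NumPy headers.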
config.add_extension('cd_fast', sources=['cd_fast.pyx'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.pyx'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.pyx'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
palful/yambopy | scripts/realtime/rt-ta.py | 4 | 2996 | #import matplotlib
#matplotlib.use('Agg') # prevents crashes if no X server present
from yambopy import *
import sys
import os
import argparse
import re
import numpy as np
import matplotlib.pyplot as plt
"""
After the real-time calculations are completed, this script plots the Transient Absorption (TA) at different times.
Settings inside the script: prefix
"""
parser = argparse.ArgumentParser(description='Make Transient Absorption (TA) plots from RT-BSE')
parser.add_argument('-f' ,'--folder' , help='Real-time folder (e.g.: rt-24x24)')
parser.add_argument('-j' ,'--job' , help='Name of job (e.g.: QSSIN-2.0eV)')
parser.add_argument('-p' ,'--prefix' , help='Prefix of the BSE calculations (e.g.: B-XRK-XG)')
parser.add_argument('-nt','--notext' , help='Skips the writing of the data', action='store_false')
args = parser.parse_args()
folder = args.folder
job = args.job
prefix = args.prefix
source = folder+'/'+job
print 'Packing relevant calculations'
pack_files_in_folder(source,mask=prefix)
print 'Done.'
data = YamboAnalyser(source)
output = data.get_data((prefix,'eps'))
# keys to read the outputs in order
keys=output.keys()
# sorting
s=[]
for i,key in enumerate(keys):
s.append((int(re.search("\d{1,}", key).group()),key))
s=sorted(s) # s is list of tuples (int,str)
# having integers allows sorting in the correct numerical order
times = ['t'+str(i[0]) for i in s]
keys = [i[1] for i in s]
print "Sorted keys: ",keys
# output[key][n][0] is x
# output[key][n][1] is y
# Number of lines (number of x points)
nlines = len(output[keys[0]])
# We want to create a file that is x, y1, y2, ..., yn
array = np.zeros((nlines,len(keys)+1))
diff = np.zeros((nlines,len(keys)))
for l in range(0,nlines):
# first, value of x
x = output[keys[0]][l][0]
array[l][0]=x
diff[l][0]=x
# then all the y's
# t0
y0 = output[keys[0]][l][1]
array[l][1] = y0
# additional columns
for i,key in enumerate(keys):
# t_i > t0
y1 = output[key][l][1]
array[l][i+1]=y1
if i==0:
continue
diff[l][i]=y1-y0
# Writing
if args.notext:
print 'Writing data to files...'
f=open(job+prefix+'.dat','w')
string = 'eV'
for t in times:
string = string + '\t' + t
np.savetxt(f,array,delimiter='\t',header=string)
f.close()
f=open(job+'_diff.dat','w')
string = 'eV'
for i,t in enumerate(times):
if i==0:
continue
string = string + '\t' + t + '-t0'
np.savetxt(f,diff,delimiter='\t',header=string)
f.close()
print 'Writing done.'
else:
print '"--notext" flag'
# Plotting
fig,ax1=plt.subplots()
for i in range(1,len(diff[0])):
plt.plot(diff[:,0],diff[:,i],'-',label='%s-%s'%(times[i],times[0]))
plt.legend(loc=2)
plt.xlabel("eV")
plt.ylabel("TA (arb. units)")
plt.axvline(1.94)
plt.title("TA, job %s/%s in folder %s"%(job,prefix,folder))
ax2 = ax1.twinx()
#plt.plot(array[:,0],array[:,1],'k-',label='BSE@t0')
plt.show()
print 'Done.'
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/scipy/interpolate/_fitpack_impl.py | 15 | 46563 | #!/usr/bin/env python
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/index.html
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
from __future__ import division, print_function, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
empty, iinfo, intc, asarray)
# Try to replace _fitpack interface with
# f2py-generated version
from . import dfitpack
def _intc_overflow(x, msg=None):
"""Cast the value to an intc and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(intc).max:
if msg is None:
msg = '%r cannot fit into an intc' % x
raise OverflowError(msg)
return intc(x)
_iermess = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree k.\n"
"fp gives the upper bound fp0 for the smoothing factor s", None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: data (x,y) size is too small or smoothing parameter"
"\ns is too small (fp>s).", ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
10: ["Error on input data", ValueError],
'unknown': ["An error occurred", TypeError]
}
_iermess2 = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree kx and ky."
"\nfp gives the upper bound fp0 for the smoothing factor s", None],
-3: ["Warning. The coefficients of the spline have been computed as the\n"
"minimal norm least-squares solution of a rank deficient system.",
None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: nxest or nyest too small or s is too small. (fp>s)",
ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable causes: s too small or badly chosen eps.\n"
"(abs(fp-s)/s>0.001)", ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
4: ["No more knots can be added because the number of B-spline\n"
"coefficients already exceeds the number of data points m.\n"
"Probable causes: either s or m too small. (fp>s)", ValueError],
5: ["No more knots can be added because the additional knot would\n"
"coincide with an old one. Probable cause: s too small or too large\n"
"a weight to an inaccurate data point. (fp>s)", ValueError],
10: ["Error on input data", ValueError],
11: ["rwrk2 too small, i.e. there is not enough workspace for computing\n"
"the minimal least-squares solution of a rank deficient system of\n"
"linear equations.", ValueError],
'unknown': ["An error occurred", TypeError]
}
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-dimensional curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
Always large enough is nest=m+k+1.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
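    Examples
    --------
    Generate a parametric spline through points on a circle and evaluate it on
    a finer parameter grid (illustrative sketch):
    >>> import numpy as np
    >>> from scipy.interpolate import splprep, splev
    >>> theta = np.linspace(0, 2*np.pi, 40)
    >>> x, y = np.cos(theta), np.sin(theta)
    >>> tck, u = splprep([x, y], s=0)
    >>> xnew, ynew = splev(np.linspace(0, 1, 200), tck)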
"""
if task <= 0:
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
x = atleast_1d(x)
idim, m = x.shape
if per:
for i in range(idim):
if x[i][0] != x[i][-1]:
if quiet < 2:
warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
(i, m, i)))
x[i][-1] = x[i][0]
if not 0 < idim < 11:
raise TypeError('0 < idim < 11 must hold')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
ipar = (u is not None)
if ipar:
_parcur_cache['u'] = u
if ub is None:
_parcur_cache['ub'] = u[0]
else:
_parcur_cache['ub'] = ub
if ue is None:
_parcur_cache['ue'] = u[-1]
else:
_parcur_cache['ue'] = ue
else:
_parcur_cache['u'] = zeros(m, float)
if not (1 <= k <= 5):
raise TypeError('1 <= k= %d <=5 must hold' % k)
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
raise TypeError('Mismatch of input dimensions')
if s is None:
s = m - sqrt(2*m)
if t is None and task == -1:
raise TypeError('Knots must be given for task=-1')
if t is not None:
_parcur_cache['t'] = atleast_1d(t)
n = len(_parcur_cache['t'])
if task == -1 and n < 2*k + 2:
raise TypeError('There must be at least 2*k+2 knots for task=-1')
if m <= k:
raise TypeError('m > k must hold')
if nest is None:
nest = m + 2*k
if (task >= 0 and s == 0) or (nest < 0):
if per:
nest = m + 2*k
else:
nest = m + k + 1
nest = max(nest, 2*k + 3)
u = _parcur_cache['u']
ub = _parcur_cache['ub']
ue = _parcur_cache['ue']
t = _parcur_cache['t']
wrk = _parcur_cache['wrk']
iwrk = _parcur_cache['iwrk']
t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
task, ipar, s, t, nest, wrk, iwrk, per)
_parcur_cache['u'] = o['u']
_parcur_cache['ub'] = o['ub']
_parcur_cache['ue'] = o['ue']
_parcur_cache['t'] = t
_parcur_cache['wrk'] = o['wrk']
_parcur_cache['iwrk'] = o['iwrk']
ier = o['ier']
fp = o['fp']
n = len(t)
u = o['u']
c.shape = idim, n - k - 1
tcku = [t, list(c), k], u
if ier <= 0 and not quiet:
warnings.warn(RuntimeWarning(_iermess[ier][0] +
"\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s)))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tcku, fp, ier, _iermess[ier][0]
except KeyError:
return tcku, fp, ier, _iermess['unknown'][0]
else:
return tcku
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc)}
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The order of the spline fit. It is recommended to use cubic splines.
Even order splines should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
set of data (t will be stored an used internally)
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
    See Also
    --------
    UnivariateSpline, BivariateSpline
    splprep, splev, sproot, spalde, splint
    bisplrep, bisplev
    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives. Uses the
    FORTRAN routine curfit from FITPACK.
    The user is responsible for assuring that the values of *x* are unique.
    Otherwise, *splrep* will not return sensible results.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, tck)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
if task <= 0:
_curfit_cache = {}
x, y = map(atleast_1d, [x, y])
m = len(x)
if w is None:
w = ones(m, float)
if s is None:
s = 0.0
else:
w = atleast_1d(w)
if s is None:
s = m - sqrt(2*m)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if (m != len(y)) or (m != len(w)):
raise TypeError('Lengths of the first three arguments (x,y,w) must '
'be equal')
if not (1 <= k <= 5):
raise TypeError('Given degree of the spline (k=%d) is not supported. '
'(1<=k<=5)' % k)
if m <= k:
raise TypeError('m > k must hold')
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if t is not None:
task = -1
if task == -1:
if t is None:
raise TypeError('Knots must be given for task=-1')
numknots = len(t)
_curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
_curfit_cache['t'][k+1:-k-1] = t
nest = len(_curfit_cache['t'])
elif task == 0:
if per:
nest = max(m + 2*k, 2*k + 3)
else:
nest = max(m + k + 1, 2*k + 3)
t = empty((nest,), float)
_curfit_cache['t'] = t
if task <= 0:
if per:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
else:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
_curfit_cache['iwrk'] = empty((nest,), intc)
try:
t = _curfit_cache['t']
wrk = _curfit_cache['wrk']
iwrk = _curfit_cache['iwrk']
except KeyError:
raise TypeError("must call with task=1 only after"
" call with task=0,-1")
if not per:
n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
xb, xe, k, s)
else:
n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
tck = (t[:n], c[:n], k)
if ier <= 0 and not quiet:
_mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess[ier][0]
except KeyError:
return tck, fp, ier, _iermess['unknown'][0]
else:
return tck
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : tuple
A sequence of length 3 returned by `splrep` or `splprep` containing
the knots, coefficients, and degree of the spline.
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in ``x``. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in N-dimensional space.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k, der=der:
splev(x, [t, c, k], der, ext), c))
else:
if not (0 <= der <= k):
raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
if ext not in (0, 1, 2, 3):
raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
x = asarray(x)
shape = x.shape
x = atleast_1d(x).ravel()
y, ier = _fitpack._spl_(x, der, t, c, k, ext)
if ier == 10:
raise ValueError("Invalid input data")
if ier == 1:
raise ValueError("Found x value not in the domain")
if ier:
raise TypeError("An error occurred")
return y.reshape(shape)
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline.
Given the knots and coefficients of a B-spline, evaluate the definite
integral of the smoothing polynomial between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
Notes
-----
splint silently assumes that the spline function is zero outside the data
interval (a, b).
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
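    Examples
    --------
    Integrate a spline fit of sin(x) over [0, pi] (illustrative; the exact
    value is 2):
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, splint
    >>> x = np.linspace(0, np.pi, 50)
    >>> tck = splrep(x, np.sin(x))
    >>> splint(0, np.pi, tck)  # approximately 2.0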
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return list(map(lambda c, a=a, b=b, t=t, k=k:
splint(a, b, [t, c, k]), c))
else:
aint, wrk = _fitpack._splint(t, c, k, a, b)
if full_output:
return aint, wrk
else:
return aint
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
if k != 3:
raise ValueError("sproot works only for cubic (k=3) splines")
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return list(map(lambda c, t=t, k=k, mest=mest:
sproot([t, c, k], mest), c))
else:
if len(t) < 8:
raise TypeError("The number of knots %d>=8" % len(t))
z, ier = _fitpack._sproot(t, c, k, mest)
if ier == 10:
raise TypeError("Invalid input data. "
"t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
if ier == 0:
return z
if ier == 1:
warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
return z
raise TypeError("Unknown error")
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
UnivariateSpline, BivariateSpline
References
----------
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
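    Examples
    --------
    All derivatives of a cubic spline interpolating ``x**3`` at x = 2
    (illustrative; the exact values are 8, 12, 12, 6):
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, spalde
    >>> x = np.linspace(0, 10, 70)
    >>> tck = splrep(x, x**3)
    >>> spalde(2.0, tck)  # approximately array([ 8., 12., 12., 6.])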
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k:
spalde(x, [t, c, k]), c))
else:
x = atleast_1d(x)
if len(x) > 1:
return list(map(lambda x, tck=tck: spalde(x, tck), x))
d, ier = _fitpack._spalde(t, c, k, x[0])
if ier == 0:
return d
if ier == 10:
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
# full_output=0,nest=None,per=0,quiet=1):
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
'wrk': array([], float), 'iwrk': array([], intc)}
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
full_output=0, nxest=None, nyest=None, quiet=1):
"""
Find a bivariate B-spline representation of a surface.
Given a set of data points (x[i], y[i], z[i]) representing a surface
z=f(x,y), compute a B-spline representation of the surface. Based on
the routine SURFIT from FITPACK.
Parameters
----------
x, y, z : ndarray
Rank-1 arrays of data points.
w : ndarray, optional
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
xb, xe : float, optional
End points of approximation interval in `x`.
By default ``xb = x.min(), xe=x.max()``.
yb, ye : float, optional
End points of approximation interval in `y`.
By default ``yb=y.min(), ye = y.max()``.
kx, ky : int, optional
The degrees of the spline (1 <= kx, ky <= 5).
Third order (kx=ky=3) is recommended.
task : int, optional
If task=0, find knots in x and y and coefficients for a given
smoothing factor, s.
If task=1, find knots and coefficients for another value of the
smoothing factor, s. bisplrep must have been previously called
with task=0 or task=1.
If task=-1, find coefficients for a given set of knots tx, ty.
s : float, optional
A non-negative smoothing factor. If weights correspond
to the inverse of the standard-deviation of the errors in z,
then a good s-value should be found in the range
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
eps : float, optional
A threshold for determining the effective rank of an
over-determined linear system of equations (0 < eps < 1).
`eps` is not likely to need changing.
tx, ty : ndarray, optional
Rank-1 arrays of the knots of the spline for task=-1
full_output : int, optional
Non-zero to return optional outputs.
nxest, nyest : int, optional
Over-estimates of the total number of knots. If None then
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
quiet : int, optional
Non-zero to suppress printing of messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : array_like
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
coefficients (c) of the bivariate B-spline representation of the
surface along with the degree of the spline.
fp : ndarray
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated if
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplev` to evaluate the value of the B-spline given its tck
representation.
References
----------
.. [1] Dierckx P.:An algorithm for surface fitting with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.:An algorithm for surface fitting with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
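    Examples
    --------
    Fit a smoothing spline surface to samples of ``z = x**2 + y**2`` and
    evaluate it on a regular grid (illustrative sketch):
    >>> import numpy as np
    >>> from scipy.interpolate import bisplrep, bisplev
    >>> x, y = np.mgrid[-1:1:20j, -1:1:20j]
    >>> z = x**2 + y**2
    >>> tck = bisplrep(x, y, z, s=0.01)
    >>> znew = bisplev(np.linspace(-1, 1, 50), np.linspace(-1, 1, 50), tck)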
"""
x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
m = len(x)
if not (m == len(y) == len(z)):
raise TypeError('len(x)==len(y)==len(z) must hold.')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if xb is None:
xb = x.min()
if xe is None:
xe = x.max()
if yb is None:
yb = y.min()
if ye is None:
ye = y.max()
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if s is None:
s = m - sqrt(2*m)
if tx is None and task == -1:
raise TypeError('Knots_x must be given for task=-1')
if tx is not None:
_surfit_cache['tx'] = atleast_1d(tx)
nx = len(_surfit_cache['tx'])
if ty is None and task == -1:
raise TypeError('Knots_y must be given for task=-1')
if ty is not None:
_surfit_cache['ty'] = atleast_1d(ty)
ny = len(_surfit_cache['ty'])
if task == -1 and nx < 2*kx+2:
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
if task == -1 and ny < 2*ky+2:
        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
'supported. (1<=k<=5)' % (kx, ky))
if m < (kx + 1)*(ky + 1):
raise TypeError('m >= (kx+1)(ky+1) must hold')
if nxest is None:
nxest = int(kx + sqrt(m/2))
if nyest is None:
nyest = int(ky + sqrt(m/2))
nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
if task >= 0 and s == 0:
nxest = int(kx + sqrt(3*m))
nyest = int(ky + sqrt(3*m))
if task == -1:
_surfit_cache['tx'] = atleast_1d(tx)
_surfit_cache['ty'] = atleast_1d(ty)
tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
wrk = _surfit_cache['wrk']
u = nxest - kx - 1
v = nyest - ky - 1
km = max(kx, ky) + 1
ne = max(nxest, nyest)
bx, by = kx*v + ky + 1, ky*u + kx + 1
b1, b2 = bx, bx + v - ky
if bx > by:
b1, b2 = by, by + u - kx
msg = "Too many data points to interpolate"
lwrk1 = _intc_overflow(u*v*(2 + b1 + b2) +
2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
msg=msg)
lwrk2 = _intc_overflow(u*v*(b2 + 1) + b2, msg=msg)
tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
task, s, eps, tx, ty, nxest, nyest,
wrk, lwrk1, lwrk2)
_curfit_cache['tx'] = tx
_curfit_cache['ty'] = ty
_curfit_cache['wrk'] = o['wrk']
ier, fp = o['ier'], o['fp']
tck = [tx, ty, c, kx, ky]
ierm = min(11, max(-3, ier))
if ierm <= 0 and not quiet:
_mess = (_iermess2[ierm][0] +
"\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ierm > 0 and not full_output:
if ier in [1, 2, 3, 4, 5]:
_mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
else:
try:
raise _iermess2[ierm][1](_iermess2[ierm][0])
except KeyError:
raise _iermess2['unknown'][1](_iermess2['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess2[ierm][0]
except KeyError:
return tck, fp, ier, _iermess2['unknown'][0]
else:
return tck
def bisplev(x, y, tck, dx=0, dy=0):
"""
Evaluate a bivariate B-spline and its derivatives.
Return a rank-2 array of spline function values (or spline derivative
values) at points given by the cross-product of the rank-1 arrays `x` and
`y`. In special cases, return an array or just a float if either `x` or
`y` or both are floats. Based on BISPEV from FITPACK.
Parameters
----------
x, y : ndarray
Rank-1 arrays specifying the domain over which to evaluate the
spline or its derivative.
tck : tuple
A sequence of length 5 returned by `bisplrep` containing the knot
locations, the coefficients, and the degree of the spline:
[tx, ty, c, kx, ky].
dx, dy : int, optional
The orders of the partial derivatives in `x` and `y` respectively.
Returns
-------
vals : ndarray
The B-spline or its derivative evaluated over the set formed by
the cross-product of `x` and `y`.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplrep` to generate the `tck` representation.
References
----------
.. [1] Dierckx P. : An algorithm for surface fitting
with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P. : An algorithm for surface fitting
with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P. : Curve and surface fitting with splines,
Monographs on Numerical Analysis, Oxford University Press, 1993.
"""
tx, ty, c, kx, ky = tck
if not (0 <= dx < kx):
raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
if not (0 <= dy < ky):
raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
x, y = map(atleast_1d, [x, y])
if (len(x.shape) != 1) or (len(y.shape) != 1):
raise ValueError("First two entries should be rank-1 arrays.")
z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
z.shape = len(x), len(y)
if len(z) > 1:
return z
if len(z[0]) > 1:
return z[0]
return z[0][0]
def dblint(xa, xb, ya, yb, tck):
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
tck : list [tx, ty, c, kx, ky]
A sequence of length 5 returned by bisplrep containing the knot
locations tx, ty, the coefficients c, and the degrees kx, ky
of the spline.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx, ty, c, kx, ky = tck
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : tuple
A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
the vector of knots, the B-spline coefficients,
and the degree of the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the new spline.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
Notes
-----
Based on algorithms from [1]_ and [2]_.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
cc = []
for c_vals in c:
tt, cc_val, kk = insert(x, [t, c_vals, k], m)
cc.append(cc_val)
return (tt, cc, kk)
else:
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
return (tt, cc, k)
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
tck_der : tuple of (t2, c2, k2)
Spline of order k2=k-n representing the derivative
of the input spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
if n < 0:
return splantider(tck, -n)
t, c, k = tck
if n > k:
raise ValueError(("Order of derivative (n = %r) must be <= "
"order of spline (k = %r)") % (n, tck[2]))
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + ((None,)*len(c.shape[1:]))
with np.errstate(invalid='raise', divide='raise'):
try:
for j in range(n):
# See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
# Compute the denominator in the differentiation formula.
                # (and append trailing dims, if necessary)
dt = t[k+1:-1] - t[1:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = (c[1:-1-k] - c[:-2-k]) * k / dt
# Pad coefficient array to same size as knots (FITPACK
# convention)
c = np.r_[c, np.zeros((k,) + c.shape[1:])]
# Adjust knots
t = t[1:-1]
k -= 1
except FloatingPointError:
raise ValueError(("The spline has internal repeated knots "
"and is not differentiable %d times") % n)
return t, c, k
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
tck_ader : tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
See Also
--------
splder, splev, spalde
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if n < 0:
return splder(tck, -n)
t, c, k = tck
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + (None,)*len(c.shape[1:])
for j in range(n):
# This is the inverse set of operations to splder.
# Compute the multiplier in the antiderivative formula.
dt = t[k+1:] - t[:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
c = np.r_[np.zeros((1,) + c.shape[1:]),
c,
[c[-1]] * (k+2)]
# New knots
t = np.r_[t[0], t, t[-1]]
k += 1
return t, c, k
| mit |
bhargav/scikit-learn | examples/applications/face_recognition.py | 48 | 5691 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset:
================== ============ ======= ========== =======
precision recall f1-score support
================== ============ ======= ========== =======
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
================== ============ ======= ========== =======
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
mtreinish/subunit2sql-analysis | run_time.py | 1 | 7720 | #!/bin/env python2
import copy
import operator
import sys
import collections
import matplotlib.pyplot as plt
from oslo.config import cfg
from oslo.db.sqlalchemy import utils as db_utils
import pandas as pd
from subunit2sql.db import api
from subunit2sql.db import models
from subunit2sql import shell
CONF = cfg.CONF
SHELL_OPTS = [
cfg.StrOpt('test_id', positional=True, required=True,
help='Test id to extract time series for'),
]
def cli_opts():
for opt in SHELL_OPTS:
CONF.register_cli_opt(opt)
def list_opts():
opt_list = copy.deepcopy(SHELL_OPTS)
return [('DEFAULT', opt_list)]
def generate_series(test_id):
session = api.get_session()
run_times = api.get_test_run_time_series(test_id, session)
session.close()
ts = pd.Series(run_times)
# ts = ts.truncate(after='11/26/2014')
# print len(ts)
# plot1 = pd.rolling_median(test, 100).plot()
plot = pd.rolling_mean(ts, 50).plot()
plot = ts.plot()
fig = plot.get_figure()
fig.savefig('/tmp/test.eps')
return ts
def get_metadata(id):
session = api.get_session()
query = db_utils.model_query(models.Test, session=session).filter_by(
id=id).join(models.TestRun).filter_by(status='success').join(
models.RunMetadata,
models.RunMetadata.run_id==models.TestRun.run_id).values(
models.TestRun.start_time,
models.TestRun.stop_time,
models.RunMetadata.key,
models.RunMetadata.value,
models.TestRun.status)
test_times = {}
valid_keys = ['build_node', 'build_name']
for run in query:
if run[4] != 'success':
continue
if run[0] not in test_times:
run_time = (run[1] - run[0]).total_seconds()
metadata = {run[2]: run[3]}
test_times[run[0]] = (run_time, metadata)
else:
test_times[run[0]][1][run[2]] = run[3]
metas = {}
metas_more = {}
metas_really_slow = {}
count = 0
count_more = 0
count_really_slow = 0
dates = []
series = {}
series_more = {}
series_really_slow = {}
for run in test_times:
if test_times[run][0] < 100:
if 'build_queue' in test_times[run][1]:
if test_times[run][1]['build_queue'] != 'gate':
continue
if 'build_branch' in test_times[run][1]:
if test_times[run][1]['build_branch'] == 'master':
continue
count = count + 1
for meta in test_times[run][1]:
if meta in metas:
metas[meta].append(test_times[run][1].get(meta))
else:
metas[meta] = [test_times[run][1].get(meta)]
dates.append(run)
series[run] = test_times[run][0]
elif test_times[run][0] >= 100:
if test_times[run][0] >= 175:
if 'build_queue' in test_times[run][1]:
if test_times[run][1]['build_queue'] != 'gate':
continue
if 'build_branch' in test_times[run][1]:
if test_times[run][1]['build_branch'] != 'master':
continue
count_really_slow = count_really_slow + 1
for meta in test_times[run][1]:
if meta in metas_really_slow:
metas_really_slow[meta].append(test_times[run][1].get(meta))
else:
metas_really_slow[meta] = [test_times[run][1].get(meta)]
series_really_slow[run] = test_times[run][0]
else:
if 'build_queue' in test_times[run][1]:
if test_times[run][1]['build_queue'] != 'gate':
continue
if 'build_branch' in test_times[run][1]:
if test_times[run][1]['build_branch'] != 'master':
continue
count_more = count_more + 1
for meta in test_times[run][1]:
if meta in metas_more:
metas_more[meta].append(test_times[run][1].get(meta))
else:
metas_more[meta] = [test_times[run][1].get(meta)]
series_more[run] = test_times[run][0]
vals = {}
trusty = 0
precise = 0
other = 0
vals_more = {}
trusty_more = 0
precise_more = 0
other_more = 0
vals_really_slow = {}
hp_really_slow = 0
rax_really_slow = 0
other_really_slow = 0
for meta in metas:
if meta == 'build_node':
for node in metas[meta]:
if 'trusty' in node:
trusty = trusty + 1
elif 'precise' in node:
precise = precise + 1
else:
other = other + 1
else:
vals[meta] = dict(collections.Counter(metas[meta]))
for meta in metas_more:
if meta == 'build_node':
for node in metas_more[meta]:
if 'hp' in node:
trusty_more = trusty_more + 1
elif 'rax' in node:
precise_more = precise_more + 1
else:
other_more = other_more + 1
else:
vals_more[meta] = dict(collections.Counter(metas_more[meta]))
for meta in metas_really_slow:
if meta == 'build_node':
for node in metas_really_slow[meta]:
if 'hp' in node:
hp_really_slow = hp_really_slow + 1
elif 'rax' in node:
rax_really_slow = rax_really_slow + 1
else:
other_really_slow = other_really_slow + 1
else:
vals_really_slow[meta] = dict(collections.Counter(metas_really_slow[meta]))
print "Fast Jobs:"
print 'Build Queues:'
print vals['build_queue']
# print 'Build Name'
# print vals['build_name']
print 'Build Branch'
print vals['build_branch']
print "trusty: %s, precise %s, other: %s" % (trusty, precise, other)
print max(dates)
print "Slow Jobs:"
print 'Build Queues:'
print vals_more['build_queue']
# print 'Build Name'
# print vals_more['build_name']
print 'Build Branch'
print vals_more['build_branch']
print "hp: %s, rax %s, other: %s" % (trusty_more, precise_more, other_more)
print sorted(vals_more['build_name'].items(), key=operator.itemgetter(1))
print "Really Slow Jobs:"
print 'Build Queues:'
print sorted(vals_really_slow['build_queue'].items(), key=operator.itemgetter(1))
# print 'Build Name'
# print vals_more['build_name']
print 'Build Branch'
print vals_really_slow['build_branch']
print "hp: %s, rax %s, other: %s" % (hp_really_slow, rax_really_slow, other_really_slow)
print sorted(vals_really_slow['build_name'].items(), key=operator.itemgetter(1))
ts_slow = pd.Series(series_more)
ts = pd.Series(series)
ts_really_slow = pd.Series(series_really_slow)
# plot = pd.rolling_mean(ts_slow, 60).plot()
plot = pd.rolling_mean(ts_slow, 60).plot()
plot2 = pd.rolling_mean(ts, 8).plot()
plot3 = pd.rolling_mean(ts_really_slow, 10).plot()
fig = plot.get_figure()
fig.savefig('/tmp/test2.png')
def main():
cli_opts()
shell.parse_args(sys.argv)
run_times = generate_series(CONF.test_id)
# NOTE(mtreinish) This call was used to investigate the split in run times
# on test_rescued_vm_detach_volume which shows clear splits in performance
#get_metadata(CONF.test_id)
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
sgenoud/scikit-learn | sklearn/tests/test_multiclass.py | 1 | 7156 |
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
from nose.tools import assert_almost_equal
from nose.tools import assert_true
from nose.tools import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet, Ridge
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import svm
from sklearn import datasets
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
# FIXME: - should use sets
# - should move to metrics module
def multilabel_precision(Y_true, Y_pred):
n_predictions = 0
n_correct = 0
for i in range(len(Y_true)):
n_predictions += len(Y_pred[i])
for label in Y_pred[i]:
if label in Y_true[i]:
n_correct += 1
return float(n_correct) / n_predictions
def multilabel_recall(Y_true, Y_pred):
n_labels = 0
n_correct = 0
for i in range(len(Y_true)):
n_labels += len(Y_true[i])
for label in Y_pred[i]:
if label in Y_true[i]:
n_correct += 1
return float(n_correct) / n_labels
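# A minimal set-based sketch of the same two metrics (hypothetical helpers, not
# used by the tests below); intersecting label sets replaces the inner loops.
def _multilabel_precision_sets(Y_true, Y_pred):
    n_predicted = sum(len(set(y)) for y in Y_pred)
    n_correct = sum(len(set(yt) & set(yp)) for yt, yp in zip(Y_true, Y_pred))
    return float(n_correct) / n_predicted
def _multilabel_recall_sets(Y_true, Y_pred):
    n_labels = sum(len(set(y)) for y in Y_true)
    n_correct = sum(len(set(yt) & set(yp)) for yt, yp in zip(Y_true, Y_pred))
    return float(n_correct) / n_labels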
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC())
assert_raises(ValueError, ovr.predict, [])
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
pred2 = LinearSVC().fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_true(np.mean(iris.target == pred) >= 0.65)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set([1, 2]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.65, 0.74), (0.72, 0.84)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(multilabel_precision(Y_test, Y_pred), prec,
places=2)
assert_almost_equal(multilabel_recall(Y_test, Y_pred), recall,
places=2)
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC())
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_coef_():
ovr = OneVsRestClassifier(LinearSVC())
ovr.fit(iris.data, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC())
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC())
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC())
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC())
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(), code_size=2)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC())
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
| bsd-3-clause |
memo/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 30 | 4292 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using convolutional networks over characters for
DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.reshape(
tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a ReLU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
koguiman/pokuya | development/Pokuya0.py | 1 | 6711 | ###Creator Koguiman, Santiago
###This script implements the pokuya software
###pokuya is open source software developed to provide
###the same basic functionality as a power quality analyzer
import pylab
import numpy, scipy
from numpy import array, linspace, c_, mean #these names are used unqualified below
import matplotlib.pyplot as plt
from math import * #real-valued sqrt, ceil, cos (cmath's star import would shadow them with complex versions)
from scipy import linalg as nn
import numpy as np
import csv
from scipy import signal as sg
from csvtonum import *
from formlayout import fedit
###########################
datalist = [("Provide file name",""),
('Number of cycles',3),
('Save report',True),
('Current channel',1)
]
filename=fedit(datalist,title="Pokuya",comment="Please provide")
print "File name:"
###########################
##########Single phase tests
###########inputs part
name=str(filename[0])
fe=float(50) #electrical frequency. Eur=50Hz, American countries=60Hz
Ncycle=filename[1] #number of cycles to be analyzed
saverep=filename[2]
channcurr=filename[3]
###########output ports
reportname='{0}{1}'.format('reportof_',name)
########################
##open the file with data
x=csv.reader(open(name),delimiter=',')
###load data and info from source file
[data,info]=csvtodato(x)
####read data
time=data[:,0]; #time signal from scopes or pcs
time=time-min(time) #time signal shifted to zero t
Ts=abs(time[1]-time[0]); #sampling period
if (channcurr==1):
I=data[:,1] #current signal
V=data[:,3] #voltage signal
elif (channcurr==2):
I=data[:,3] #current signal
V=data[:,1] #voltage signal
###############If a resampling is necessary
Fs=ceil(1.0/(Ts)); #sampling frequency
Ln=I.shape[0] #size of the signals
df=(Fs/Ln); ##frequency-domain resolution obtained from the original signals
###Remark: if df is not a multiple of fe=50 Hz, resampling is necessary
df2=df #frequency resolution actually used (updated below if resampling is applied)
if (ceil(fe/df)!=fe/df):
Ttotal2=Ncycle*(1./fe);
df2=1./Ttotal2
l1=ceil(Ttotal2/Ts)
msamp=400
Ts2=(1./fe/msamp)
Ln2temp=ceil(Ttotal2/Ts2)
Ln2=Ln2temp
I,time=sg.resample(I[0:l1], Ln2, time[0:l1], axis=0, window=None)
V,time=sg.resample(V[0:l1], Ln2, time[0:l1], axis=0, window=None)
Ln=Ln2
Fs=1./Ts2#new sampling frequency
###########Fourier transforms part
Idfft=2*np.fft.fft(array(I))/Ln
Vdfft=2*np.fft.fft(array(V))/Ln
##Fs=1.0/(Ts); #sampling frequency
Freq = (Fs/Ln)*linspace(0,Ln-1,num=Ln);
F_Id=abs(Idfft) #Fourier magnitude Current
ph_Id=np.arctan2((Idfft).real,(Idfft).imag); #angle
F_Vd=abs(Vdfft) ##Fourier magnitude voltage
ph_Vd=np.arctan2((Vdfft).real,(Vdfft).imag); #angle voltage
#################################
pinst=V*I #instantaneous power
################################
###########Report stage
I0=F_Id[0] #DC component
V0=F_Vd[0] #DC component
f_50=int(ceil(fe/df2)); #index of the fundamental frequency bin
I50=F_Id[f_50]/sqrt(2.)#fundamental current component rms
V50=F_Vd[f_50]/sqrt(2.)#fundamental voltage component
maxhar=50; #maximum harmonic order to be represented
Ihs=F_Id[f_50:f_50*maxhar:f_50]/sqrt(2.)#harmonic currents, rms
Vhs=F_Vd[f_50:f_50*maxhar:f_50]/sqrt(2.)#harmonic voltages, rms
Fhs=Freq[f_50:f_50*maxhar:f_50]#harmonic frequencies
I_THDf=sqrt(sum(Ihs[1:Ihs.shape[0]]**2))/I50 #THD with respect to the fundamental
V_THDf=sqrt(sum(Vhs[1:Vhs.shape[0]]**2))/V50 #THD with respect to the fundamental
I_THDr=I_THDf/sqrt(1+(I_THDf**2))# THD with respect to the total distorted signal
V_THDr=V_THDf/sqrt(1+(V_THDf**2))# THD with respect to the total distorted signal
cos_phi=cos(ph_Vd[f_50]-ph_Id[f_50]) #cos(phi), phi: angle between the voltage and current at fe
DPF=cos_phi #displacement power factor (fundamental components only)
Irms=I50*sqrt(1+I_THDf**2)
Vrms=V50*sqrt(1+V_THDf**2)
PF=DPF*(I50/Irms) ##true power factor = DPF * distortion factor (I1/Irms)
Paver=mean(pinst) #average power
S50=V50*I50 #fundamental apparent power [volt-amperes]
P50=S50*cos_phi #active power fundamental
Q50=sqrt(S50**2-P50**2)# VAr power fundamental
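##Worked sanity check of the expressions above (assumed numbers, not measured
##data): with I50=10 Arms and a single harmonic of 2 Arms, I_THDf=2/10=0.2,
##I_THDr=0.2/sqrt(1+0.2**2)~0.196 and Irms=10*sqrt(1+0.2**2)~10.2 A; with
##cos_phi=0.95 the power factor becomes PF=0.95*(10/10.2)~0.93.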
##put more analysis
##################################################################
###Report file generation
if (saverep==True):
with open(reportname, 'wb') as csvfile:
stwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
stwriter.writerow(['Report of :'] + [name])
stwriter.writerow(['Fundamental frequency [Hz]'] + [str(fe)])
stwriter.writerow(['DC current [A]'] + [str(I0)])
stwriter.writerow(['DC voltage [V]'] + [str(V0)])
stwriter.writerow(['Fundamental current rms [A]'] + [str(I50)]+['Fundamental current peak [A]'] + [str(I50*sqrt(2))])
stwriter.writerow(['Fundamental voltage rms [V]'] + [str(V50)]+['Fundamental voltage peak [V]'] + [str(V50*sqrt(2))])
stwriter.writerow(['Current: total RMS'] + [str(Irms)])
stwriter.writerow(['Voltage: total RMS'] + [str(Vrms)])
stwriter.writerow(['Current: THDf'] + [str(I_THDf)])
stwriter.writerow(['Voltage: THDf'] + [str(V_THDf)])
stwriter.writerow(['Current: THDr'] + [str(I_THDr)])
stwriter.writerow(['Voltage: THDr'] + [str(V_THDr)])
stwriter.writerow(['Displacement power factor DPF'] + [str(DPF)])
stwriter.writerow(['Power factor PF'] + [str(PF)])
stwriter.writerow(['Harmonics']+['Ih']+['%I1']+['Vh']+['%V1'])
# Fhs,Ihs,Vhs
Harmo=c_[Fhs,Ihs,(100*Ihs/I50),Vhs,(100*Vhs/V50)]
for i in Harmo:
stwriter.writerow([str(i)])
#################################################################
###########Ploting part
####time domain signals
plt.figure(1)
plt.subplot(311)
plt.plot(time[0:Ln],I)
plt.title('Current [A]')
plt.ylabel('I(t) [A]')
plt.grid(True)
plt.subplot(312)
plt.plot(time[0:Ln],V)
plt.title('Voltage [V]')
plt.ylabel('V(t) [V]')
plt.grid(True)
#plt.xlabel('Time [s]')
plt.subplot(313)
plt.plot(time[0:Ln],pinst)
plt.title('Instantaneous power [W]')
plt.ylabel('P(t) [W]')
plt.xlabel('Time [s]')
plt.grid(True)
###Fourier spectrum
lim=int(Freq.shape[0]/2)
plt.figure(2)
plt.subplot(211)
#plt.plot(Freq[0:lim],F_Id[0:lim])
plt.bar(np.hstack([array([0.0]),Fhs]),np.hstack([array([I0]),Ihs]), width=0.84, label="Current",align="center")
plt.xlim([0,50*50])
plt.ylabel('I($\omega$) [A$_{rms}$]')
plt.grid(True)
plt.subplot(212)
#plt.plot(Freq[0:lim],F_Vd[0:lim])
plt.bar(np.hstack([array([0.0]),Fhs]),np.hstack([array([V0]),Vhs]), width=0.84, label="Current",align="center")
plt.xlim([0,50*50])
plt.ylabel('V($\omega$) [V$_{rms}$]')
plt.grid(True)
plt.xlabel('Frequency [Hz]')
plt.show()
| gpl-2.0 |
ksthesis/gatk | src/main/python/org/broadinstitute/hellbender/gcnvkernel/io/io_metadata.py | 3 | 8160 | import csv
import logging
import numpy as np
import os
import pandas as pd
from typing import List
from . import io_commons
from . import io_consts
from .. import types
from ..structs.metadata import SampleReadDepthMetadata, SamplePloidyMetadata, SampleCoverageMetadata, \
SampleMetadataCollection
_logger = logging.getLogger(__name__)
def write_sample_coverage_metadata(sample_metadata_collection: SampleMetadataCollection,
sample_names: List[str],
output_file: str):
"""Write coverage metadata for all samples in a given `SampleMetadataCollection` to a single .tsv file
in the same order as `sample_names`.
Args:
sample_metadata_collection: an instance of `SampleMetadataCollection`
sample_names: list of samples to process
output_file: output .tsv file
Raises:
AssertionError: if some of the samples do not have `SampleCoverageMetadata` annotation
Returns:
None
"""
assert len(sample_names) > 0
assert sample_metadata_collection.all_samples_have_coverage_metadata(sample_names)
contig_list = sample_metadata_collection.sample_coverage_metadata_dict[sample_names[0]].contig_list
for sample_name in sample_names:
assert sample_metadata_collection.sample_coverage_metadata_dict[sample_name].contig_list == contig_list
parent_path = os.path.dirname(output_file)
io_commons.assert_output_path_writable(parent_path)
with open(output_file, 'w') as tsv_file:
writer = csv.writer(tsv_file, delimiter='\t')
header = [io_consts.sample_name_column_name] + [contig for contig in contig_list]
writer.writerow(header)
for sample_name in sample_names:
sample_coverage_metadata = sample_metadata_collection.get_sample_coverage_metadata(sample_name)
row = ([sample_name] + [repr(sample_coverage_metadata.n_j[j]) for j in range(len(contig_list))])
writer.writerow(row)
def read_sample_coverage_metadata(sample_metadata_collection: SampleMetadataCollection,
input_file: str,
comment=io_consts.default_comment_char,
delimiter=io_consts.default_delimiter_char) -> List[str]:
"""Reads sample coverage metadata from a .tsv file and adds them to `sample_metadata_collection`.
Args:
sample_metadata_collection: collection to which the coverage metadata is to be added
input_file: input sample coverage metadata .tsv file
comment: comment character
delimiter: delimiter character
Returns:
list of samples in the same order as encountered in `input_file`
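Example of the expected .tsv layout (illustrative values only; the first
header is `io_consts.sample_name_column_name`, the remaining headers are
contig names):
    <sample-name column>    1       2       X
    sample_0                5000    4800    2500
    sample_1                5200    4700    2600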
"""
coverage_metadata_pd = pd.read_csv(input_file, delimiter=delimiter, comment=comment)
found_columns_list = [str(column) for column in coverage_metadata_pd.columns.values]
io_commons.assert_mandatory_columns({io_consts.sample_name_column_name}, set(found_columns_list), input_file)
contig_list = found_columns_list.copy()
contig_list.remove(io_consts.sample_name_column_name)
num_contigs = len(contig_list)
sample_names = []
for tup in zip(coverage_metadata_pd[io_consts.sample_name_column_name],
*(coverage_metadata_pd[contig] for contig in contig_list)):
sample_name = str(tup[0])
n_j = np.asarray([int(tup[k + 1]) for k in range(num_contigs)], dtype=types.big_uint)
sample_metadata_collection.add_sample_coverage_metadata(SampleCoverageMetadata(
sample_name, n_j, contig_list))
sample_names.append(sample_name)
return sample_names
def update_sample_metadata_collection_from_ploidy_determination_calls(
sample_metadata_collection: SampleMetadataCollection,
input_calls_path: str,
comment=io_consts.default_comment_char,
delimiter=io_consts.default_delimiter_char):
"""Reads the output of contig ploidy determination tool and updates the given instance of
`SampleMetadataCollection` for read depth and ploidy metadata.
Args:
sample_metadata_collection: the instance of `SampleMetadataCollection` to be updated
input_calls_path: posterior output path of contig ploidy determination tool
comment: comment character
delimiter: delimiter character
Returns:
None
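Expected directory layout (illustrative; folder names start with
`io_consts.sample_folder_prefix` and file names follow the defaults in
`io_consts`):
    input_calls_path/
        <sample_folder_prefix>0/
            sample name .txt file
            sample contig ploidy .tsv file
            sample read depth .tsv file
        <sample_folder_prefix>1/
            ...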
"""
def get_sample_read_depth_metadata(input_path: str) -> SampleReadDepthMetadata:
sample_read_depth_file = os.path.join(input_path, io_consts.default_sample_read_depth_tsv_filename)
assert os.path.exists(sample_read_depth_file), \
"Sample read depth could not be found in the contig ploidy results " \
"located at \"{0}\"".format(input_path)
_sample_name = io_commons.extract_sample_name_from_header(sample_read_depth_file)
sample_read_depth_pd = pd.read_csv(sample_read_depth_file, delimiter=delimiter, comment=comment)
io_commons.assert_mandatory_columns(
SampleReadDepthMetadata.mandatory_tsv_columns,
{str(column) for column in sample_read_depth_pd.columns.values},
sample_read_depth_file)
global_read_depth = sample_read_depth_pd[io_consts.global_read_depth_column_name].values[0]
average_ploidy = sample_read_depth_pd[io_consts.average_ploidy_column_name].values[0]
return SampleReadDepthMetadata(_sample_name, global_read_depth, average_ploidy)
def get_sample_ploidy_metadata(input_path: str) -> SamplePloidyMetadata:
sample_ploidy_file = os.path.join(input_path, io_consts.default_sample_contig_ploidy_tsv_filename)
assert os.path.exists(sample_ploidy_file), \
"Sample ploidy results could not be found in the contig ploidy results " \
"located at \"{0}\"".format(input_path)
_sample_name = io_commons.extract_sample_name_from_header(sample_ploidy_file)
sample_ploidy_pd = pd.read_csv(sample_ploidy_file, delimiter=delimiter, comment=comment)
io_commons.assert_mandatory_columns(
SamplePloidyMetadata.mandatory_tsv_columns,
{str(column) for column in sample_ploidy_pd.columns.values},
sample_ploidy_file)
contig_list = [str(x) for x in sample_ploidy_pd[io_consts.contig_column_name].values]
ploidy_list = [int(x) for x in sample_ploidy_pd[io_consts.ploidy_column_name].values]
ploidy_gq_list = [float(x) for x in sample_ploidy_pd[io_consts.ploidy_gq_column_name].values]
return SamplePloidyMetadata(_sample_name,
np.asarray(ploidy_list, dtype=types.small_uint),
np.asarray(ploidy_gq_list, dtype=types.floatX),
contig_list)
_logger.info("Loading germline contig ploidy and global read depth metadata...")
assert os.path.exists(input_calls_path) and os.path.isdir(input_calls_path), \
"The provided path to ploidy determination results \"{0}\" is not a directory".format(input_calls_path)
subdirs = os.listdir(input_calls_path)
for subdir in subdirs:
if subdir.find(io_consts.sample_folder_prefix) >= 0:
sample_ploidy_results_dir = os.path.join(input_calls_path, subdir)
sample_name = io_commons.get_sample_name_from_txt_file(sample_ploidy_results_dir)
sample_read_depth_metadata = get_sample_read_depth_metadata(sample_ploidy_results_dir)
sample_ploidy_metadata = get_sample_ploidy_metadata(sample_ploidy_results_dir)
assert (sample_read_depth_metadata.sample_name == sample_name and
sample_ploidy_metadata.sample_name == sample_name), \
"Inconsistency detected in the ploidy determination results in {0}: sample name in the .txt " \
"file does not match with sample name in the posterior headers".format(sample_ploidy_results_dir)
sample_metadata_collection.add_sample_read_depth_metadata(sample_read_depth_metadata)
sample_metadata_collection.add_sample_ploidy_metadata(sample_ploidy_metadata)
| bsd-3-clause |
UCSD-E4E/radio_collar_tracker_drone | scripts/visualize_localization.py | 1 | 3058 | #!/usr/bin/env python3
import argparse
import glob
import os
import json
import numpy as np
from matplotlib import pyplot as plt; plt.ion()
from osgeo import gdal
import shapefile
class Ping(object):
"""RCT Ping"""
def __init__(self, lat, lon, amplitude, freq, alt, sequence):
super(Ping, self).__init__()
self.lat = lat
self.lon = lon
self.amplitude = amplitude
self.freq = freq
self.alt = alt
self.seq = sequence
class Estimate(object):
"""RCT Estimate"""
def __init__(self, lat, lon, alt, freq, sequence):
super(Estimate, self).__init__()
self.lat = lat
self.lon = lon
self.alt = alt
self.freq = freq
self.seq = sequence
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Visualizer for localization data')
parser.add_argument('data_dir', help="Data directory")
args = parser.parse_args()
# data_dir = "/media/ntlhui/FA56-CFCD/2019.05.05/RUN_000006/"
data_dir = args.data_dir
if os.path.isdir(data_dir):
data_dir = data_dir + os.path.sep
run_num = int(os.path.basename(os.path.dirname(data_dir)).split('_')[1])
localization_file = os.path.join(data_dir, "LOCALIZE_%06d" % (run_num))
assert(os.path.isfile(localization_file))
localization_data = open(localization_file)
pings = []
estimates = []
freqs = set()
est_seq = 1
ping_seq = 1
for line in localization_data:
data = json.loads(line)
if 'ping' in data.keys():
# got ping
ping = data['ping']
ping_obj = Ping(ping['lat'] / 1e7, ping['lon'] / 1e7, ping['amp'], ping['txf'], 0, ping_seq)
freqs.add(ping['txf'])
pings.append(ping_obj)
ping_seq += 1
if 'estimate' in data.keys():
# got estimate
estimate = data['estimate']
estimate_obj = Estimate(estimate['lat'], estimate['lon'], 0, 0, est_seq)
estimates.append(estimate_obj)
est_seq += 1
for freq in freqs:
h_freq = freq / 1e3
f_pings = [ping for ping in pings if ping.freq == freq]
f_est = [estimate for estimate in estimates]
if len(f_pings) < 5:
continue
writer = shapefile.Writer(os.path.join(data_dir, "LOCATION_%d_ping" % (h_freq)), shapeType = shapefile.POINT)
writer.field('amplitude', 'N', decimal = 10)
writer.field('sequence', 'N')
for ping in f_pings:
writer.point(ping.lon, ping.lat)
writer.record(ping.amplitude, ping.seq)
writer.close()
proj = open(os.path.join(data_dir, "LOCATION_%06d_ping.prj" % (h_freq)), 'w')
epsg1 = 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]]'
proj.write(epsg1)
proj.close()
writer = shapefile.Writer(os.path.join(data_dir, "LOCATION_%d_est" % (h_freq)), shapeType = shapefile.POINT)
writer.field('sequence', 'N')
for est in f_est:
writer.point(est.lon, est.lat)
writer.record(est.seq)
writer.close()
proj = open(os.path.join(data_dir, "LOCATION_%06d_est.prj" % (h_freq)), 'w')
epsg1 = 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]]'
proj.write(epsg1)
proj.close() | gpl-3.0 |
Tong-Chen/scikit-learn | sklearn/tests/test_hmm.py | 31 | 28118 | from __future__ import print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import hmm
from sklearn import mixture
from sklearn.utils.extmath import logsumexp
from sklearn.utils import check_random_state
from nose import SkipTest
rng = np.random.RandomState(0)
np.seterr(all='warn')
class TestBaseHMM(TestCase):
def setUp(self):
self.prng = np.random.RandomState(9)
class StubHMM(hmm._BaseHMM):
def _compute_log_likelihood(self, X):
return self.framelogprob
def _generate_sample_from_state(self):
pass
def _init(self):
pass
def setup_example_hmm(self):
# Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
h = self.StubHMM(2)
h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
h.startprob_ = [0.5, 0.5]
framelogprob = np.log([[0.9, 0.2],
[0.9, 0.2],
[0.1, 0.8],
[0.9, 0.2],
[0.9, 0.2]])
# Add dummy observations to stub.
h.framelogprob = framelogprob
return h, framelogprob
def test_init(self):
h, framelogprob = self.setup_example_hmm()
for params in [('transmat_',), ('startprob_', 'transmat_')]:
d = dict((x[:-1], getattr(h, x)) for x in params)
h2 = self.StubHMM(h.n_components, **d)
self.assertEqual(h.n_components, h2.n_components)
for p in params:
assert_array_almost_equal(getattr(h, p), getattr(h2, p))
def test_set_startprob(self):
h, framelogprob = self.setup_example_hmm()
startprob = np.array([0.0, 1.0])
h.startprob_ = startprob
assert np.allclose(startprob, h.startprob_)
def test_set_transmat(self):
h, framelogprob = self.setup_example_hmm()
transmat = np.array([[0.8, 0.2], [0.0, 1.0]])
h.transmat_ = transmat
assert np.allclose(transmat, h.transmat_)
def test_do_forward_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, fwdlattice = h._do_forward_pass(framelogprob)
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
reffwdlattice = np.array([[0.4500, 0.1000],
[0.3105, 0.0410],
[0.0230, 0.0975],
[0.0408, 0.0150],
[0.0298, 0.0046]])
assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)
def test_do_backward_pass(self):
h, framelogprob = self.setup_example_hmm()
bwdlattice = h._do_backward_pass(framelogprob)
refbwdlattice = np.array([[0.0661, 0.0455],
[0.0906, 0.1503],
[0.4593, 0.2437],
[0.6900, 0.4100],
[1.0000, 1.0000]])
assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)
def test_do_viterbi_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, state_sequence = h._do_viterbi_pass(framelogprob)
refstate_sequence = [0, 0, 1, 0, 0]
assert_array_equal(state_sequence, refstate_sequence)
reflogprob = -4.4590
self.assertAlmostEqual(logprob, reflogprob, places=4)
def test_score_samples(self):
h, framelogprob = self.setup_example_hmm()
nobs = len(framelogprob)
logprob, posteriors = h.score_samples([])
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
refposteriors = np.array([[0.8673, 0.1327],
[0.8204, 0.1796],
[0.3075, 0.6925],
[0.8204, 0.1796],
[0.8673, 0.1327]])
assert_array_almost_equal(posteriors, refposteriors, decimal=4)
def test_hmm_score_samples_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
logprob, hmmposteriors = h.score_samples([])
assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
assert_array_almost_equal(hmmposteriors, gmmposteriors)
def test_hmm_decode_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
viterbi_ll, state_sequence = h.decode([])
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
gmmstate_sequence = gmmposteriors.argmax(axis=1)
assert_array_equal(state_sequence, gmmstate_sequence)
def test_base_hmm_attributes(self):
n_components = 20
startprob = self.prng.rand(n_components)
startprob = startprob / startprob.sum()
transmat = self.prng.rand(n_components, n_components)
transmat /= np.tile(transmat.sum(axis=1)
[:, np.newaxis], (1, n_components))
h = self.StubHMM(n_components)
self.assertEqual(h.n_components, n_components)
h.startprob_ = startprob
assert_array_almost_equal(h.startprob_, startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((n_components - 2, 2)))
h.transmat_ = transmat
assert_array_almost_equal(h.transmat_, transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((n_components - 2, n_components)))
def train_hmm_and_keep_track_of_log_likelihood(hmm, obs, n_iter=1, **kwargs):
hmm.n_iter = 1
hmm.fit(obs)
loglikelihoods = []
for n in range(n_iter):
hmm.n_iter = 1
hmm.init_params = ''
hmm.fit(obs)
loglikelihoods.append(sum(hmm.score(x) for x in obs))
return loglikelihoods
class GaussianHMMBaseTester(object):
def setUp(self):
self.prng = prng = np.random.RandomState(10)
self.n_components = n_components = 3
self.n_features = n_features = 3
self.startprob = prng.rand(n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = prng.rand(n_components, n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, n_components))
self.means = prng.randint(-20, 20, (n_components, n_features))
self.covars = {
'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)),
'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
'full': np.array([make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)
for x in range(n_components)]),
}
self.expanded_covars = {
'spherical': [np.eye(n_features) * cov
for cov in self.covars['spherical']],
'diag': [np.diag(cov) for cov in self.covars['diag']],
'tied': [self.covars['tied']] * n_components,
'full': self.covars['full'],
}
def test_bad_covariance_type(self):
hmm.GaussianHMM(20, self.covariance_type)
self.assertRaises(ValueError, hmm.GaussianHMM, 20,
'badcovariance_type')
def test_score_samples_and_decode(self):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.means_ = self.means
h.covars_ = self.covars[self.covariance_type]
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * h.means_
gaussidx = np.repeat(np.arange(self.n_components), 5)
nobs = len(gaussidx)
obs = self.prng.randn(nobs, self.n_features) + h.means_[gaussidx]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, gaussidx)
def test_sample(self, n=1000):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * self.means
h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
h.startprob_ = self.startprob
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmc', n_iter=5, verbose=False, **kwargs):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.means_ = 20 * self.means
h.covars_ = self.covars[self.covariance_type]
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > -0.8,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, -0.8, self.covariance_type, trainll))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
def test_fit_with_length_one_signal(self):
obs = [self.prng.rand(10, self.n_features),
self.prng.rand(8, self.n_features),
self.prng.rand(1, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: zero-size array to reduction operation maximum which has no identity
h.fit(obs)
def test_fit_with_priors(self, params='stmc', n_iter=5, verbose=False):
startprob_prior = 10 * self.startprob + 2.0
transmat_prior = 10 * self.transmat + 2.0
means_prior = self.means
means_weight = 2.0
covars_weight = 2.0
if self.covariance_type in ('full', 'tied'):
covars_weight += self.n_features
covars_prior = self.covars[self.covariance_type]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.startprob_prior = startprob_prior
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.transmat_prior = transmat_prior
h.means_ = 20 * self.means
h.means_prior = means_prior
h.means_weight = means_weight
h.covars_ = self.covars[self.covariance_type]
h.covars_prior = covars_prior
h.covars_weight = covars_weight
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs[:1])
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test MAP train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
# XXX: Why such a large tolerance?
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_non_ergodic_transmat(self):
startprob = np.array([1, 0, 0, 0, 0])
transmat = np.array([[0.9, 0.1, 0, 0, 0],
[0, 0.9, 0.1, 0, 0],
[0, 0, 0.9, 0.1, 0],
[0, 0, 0, 0.9, 0.1],
[0, 0, 0, 0, 1.0]])
h = hmm.GaussianHMM(n_components=5,
covariance_type='full', startprob=startprob,
transmat=transmat, n_iter=100, init_params='st')
h.means_ = np.zeros((5, 10))
h.covars_ = np.tile(np.identity(10), (5, 1, 1))
obs = [h.sample(10)[0] for _ in range(10)]
h.fit(obs=obs)
class TestGaussianHMMWithSphericalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'spherical'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
"""Using examples from Wikipedia
- http://en.wikipedia.org/wiki/Hidden_Markov_model
- http://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 2 # ('Rainy', 'Sunny')
self.n_symbols = 3 # ('walk', 'shop', 'clean')
self.emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]
self.startprob = [0.6, 0.4]
self.transmat = [[0.7, 0.3], [0.4, 0.6]]
self.h = hmm.MultinomialHMM(self.n_components,
startprob=self.startprob,
transmat=self.transmat)
self.h.emissionprob_ = self.emissionprob
def test_set_emissionprob(self):
h = hmm.MultinomialHMM(self.n_components)
emissionprob = np.array([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])
h.emissionprob = emissionprob
assert np.allclose(emissionprob, h.emissionprob)
def test_wikipedia_viterbi_example(self):
# From http://en.wikipedia.org/wiki/Viterbi_algorithm:
# "This reveals that the observations ['walk', 'shop', 'clean']
# were most likely generated by states ['Sunny', 'Rainy',
# 'Rainy'], with probability 0.01344."
observations = [0, 1, 2]
logprob, state_sequence = self.h.decode(observations)
self.assertAlmostEqual(np.exp(logprob), 0.01344)
assert_array_equal(state_sequence, [1, 0, 0])
def test_decode_map_algorithm(self):
observations = [0, 1, 2]
h = hmm.MultinomialHMM(self.n_components, startprob=self.startprob,
transmat=self.transmat, algorithm="map",)
h.emissionprob_ = self.emissionprob
logprob, state_sequence = h.decode(observations)
assert_array_equal(state_sequence, [1, 0, 0])
def test_predict(self):
observations = [0, 1, 2]
state_sequence = self.h.predict(observations)
posteriors = self.h.predict_proba(observations)
assert_array_equal(state_sequence, [1, 0, 0])
assert_array_almost_equal(posteriors, [
[0.23170303, 0.76829697],
[0.62406281, 0.37593719],
[0.86397706, 0.13602294],
])
def test_attributes(self):
h = hmm.MultinomialHMM(self.n_components)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
h.emissionprob_ = self.emissionprob
assert_array_almost_equal(h.emissionprob_, self.emissionprob)
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
self.assertEqual(h.n_symbols, self.n_symbols)
def test_score_samples(self):
idx = np.repeat(np.arange(self.n_components), 10)
nobs = len(idx)
obs = [int(x) for x in np.floor(self.prng.rand(nobs) * self.n_symbols)]
ll, posteriors = self.h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
def test_sample(self, n=1000):
samples = self.h.sample(n)[0]
self.assertEqual(len(samples), n)
self.assertEqual(len(np.unique(samples)), self.n_symbols)
def test_fit(self, params='ste', n_iter=5, verbose=False, **kwargs):
h = self.h
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.emissionprob_ = hmm.normalize(
self.prng.rand(self.n_components, self.n_symbols), axis=1)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def test_fit_emissionprob(self):
self.test_fit('e')
def test_fit_with_init(self, params='ste', n_iter=5, verbose=False,
**kwargs):
h = self.h
learner = hmm.MultinomialHMM(self.n_components)
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# use init_function to initialize paramerters
learner._init(train_obs, params)
trainll = train_hmm_and_keep_track_of_log_likelihood(
learner, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print()
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
prng = check_random_state(prng)
g = mixture.GMM(n_mix, covariance_type=covariance_type)
g.means_ = prng.randint(-20, 20, (n_mix, n_features))
mincv = 0.1
g.covars_ = {
'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features)),
'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
'full': np.array(
[make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features) for x in range(n_mix)])
}[covariance_type]
g.weights_ = hmm.normalize(prng.rand(n_mix))
return g
class GMMHMMBaseTester(object):
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 3
self.n_mix = 2
self.n_features = 2
self.covariance_type = 'diag'
self.startprob = self.prng.rand(self.n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = self.prng.rand(self.n_components, self.n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, self.n_components))
self.gmms_ = []
for state in range(self.n_components):
self.gmms_.append(create_random_gmm(
self.n_mix, self.n_features, self.covariance_type,
prng=self.prng))
def test_attributes(self):
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_features)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
def test_score_samples_and_decode(self):
h = hmm.GMMHMM(self.n_components, gmms=self.gmms_)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
for g in h.gmms_:
g.means_ *= 20
refstateseq = np.repeat(np.arange(self.n_components), 5)
nobs = len(refstateseq)
obs = [h.gmms_[x].sample(1).flatten() for x in refstateseq]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, refstateseq)
def test_sample(self, n=1000):
h = hmm.GMMHMM(self.n_components, self.covariance_type,
startprob=self.startprob, transmat=self.transmat,
gmms=self.gmms_)
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.gmms_ = self.gmms_
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10, random_state=self.prng)[0]
for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
# XXX: this test appears to check that training log likelihood should
# never be decreasing (up to a tolerance of 0.5, why?) but this is not
# the case when the seed changes.
raise SkipTest("Unstable test: trainll is not always increasing "
"depending on seed")
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
class TestGMMHMMWithDiagCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'diag'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
def test_fit_means(self):
self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'full'
def test_normalize_1D():
A = rng.rand(2) + 1.0
for axis in range(1):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
def test_normalize_3D():
A = rng.rand(2, 2, 2) + 1.0
for axis in range(3):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
| bsd-3-clause |
ucsd-progsys/ml2 | learning/input_old.py | 2 | 1868 | import pandas as pd
import numpy as np
import ntpath
def load_csv(path, filter_no_labels=False, balance_labels=True, only_slice=False):
'''Load feature vectors from a csv file.
Expects a header row with feature columns prefixed with 'F-' and
label columns prefixed with 'L-'.
@param filter_no_labels: if True, filter out samples where all labels are 0.
@param balance_labels: if True, balance the count of samples from
each class of labels, by duplicating samples from under-represented
classes.
@return: (dataframe, feature names, label names)
'''
df = pd.read_csv(path)
filenum = (ntpath.basename(path)).split('.')[0]
df['new_index'] = int(filenum)*(np.ones((len(df.index),1)))
df = df.set_index('new_index')
label_names = [c for c in df.columns if c[0] == 'L']
feature_names = [c for c in df.columns if c[0] == 'F']
if filter_no_labels:
# print df.shape
# filter out vectors with no predictions
criteria = (df[l] == 1.0 for l in label_names)
df = df[reduce(lambda x, acc: x | acc, criteria)]
# print df.shape
if only_slice:
if len(df[df['L-DidChange'] == 1]) == 0:
df = None
return (df, feature_names, label_names)
df = df[df['F-InSlice'] == 1].reset_index(drop=True)
if len(df) == 0 or len(df) == 1:
df = None
return (df, feature_names, label_names)
if len(df[df['L-DidChange'] == 1]) == 0:
print path
df = None
# if balance_labels:
# print df.shape
# classes = df.groupby(label_names)
# max_samples = max(len(c) for _, c in classes)
# print max_samples
# df = pd.concat(c.sample(max_samples, replace=True) for _, c in classes)
# print df.shape
return (df, feature_names, label_names)
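# Hedged usage sketch, not part of the original module; the csv path below is
# an assumption -- it should point at a file that follows the 'F-*' feature /
# 'L-*' label header convention documented in load_csv.
if __name__ == '__main__':
    frame, feature_names, label_names = load_csv('data/0001.csv', only_slice=True)
    if frame is not None:
        # e.g. inspect how many in-slice samples actually changed
        n_changed = frame['L-DidChange'].sum()
        print 'samples: %d, changed: %d' % (len(frame), n_changed)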
| bsd-3-clause |
vermouthmjl/scikit-learn | sklearn/semi_supervised/label_propagation.py | 14 | 15965 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
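# How the helper above is used: BaseLabelPropagation.fit below iterates
#     Y <- clamp_weights * (graph_matrix . Y) + y_static
# and stops once _not_converged(Y_new, Y_old, tol) is False or max_iter
# iterations have elapsed.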
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
dboyliao/ibis | ibis/sql/tests/test_compiler.py | 2 | 49794 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ibis
from ibis.sql.compiler import build_ast, to_sql
from ibis.expr.tests.mocks import MockConnection
from ibis.compat import unittest
import ibis.common as com
import ibis.expr.api as api
import ibis.expr.operations as ops
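# These tests build ibis expressions against MockConnection's fake tables and
# compare the SQL emitted by build_ast/to_sql with expected Impala SQL strings.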
class TestASTBuilder(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
def test_ast_with_projection_join_filter(self):
table = self.con.table('test1')
table2 = self.con.table('test2')
filter_pred = table['f'] > 0
table3 = table[filter_pred]
join_pred = table3['g'] == table2['key']
joined = table2.inner_join(table3, [join_pred])
result = joined[[table3, table2['value']]]
ast = build_ast(result)
stmt = ast.queries[0]
def foo():
table3 = table[filter_pred]
joined = table2.inner_join(table3, [join_pred])
result = joined[[table3, table2['value']]]
return result
assert len(stmt.select_set) == 2
assert len(stmt.where) == 1
assert stmt.where[0] is filter_pred
# Check that the join has been rebuilt to only include the root tables
tbl = stmt.table_set
tbl_node = tbl.op()
assert isinstance(tbl_node, ops.InnerJoin)
assert tbl_node.left is table2
assert tbl_node.right is table
# table expression substitution has been made in the predicate
assert tbl_node.predicates[0].equals(table['g'] == table2['key'])
def test_ast_with_aggregation_join_filter(self):
table = self.con.table('test1')
table2 = self.con.table('test2')
filter_pred = table['f'] > 0
table3 = table[filter_pred]
join_pred = table3['g'] == table2['key']
joined = table2.inner_join(table3, [join_pred])
met1 = (table3['f'] - table2['value']).mean().name('foo')
result = joined.aggregate([met1, table3['f'].sum().name('bar')],
by=[table3['g'], table2['key']])
ast = build_ast(result)
stmt = ast.queries[0]
# hoisted metrics
ex_metrics = [(table['f'] - table2['value']).mean().name('foo'),
table['f'].sum().name('bar')]
ex_by = [table['g'], table2['key']]
# hoisted join and aggregate
expected_table_set = \
table2.inner_join(table, [table['g'] == table2['key']])
assert stmt.table_set.equals(expected_table_set)
# Check various exprs
for res, ex in zip(stmt.select_set, ex_by + ex_metrics):
assert res.equals(ex)
for res, ex in zip(stmt.group_by, ex_by):
assert stmt.select_set[res].equals(ex)
# Check we got the filter
assert len(stmt.where) == 1
assert stmt.where[0].equals(filter_pred)
def test_sort_by(self):
table = self.con.table('star1')
what = table.sort_by('f')
result = to_sql(what)
expected = """SELECT *
FROM star1
ORDER BY `f`"""
assert result == expected
what = table.sort_by(('f', 0))
result = to_sql(what)
expected = """SELECT *
FROM star1
ORDER BY `f` DESC"""
assert result == expected
what = table.sort_by(['c', ('f', 0)])
result = to_sql(what)
expected = """SELECT *
FROM star1
ORDER BY `c`, `f` DESC"""
assert result == expected
def test_limit(self):
table = self.con.table('star1').limit(10)
result = to_sql(table)
expected = """SELECT *
FROM star1
LIMIT 10"""
assert result == expected
table = self.con.table('star1').limit(10, offset=5)
result = to_sql(table)
expected = """SELECT *
FROM star1
LIMIT 10 OFFSET 5"""
assert result == expected
# Put the limit in a couple places in the stack
table = self.con.table('star1')
table = table[table.f > 0].limit(10)
result = to_sql(table)
expected = """SELECT *
FROM star1
WHERE `f` > 0
LIMIT 10"""
assert result == expected
table = self.con.table('star1')
# Semantically, this should produce a subquery
table = table.limit(10)
table = table[table.f > 0]
result2 = to_sql(table)
expected2 = """SELECT *
FROM (
SELECT *
FROM star1
LIMIT 10
) t0
WHERE `f` > 0"""
assert result2 == expected2
def test_join_with_limited_table(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
limited = t1.limit(100)
joined = (limited.inner_join(t2, [limited.foo_id == t2.foo_id])
[[limited]])
result = to_sql(joined)
expected = """SELECT t0.*
FROM (
SELECT *
FROM star1
LIMIT 100
) t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
def test_sort_by_on_limit_yield_subquery(self):
# x.limit(...).sort_by(...)
# is semantically different from
# x.sort_by(...).limit(...)
# and will often yield different results
t = self.con.table('functional_alltypes')
expr = (t.group_by('string_col')
.aggregate([t.count().name('nrows')])
.limit(5)
.sort_by('string_col'))
result = to_sql(expr)
expected = """SELECT *
FROM (
SELECT `string_col`, count(*) AS `nrows`
FROM functional_alltypes
GROUP BY 1
LIMIT 5
) t0
ORDER BY `string_col`"""
assert result == expected
def test_multiple_limits(self):
t = self.con.table('functional_alltypes')
expr = t.limit(20).limit(10)
stmt = build_ast(expr).queries[0]
assert stmt.limit['n'] == 10
def test_top_convenience(self):
# x.top(10, by=field)
# x.top(10, by=[field1, field2])
pass
def test_self_aggregate_in_predicate(self):
# Per ibis #43
pass
class TestNonTabularResults(unittest.TestCase):
"""
"""
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_simple_scalar_aggregates(self):
from pandas import DataFrame
# Things like table.column.{sum, mean, ...}()
table = self.con.table('alltypes')
expr = table[table.c > 0].f.sum()
ast = build_ast(expr)
query = ast.queries[0]
sql_query = query.compile()
expected = """SELECT sum(`f`) AS `tmp`
FROM alltypes
WHERE `c` > 0"""
assert sql_query == expected
# Maybe the result handler should act on the cursor. Not sure.
handler = query.result_handler
output = DataFrame({'tmp': [5]})
assert handler(output) == 5
def test_table_column_unbox(self):
from pandas import DataFrame
table = self.table
m = table.f.sum().name('total')
agged = table[table.c > 0].group_by('g').aggregate([m])
expr = agged.g
ast = build_ast(expr)
query = ast.queries[0]
sql_query = query.compile()
expected = """SELECT `g`, sum(`f`) AS `total`
FROM alltypes
WHERE `c` > 0
GROUP BY 1"""
assert sql_query == expected
# Maybe the result handler should act on the cursor. Not sure.
handler = query.result_handler
output = DataFrame({'g': ['foo', 'bar', 'baz']})
assert (handler(output) == output['g']).all()
def test_complex_array_expr_projection(self):
# May require finding the base table and forming a projection.
expr = (self.table.group_by('g')
.aggregate([self.table.count().name('count')]))
expr2 = expr.g.cast('double')
query = to_sql(expr2)
expected = """SELECT CAST(`g` AS double) AS `tmp`
FROM (
SELECT `g`, count(*) AS `count`
FROM alltypes
GROUP BY 1
) t0"""
assert query == expected
def test_scalar_exprs_no_table_refs(self):
expr1 = ibis.now()
expected1 = """\
SELECT now() AS `tmp`"""
expr2 = ibis.literal(1) + ibis.literal(2)
expected2 = """\
SELECT 1 + 2 AS `tmp`"""
cases = [
(expr1, expected1),
(expr2, expected2)
]
for expr, expected in cases:
result = to_sql(expr)
assert result == expected
def test_expr_list_no_table_refs(self):
exlist = ibis.api.expr_list([ibis.literal(1).name('a'),
ibis.now().name('b'),
ibis.literal(2).log().name('c')])
result = to_sql(exlist)
expected = """\
SELECT 1 AS `a`, now() AS `b`, ln(2) AS `c`"""
assert result == expected
def test_isnull_case_expr_rewrite_failure(self):
# #172, case expression that was not being properly converted into an
# aggregation
reduction = self.table.g.isnull().ifelse(1, 0).sum()
result = to_sql(reduction)
expected = """\
SELECT sum(CASE WHEN `g` IS NULL THEN 1 ELSE 0 END) AS `tmp`
FROM alltypes"""
assert result == expected
def _get_query(expr):
ast = build_ast(expr)
return ast.queries[0]
nation = api.table([
('n_regionkey', 'int32'),
('n_nationkey', 'int32'),
('n_name', 'string')
], 'nation')
region = api.table([
('r_regionkey', 'int32'),
('r_name', 'string')
], 'region')
customer = api.table([
('c_nationkey', 'int32'),
('c_name', 'string'),
('c_acctbal', 'double')
], 'customer')
class TestSelectSQL(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
def test_nameless_table(self):
# Ensure that user gets some kind of sensible error
nameless = api.table([('key', 'string')])
self.assertRaises(com.RelationError, to_sql, nameless)
with_name = api.table([('key', 'string')], name='baz')
result = to_sql(with_name)
assert result == 'SELECT *\nFROM baz'
def test_physical_table_reference_translate(self):
# If all of an expression's table leaves reference database tables, verify
# that we translate correctly
table = self.con.table('alltypes')
query = _get_query(table)
sql_string = query.compile()
expected = "SELECT *\nFROM alltypes"
assert sql_string == expected
def test_simple_join_formatting(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
pred = t1['foo_id'] == t2['foo_id']
pred2 = t1['bar_id'] == t2['foo_id']
cases = [
(t1.inner_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""),
(t1.left_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
LEFT OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""),
(t1.outer_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
FULL OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""),
# multiple predicates
(t1.inner_join(t2, [pred, pred2])[[t1]],
"""SELECT t0.*
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id` AND
t0.`bar_id` = t1.`foo_id`"""),
]
for expr, expected_sql in cases:
result_sql = to_sql(expr)
assert result_sql == expected_sql
def test_multiple_join_cases(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
t3 = self.con.table('star3')
predA = t1['foo_id'] == t2['foo_id']
predB = t1['bar_id'] == t3['bar_id']
what = (t1.left_join(t2, [predA])
.inner_join(t3, [predB])
.projection([t1, t2['value1'], t3['value2']]))
result_sql = to_sql(what)
expected_sql = """SELECT t0.*, t1.`value1`, t2.`value2`
FROM star1 t0
LEFT OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`
INNER JOIN star3 t2
ON t0.`bar_id` = t2.`bar_id`"""
assert result_sql == expected_sql
def test_join_between_joins(self):
t1 = api.table([
('key1', 'string'),
('key2', 'string'),
('value1', 'double'),
], 'first')
t2 = api.table([
('key1', 'string'),
('value2', 'double'),
], 'second')
t3 = api.table([
('key2', 'string'),
('key3', 'string'),
('value3', 'double'),
], 'third')
t4 = api.table([
('key3', 'string'),
('value4', 'double')
], 'fourth')
left = t1.inner_join(t2, [('key1', 'key1')])[t1, t2.value2]
right = t3.inner_join(t4, [('key3', 'key3')])[t3, t4.value4]
joined = left.inner_join(right, [('key2', 'key2')])
# At one point, the expression simplification was resulting in bad refs
# here (right.value3 referencing the table inside the right join)
exprs = [left, right.value3, right.value4]
projected = joined.projection(exprs)
result = to_sql(projected)
expected = """SELECT t0.*, t1.`value3`, t1.`value4`
FROM (
SELECT t2.*, t3.`value2`
FROM `first` t2
INNER JOIN second t3
ON t2.`key1` = t3.`key1`
) t0
INNER JOIN (
SELECT t2.*, t3.`value4`
FROM third t2
INNER JOIN fourth t3
ON t2.`key3` = t3.`key3`
) t1
ON t0.`key2` = t1.`key2`"""
assert result == expected
def test_join_just_materialized(self):
t1 = self.con.table('tpch_nation')
t2 = self.con.table('tpch_region')
t3 = self.con.table('tpch_customer')
# GH #491
joined = (t1.inner_join(t2, t1.n_regionkey == t2.r_regionkey)
.inner_join(t3, t1.n_nationkey == t3.c_nationkey))
result = to_sql(joined)
expected = """SELECT *
FROM tpch_nation t0
INNER JOIN tpch_region t1
ON t0.`n_regionkey` = t1.`r_regionkey`
INNER JOIN tpch_customer t2
ON t0.`n_nationkey` = t2.`c_nationkey`"""
assert result == expected
result = to_sql(joined.materialize())
assert result == expected
def test_join_no_predicates_for_impala(self):
# Impala requires that joins without predicates be written explicitly
# as CROSS JOIN, since result sets can accidentally get too large if a
# query is executed before predicates are written
t1 = self.con.table('star1')
t2 = self.con.table('star2')
joined2 = t1.cross_join(t2)[[t1]]
expected = """SELECT t0.*
FROM star1 t0
CROSS JOIN star2 t1"""
result2 = to_sql(joined2)
assert result2 == expected
for jtype in ['inner_join', 'left_join', 'outer_join']:
joined = getattr(t1, jtype)(t2)[[t1]]
result = to_sql(joined)
assert result == expected
def test_semi_anti_joins(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
joined = t1.semi_join(t2, [t1.foo_id == t2.foo_id])[[t1]]
result = to_sql(joined)
expected = """SELECT t0.*
FROM star1 t0
LEFT SEMI JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
joined = t1.anti_join(t2, [t1.foo_id == t2.foo_id])[[t1]]
result = to_sql(joined)
expected = """SELECT t0.*
FROM star1 t0
LEFT ANTI JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
def test_self_reference_simple(self):
t1 = self.con.table('star1')
result_sql = to_sql(t1.view())
expected_sql = "SELECT *\nFROM star1"
assert result_sql == expected_sql
def test_join_self_reference(self):
t1 = self.con.table('star1')
t2 = t1.view()
result = t1.inner_join(t2, [t1.foo_id == t2.bar_id])[[t1]]
result_sql = to_sql(result)
expected_sql = """SELECT t0.*
FROM star1 t0
INNER JOIN star1 t1
ON t0.`foo_id` = t1.`bar_id`"""
assert result_sql == expected_sql
def test_join_projection_subquery_broken_alias(self):
# From an observed bug, derived from tpch tables
geo = (nation.inner_join(region, [('n_regionkey', 'r_regionkey')])
[nation.n_nationkey,
nation.n_name.name('nation'),
region.r_name.name('region')])
expr = (geo.inner_join(customer, [('n_nationkey', 'c_nationkey')])
[customer, geo])
result = to_sql(expr)
expected = """SELECT t1.*, t0.*
FROM (
SELECT t2.`n_nationkey`, t2.`n_name` AS `nation`, t3.`r_name` AS `region`
FROM nation t2
INNER JOIN region t3
ON t2.`n_regionkey` = t3.`r_regionkey`
) t0
INNER JOIN customer t1
ON t0.`n_nationkey` = t1.`c_nationkey`"""
assert result == expected
def test_where_simple_comparisons(self):
t1 = self.con.table('star1')
what = t1.filter([t1.f > 0, t1.c < t1.f * 2])
result = to_sql(what)
expected = """SELECT *
FROM star1
WHERE `f` > 0 AND
`c` < (`f` * 2)"""
assert result == expected
def test_where_in_array_literal(self):
# e.g.
# where string_col in (v1, v2, v3)
raise unittest.SkipTest
def test_where_with_join(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
# This also tests some cases of predicate pushdown
what = (t1.inner_join(t2, [t1.foo_id == t2.foo_id])
.projection([t1, t2.value1, t2.value3])
.filter([t1.f > 0, t2.value3 < 1000]))
what2 = (t1.inner_join(t2, [t1.foo_id == t2.foo_id])
.filter([t1.f > 0, t2.value3 < 1000])
.projection([t1, t2.value1, t2.value3]))
expected_sql = """SELECT t0.*, t1.`value1`, t1.`value3`
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`
WHERE t0.`f` > 0 AND
t1.`value3` < 1000"""
result_sql = to_sql(what)
assert result_sql == expected_sql
result2_sql = to_sql(what2)
assert result2_sql == expected_sql
def test_where_no_pushdown_possible(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
joined = (t1.inner_join(t2, [t1.foo_id == t2.foo_id])
[t1, (t1.f - t2.value1).name('diff')])
filtered = joined[joined.diff > 1]
# TODO: I'm not sure if this is exactly what we want
expected_sql = """SELECT *
FROM (
SELECT t0.*, t0.`f` - t1.`value1` AS `diff`
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`
WHERE t0.`f` > 0 AND
t1.`value3` < 1000
)
WHERE `diff` > 1"""
raise unittest.SkipTest
result_sql = to_sql(filtered)
assert result_sql == expected_sql
def test_where_with_between(self):
t = self.con.table('alltypes')
what = t.filter([t.a > 0, t.f.between(0, 1)])
result = to_sql(what)
expected = """SELECT *
FROM alltypes
WHERE `a` > 0 AND
`f` BETWEEN 0 AND 1"""
assert result == expected
def test_where_analyze_scalar_op(self):
# root cause of #310
table = self.con.table('functional_alltypes')
expr = (table.filter([table.timestamp_col <
(ibis.timestamp('2010-01-01') + ibis.month(3)),
table.timestamp_col < (ibis.now() +
ibis.day(10))])
.count())
result = to_sql(expr)
expected = """\
SELECT count(*) AS `tmp`
FROM functional_alltypes
WHERE `timestamp_col` < months_add('2010-01-01 00:00:00', 3) AND
`timestamp_col` < days_add(now(), 10)"""
assert result == expected
def test_simple_aggregate_query(self):
t1 = self.con.table('star1')
cases = [
(t1.aggregate([t1['f'].sum().name('total')],
[t1['foo_id']]),
"""SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1"""),
(t1.aggregate([t1['f'].sum().name('total')],
['foo_id', 'bar_id']),
"""SELECT `foo_id`, `bar_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1, 2""")
]
for expr, expected_sql in cases:
result_sql = to_sql(expr)
assert result_sql == expected_sql
def test_aggregate_having(self):
# Filtering post-aggregation predicate
t1 = self.con.table('star1')
total = t1.f.sum().name('total')
metrics = [total]
expr = t1.aggregate(metrics, by=['foo_id'],
having=[total > 10])
result = to_sql(expr)
expected = """SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1
HAVING sum(`f`) > 10"""
assert result == expected
expr = t1.aggregate(metrics, by=['foo_id'],
having=[t1.count() > 100])
result = to_sql(expr)
expected = """SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1
HAVING count(*) > 100"""
assert result == expected
def test_aggregate_table_count_metric(self):
expr = self.con.table('star1').count()
result = to_sql(expr)
expected = """SELECT count(*) AS `tmp`
FROM star1"""
assert result == expected
# count on more complicated table
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
join_expr = region.r_regionkey == nation.n_regionkey
joined = region.inner_join(nation, join_expr)
table_ref = joined[nation, region.r_name.name('region')]
expr = table_ref.count()
result = to_sql(expr)
expected = """SELECT count(*) AS `tmp`
FROM (
SELECT t2.*, t1.`r_name` AS `region`
FROM tpch_region t1
INNER JOIN tpch_nation t2
ON t1.`r_regionkey` = t2.`n_regionkey`
) t0"""
assert result == expected
def test_expr_template_field_name_binding(self):
# Given an expression with no concrete links to actual database tables,
# indicate a mapping between the distinct unbound table leaves of the
# expression and some database tables with compatible schemas but
# potentially different column names
pass
def test_no_aliases_needed(self):
table = api.table([
('key1', 'string'),
('key2', 'string'),
('value', 'double')
])
expr = table.aggregate([table['value'].sum().name('total')],
by=['key1', 'key2'])
query = _get_query(expr)
context = query.context
assert not context.need_aliases()
def test_table_names_overlap_default_aliases(self):
# see discussion in #104; this actually is not needed for query
# correctness, and only makes the generated SQL nicer
raise unittest.SkipTest
t0 = api.table([
('key', 'string'),
('v1', 'double')
], 't1')
t1 = api.table([
('key', 'string'),
('v2', 'double')
], 't0')
expr = t0.join(t1, t0.key == t1.key)[t0.key, t0.v1, t1.v2]
result = to_sql(expr)
expected = """\
SELECT t2.`key`, t2.`v1`, t3.`v2`
FROM t0 t2
INNER JOIN t1 t3
ON t2.`key` = t3.`key`"""
assert result == expected
def test_context_aliases_multiple_join(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
t3 = self.con.table('star3')
expr = (t1.left_join(t2, [t1['foo_id'] == t2['foo_id']])
.inner_join(t3, [t1['bar_id'] == t3['bar_id']])
[[t1, t2['value1'], t3['value2']]])
query = _get_query(expr)
context = query.context
assert context.get_alias(t1) == 't0'
assert context.get_alias(t2) == 't1'
assert context.get_alias(t3) == 't2'
def test_fuse_projections(self):
table = api.table([
('foo', 'int32'),
('bar', 'int64'),
('value', 'double')
], name='tbl')
# Cases where we project in both cases using the base table reference
f1 = (table['foo'] + table['bar']).name('baz')
pred = table['value'] > 0
table2 = table[table, f1]
table2_filtered = table2[pred]
f2 = (table2['foo'] * 2).name('qux')
f3 = (table['foo'] * 2).name('qux')
table3 = table2.projection([table2, f2])
# fusion works even if there's a filter
table3_filtered = table2_filtered.projection([table2, f2])
expected = table[table, f1, f3]
expected2 = table[pred][table, f1, f3]
assert table3.equals(expected)
assert table3_filtered.equals(expected2)
ex_sql = """SELECT *, `foo` + `bar` AS `baz`, `foo` * 2 AS `qux`
FROM tbl"""
ex_sql2 = """SELECT *, `foo` + `bar` AS `baz`, `foo` * 2 AS `qux`
FROM tbl
WHERE `value` > 0"""
table3_sql = to_sql(table3)
table3_filt_sql = to_sql(table3_filtered)
assert table3_sql == ex_sql
assert table3_filt_sql == ex_sql2
# Use the intermediate table refs
table3 = table2.projection([table2, f2])
# fusion works even if there's a filter
table3_filtered = table2_filtered.projection([table2, f2])
expected = table[table, f1, f3]
expected2 = table[pred][table, f1, f3]
assert table3.equals(expected)
assert table3_filtered.equals(expected2)
def test_bug_project_multiple_times(self):
# 108
customer = self.con.table('tpch_customer')
nation = self.con.table('tpch_nation')
region = self.con.table('tpch_region')
joined = (
customer.inner_join(nation,
[customer.c_nationkey == nation.n_nationkey])
.inner_join(region,
[nation.n_regionkey == region.r_regionkey])
)
proj1 = [customer, nation.n_name, region.r_name]
step1 = joined[proj1]
topk_by = step1.c_acctbal.cast('double').sum()
pred = step1.n_name.topk(10, by=topk_by)
proj_exprs = [step1.c_name, step1.r_name, step1.n_name]
step2 = step1[pred]
expr = step2.projection(proj_exprs)
# it works!
result = to_sql(expr)
expected = """\
SELECT `c_name`, `r_name`, `n_name`
FROM (
SELECT t1.*, t2.`n_name`, t3.`r_name`
FROM tpch_customer t1
INNER JOIN tpch_nation t2
ON t1.`c_nationkey` = t2.`n_nationkey`
INNER JOIN tpch_region t3
ON t2.`n_regionkey` = t3.`r_regionkey`
LEFT SEMI JOIN (
SELECT t2.`n_name`, sum(CAST(t1.`c_acctbal` AS double)) AS `sum`
FROM tpch_customer t1
INNER JOIN tpch_nation t2
ON t1.`c_nationkey` = t2.`n_nationkey`
INNER JOIN tpch_region t3
ON t2.`n_regionkey` = t3.`r_regionkey`
GROUP BY 1
ORDER BY `sum` DESC
LIMIT 10
) t4
ON t2.`n_name` = t4.`n_name`
) t0"""
assert result == expected
def test_aggregate_projection_subquery(self):
t = self.con.table('alltypes')
proj = t[t.f > 0][t, (t.a + t.b).name('foo')]
def agg(x):
return x.aggregate([x.foo.sum().name('foo total')], by=['g'])
# predicate gets pushed down
filtered = proj[proj.g == 'bar']
result = to_sql(filtered)
expected = """SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE `f` > 0 AND
`g` = 'bar'"""
assert result == expected
agged = agg(filtered)
result = to_sql(agged)
expected = """SELECT `g`, sum(`foo`) AS `foo total`
FROM (
SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE `f` > 0 AND
`g` = 'bar'
) t0
GROUP BY 1"""
assert result == expected
# Pushdown is not possible (in Impala, Postgres, others)
agged2 = agg(proj[proj.foo < 10])
result = to_sql(agged2)
expected = """SELECT t0.`g`, sum(t0.`foo`) AS `foo total`
FROM (
SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE `f` > 0
) t0
WHERE t0.`foo` < 10
GROUP BY 1"""
assert result == expected
def test_subquery_aliased(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
agged = t1.aggregate([t1.f.sum().name('total')], by=['foo_id'])
what = (agged.inner_join(t2, [agged.foo_id == t2.foo_id])
[agged, t2.value1])
result = to_sql(what)
expected = """SELECT t0.*, t1.`value1`
FROM (
SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1
) t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
def test_double_nested_subquery_no_aliases(self):
# We don't require any table aliasing anywhere
t = api.table([
('key1', 'string'),
('key2', 'string'),
('key3', 'string'),
('value', 'double')
], 'foo_table')
agg1 = t.aggregate([t.value.sum().name('total')],
by=['key1', 'key2', 'key3'])
agg2 = agg1.aggregate([agg1.total.sum().name('total')],
by=['key1', 'key2'])
agg3 = agg2.aggregate([agg2.total.sum().name('total')],
by=['key1'])
result = to_sql(agg3)
expected = """SELECT `key1`, sum(`total`) AS `total`
FROM (
SELECT `key1`, `key2`, sum(`total`) AS `total`
FROM (
SELECT `key1`, `key2`, `key3`, sum(`value`) AS `total`
FROM foo_table
GROUP BY 1, 2, 3
) t1
GROUP BY 1, 2
) t0
GROUP BY 1"""
assert result == expected
def test_aggregate_projection_alias_bug(self):
# Observed in use
t1 = self.con.table('star1')
t2 = self.con.table('star2')
what = (t1.inner_join(t2, [t1.foo_id == t2.foo_id])
[[t1, t2.value1]])
what = what.aggregate([what.value1.sum().name('total')],
by=[what.foo_id])
# TODO: Not fusing the aggregation with the projection yet
result = to_sql(what)
expected = """SELECT `foo_id`, sum(`value1`) AS `total`
FROM (
SELECT t1.*, t2.`value1`
FROM star1 t1
INNER JOIN star2 t2
ON t1.`foo_id` = t2.`foo_id`
) t0
GROUP BY 1"""
assert result == expected
def test_aggregate_fuse_with_projection(self):
# see above test case
pass
def test_subquery_used_for_self_join(self):
# There could be cases that should look in SQL like
# WITH t0 as (some subquery)
# select ...
# from t0 t1
# join t0 t2
# on t1.kind = t2.subkind
# ...
# However, the Ibis code will simply have an expression (projection or
# aggregation, say) built on top of the subquery expression, so we need
# to extract the subquery unit (we see that it appears multiple times
# in the tree).
t = self.con.table('alltypes')
agged = t.aggregate([t.f.sum().name('total')], by=['g', 'a', 'b'])
view = agged.view()
metrics = [(agged.total - view.total).max().name('metric')]
reagged = (agged.inner_join(view, [agged.a == view.b])
.aggregate(metrics, by=[agged.g]))
result = to_sql(reagged)
expected = """WITH t0 AS (
SELECT `g`, `a`, `b`, sum(`f`) AS `total`
FROM alltypes
GROUP BY 1, 2, 3
)
SELECT t0.`g`, max(t0.`total` - t1.`total`) AS `metric`
FROM t0
INNER JOIN t0 t1
ON t0.`a` = t1.`b`
GROUP BY 1"""
assert result == expected
def test_subquery_factor_correlated_subquery(self):
# #173, #183 and other issues
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
customer = self.con.table('tpch_customer')
orders = self.con.table('tpch_orders')
fields_of_interest = [customer,
region.r_name.name('region'),
orders.o_totalprice.name('amount'),
orders.o_orderdate
.cast('timestamp').name('odate')]
tpch = (region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey)
[fields_of_interest])
# Self-reference + correlated subquery complicates things
t2 = tpch.view()
conditional_avg = t2[t2.region == tpch.region].amount.mean()
amount_filter = tpch.amount > conditional_avg
expr = tpch[amount_filter].limit(10)
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT t5.*, t1.`r_name` AS `region`, t3.`o_totalprice` AS `amount`,
CAST(t3.`o_orderdate` AS timestamp) AS `odate`
FROM tpch_region t1
INNER JOIN tpch_nation t2
ON t1.`r_regionkey` = t2.`n_regionkey`
INNER JOIN tpch_customer t5
ON t5.`c_nationkey` = t2.`n_nationkey`
INNER JOIN tpch_orders t3
ON t3.`o_custkey` = t5.`c_custkey`
)
SELECT t0.*
FROM t0
WHERE t0.`amount` > (
SELECT avg(t4.`amount`) AS `tmp`
FROM t0 t4
WHERE t4.`region` = t0.`region`
)
LIMIT 10"""
assert result == expected
def test_self_join_subquery_distinct_equal(self):
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
j1 = (region.join(nation, region.r_regionkey == nation.n_regionkey)
[region, nation])
j2 = (region.join(nation, region.r_regionkey == nation.n_regionkey)
[region, nation].view())
expr = (j1.join(j2, j1.r_regionkey == j2.r_regionkey)
[j1.r_name, j2.n_name])
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT t2.*, t3.*
FROM tpch_region t2
INNER JOIN tpch_nation t3
ON t2.`r_regionkey` = t3.`n_regionkey`
)
SELECT t0.`r_name`, t1.`n_name`
FROM t0
INNER JOIN t0 t1
ON t0.`r_regionkey` = t1.`r_regionkey`"""
assert result == expected
def test_limit_with_self_join(self):
t = self.con.table('functional_alltypes')
t2 = t.view()
expr = t.join(t2, t.tinyint_col < t2.timestamp_col.minute()).count()
# it works
result = to_sql(expr)
expected = """\
SELECT count(*) AS `tmp`
FROM functional_alltypes t0
INNER JOIN functional_alltypes t1
ON t0.`tinyint_col` < extract(t1.`timestamp_col`, 'minute')"""
assert result == expected
def test_cte_factor_distinct_but_equal(self):
t = self.con.table('alltypes')
tt = self.con.table('alltypes')
expr1 = t.group_by('g').aggregate(t.f.sum().name('metric'))
expr2 = tt.group_by('g').aggregate(tt.f.sum().name('metric')).view()
expr = expr1.join(expr2, expr1.g == expr2.g)[[expr1]]
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT `g`, sum(`f`) AS `metric`
FROM alltypes
GROUP BY 1
)
SELECT t0.*
FROM t0
INNER JOIN t0 t1
ON t0.`g` = t1.`g`"""
assert result == expected
def test_tpch_self_join_failure(self):
# duplicating the integration test here
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
customer = self.con.table('tpch_customer')
orders = self.con.table('tpch_orders')
fields_of_interest = [
region.r_name.name('region'),
nation.n_name.name('nation'),
orders.o_totalprice.name('amount'),
orders.o_orderdate.cast('timestamp').name('odate')]
joined_all = (
region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey)
[fields_of_interest])
year = joined_all.odate.year().name('year')
total = joined_all.amount.sum().cast('double').name('total')
annual_amounts = (joined_all
.group_by(['region', year])
.aggregate(total))
current = annual_amounts
prior = annual_amounts.view()
yoy_change = (current.total - prior.total).name('yoy_change')
yoy = (current.join(prior, current.year == (prior.year - 1))
[current.region, current.year, yoy_change])
to_sql(yoy)
def test_extract_subquery_nested_lower(self):
# We may have a join between two tables requiring subqueries, and
# buried inside these there may be a common subquery. Let's test that
# we find it and pull it out to the top level to avoid repeating
# ourselves.
pass
def test_subquery_in_filter_predicate(self):
# E.g. comparing against some scalar aggregate value. See Ibis #43
t1 = self.con.table('star1')
pred = t1.f > t1.f.mean()
expr = t1[pred]
# This brought out another expression rewriting bug, since the filtered
# table isn't found elsewhere in the expression.
pred2 = t1.f > t1[t1.foo_id == 'foo'].f.mean()
expr2 = t1[pred2]
result = to_sql(expr)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT avg(`f`) AS `tmp`
FROM star1
)"""
assert result == expected
result = to_sql(expr2)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT avg(`f`) AS `tmp`
FROM star1
WHERE `foo_id` = 'foo'
)"""
assert result == expected
def test_filter_subquery_derived_reduction(self):
t1 = self.con.table('star1')
# Reduction can be nested inside some scalar expression
pred3 = t1.f > t1[t1.foo_id == 'foo'].f.mean().log()
pred4 = t1.f > (t1[t1.foo_id == 'foo'].f.mean().log() + 1)
expr3 = t1[pred3]
result = to_sql(expr3)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT ln(avg(`f`)) AS `tmp`
FROM star1
WHERE `foo_id` = 'foo'
)"""
assert result == expected
expr4 = t1[pred4]
result = to_sql(expr4)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT ln(avg(`f`)) + 1 AS `tmp`
FROM star1
WHERE `foo_id` = 'foo'
)"""
assert result == expected
def test_topk_operation_to_semi_join(self):
# TODO: top K with filter in place
table = api.table([
('foo', 'string'),
('bar', 'string'),
('city', 'string'),
('v1', 'double'),
('v2', 'double'),
], 'tbl')
what = table.city.topk(10, by=table.v2.mean())
filtered = table[what]
query = to_sql(filtered)
expected = """SELECT t0.*
FROM tbl t0
LEFT SEMI JOIN (
SELECT `city`, avg(`v2`) AS `mean`
FROM tbl
GROUP BY 1
ORDER BY `mean` DESC
LIMIT 10
) t1
ON t0.`city` = t1.`city`"""
assert query == expected
# Test the default metric (count)
what = table.city.topk(10)
filtered2 = table[what]
query = to_sql(filtered2)
expected = """SELECT t0.*
FROM tbl t0
LEFT SEMI JOIN (
SELECT `city`, count(`city`) AS `count`
FROM tbl
GROUP BY 1
ORDER BY `count` DESC
LIMIT 10
) t1
ON t0.`city` = t1.`city`"""
assert query == expected
def test_topk_predicate_pushdown_bug(self):
# Observed on TPCH data
cplusgeo = (
customer.inner_join(nation, [customer.c_nationkey ==
nation.n_nationkey])
.inner_join(region, [nation.n_regionkey ==
region.r_regionkey])
[customer, nation.n_name, region.r_name])
pred = cplusgeo.n_name.topk(10, by=cplusgeo.c_acctbal.sum())
expr = cplusgeo.filter([pred])
result = to_sql(expr)
expected = """\
SELECT t0.*, t1.`n_name`, t2.`r_name`
FROM customer t0
INNER JOIN nation t1
ON t0.`c_nationkey` = t1.`n_nationkey`
INNER JOIN region t2
ON t1.`n_regionkey` = t2.`r_regionkey`
LEFT SEMI JOIN (
SELECT t1.`n_name`, sum(t0.`c_acctbal`) AS `sum`
FROM customer t0
INNER JOIN nation t1
ON t0.`c_nationkey` = t1.`n_nationkey`
INNER JOIN region t2
ON t1.`n_regionkey` = t2.`r_regionkey`
GROUP BY 1
ORDER BY `sum` DESC
LIMIT 10
) t3
ON t1.`n_name` = t3.`n_name`"""
assert result == expected
def test_topk_analysis_bug(self):
# GH #398
airlines = ibis.table([('dest', 'string'),
('origin', 'string'),
('arrdelay', 'int32')], 'airlines')
dests = ['ORD', 'JFK', 'SFO']
t = airlines[airlines.dest.isin(dests)]
delay_filter = t.dest.topk(10, by=t.arrdelay.mean())
expr = t[delay_filter].group_by('origin').size()
result = to_sql(expr)
expected = """\
SELECT t0.`origin`, count(*) AS `count`
FROM airlines t0
LEFT SEMI JOIN (
SELECT `dest`, avg(`arrdelay`) AS `mean`
FROM airlines
WHERE `dest` IN ('ORD', 'JFK', 'SFO')
GROUP BY 1
ORDER BY `mean` DESC
LIMIT 10
) t1
ON t0.`dest` = t1.`dest`
WHERE t0.`dest` IN ('ORD', 'JFK', 'SFO')
GROUP BY 1"""
assert result == expected
def test_topk_to_aggregate(self):
t = ibis.table([('dest', 'string'),
('origin', 'string'),
('arrdelay', 'int32')], 'airlines')
top = t.dest.topk(10, by=t.arrdelay.mean())
result = to_sql(top)
expected = to_sql(top.to_aggregation())
assert result == expected
def test_bottomk(self):
pass
def test_topk_antijoin(self):
# Get the "other" category somehow
pass
def test_case_in_projection(self):
t = self.con.table('alltypes')
expr = (t.g.case()
.when('foo', 'bar')
.when('baz', 'qux')
.else_('default').end())
expr2 = (api.case()
.when(t.g == 'foo', 'bar')
.when(t.g == 'baz', t.g)
.end())
proj = t[expr.name('col1'), expr2.name('col2'), t]
result = to_sql(proj)
expected = """SELECT
CASE `g`
WHEN 'foo' THEN 'bar'
WHEN 'baz' THEN 'qux'
ELSE 'default'
END AS `col1`,
CASE
WHEN `g` = 'foo' THEN 'bar'
WHEN `g` = 'baz' THEN `g`
ELSE NULL
END AS `col2`, *
FROM alltypes"""
assert result == expected
def test_identifier_quoting(self):
data = api.table([
('date', 'int32'),
('explain', 'string')
], 'table')
expr = data[data.date.name('else'), data.explain.name('join')]
result = to_sql(expr)
expected = """SELECT `date` AS `else`, `explain` AS `join`
FROM `table`"""
assert result == expected
class TestUnions(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
table = self.con.table('functional_alltypes')
self.t1 = (table[table.int_col > 0]
[table.string_col.name('key'),
table.float_col.cast('double').name('value')])
self.t2 = (table[table.int_col <= 0]
[table.string_col.name('key'),
table.double_col.name('value')])
self.union1 = self.t1.union(self.t2)
def test_union(self):
result = to_sql(self.union1)
expected = """\
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
UNION ALL
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0"""
assert result == expected
def test_union_distinct(self):
union = self.t1.union(self.t2, distinct=True)
result = to_sql(union)
expected = """\
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
UNION
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0"""
assert result == expected
def test_union_project_column(self):
# select a column, get a subquery
expr = self.union1[[self.union1.key]]
result = to_sql(expr)
expected = """SELECT `key`
FROM (
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
UNION ALL
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0
) t0"""
assert result == expected
def test_union_extract_with_block(self):
pass
def test_union_in_subquery(self):
pass
class TestDistinct(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
def test_simple_table_distinct(self):
t = self.con.table('functional_alltypes')
expr = t[t.string_col, t.int_col].distinct()
result = to_sql(expr)
expected = """SELECT DISTINCT `string_col`, `int_col`
FROM functional_alltypes"""
assert result == expected
def test_array_distinct(self):
t = self.con.table('functional_alltypes')
expr = t.string_col.distinct()
result = to_sql(expr)
expected = """SELECT DISTINCT `string_col`
FROM functional_alltypes"""
assert result == expected
def test_count_distinct(self):
t = self.con.table('functional_alltypes')
metric = t.int_col.nunique().name('nunique')
expr = t[t.bigint_col > 0].group_by('string_col').aggregate([metric])
result = to_sql(expr)
expected = """SELECT `string_col`, COUNT(DISTINCT `int_col`) AS `nunique`
FROM functional_alltypes
WHERE `bigint_col` > 0
GROUP BY 1"""
assert result == expected
def test_multiple_count_distinct(self):
# Impala and some other databases will not execute multiple
# count-distincts in a single aggregation query. This error reporting
# will be left to the database itself, for now.
t = self.con.table('functional_alltypes')
metrics = [t.int_col.nunique().name('int_card'),
t.smallint_col.nunique().name('smallint_card')]
expr = t.group_by('string_col').aggregate(metrics)
result = to_sql(expr)
expected = """SELECT `string_col`, COUNT(DISTINCT `int_col`) AS `int_card`,
COUNT(DISTINCT `smallint_col`) AS `smallint_card`
FROM functional_alltypes
GROUP BY 1"""
assert result == expected
class TestSubqueriesEtc(unittest.TestCase):
def setUp(self):
self.foo = api.table(
[
('job', 'string'),
('dept_id', 'string'),
('year', 'int32'),
('y', 'double')
], 'foo')
self.bar = api.table([
('x', 'double'),
('job', 'string')
], 'bar')
self.t1 = api.table([
('key1', 'string'),
('key2', 'string'),
('value1', 'double')
], 'foo')
self.t2 = api.table([
('key1', 'string'),
('key2', 'string')
], 'bar')
def test_scalar_subquery_different_table(self):
t1, t2 = self.foo, self.bar
expr = t1[t1.y > t2.x.max()]
result = to_sql(expr)
expected = """SELECT *
FROM foo
WHERE `y` > (
SELECT max(`x`) AS `tmp`
FROM bar
)"""
assert result == expected
def test_where_uncorrelated_subquery(self):
expr = self.foo[self.foo.job.isin(self.bar.job)]
result = to_sql(expr)
expected = """SELECT *
FROM foo
WHERE `job` IN (
SELECT `job`
FROM bar
)"""
assert result == expected
def test_where_correlated_subquery(self):
t1 = self.foo
t2 = t1.view()
stat = t2[t1.dept_id == t2.dept_id].y.mean()
expr = t1[t1.y > stat]
result = to_sql(expr)
expected = """SELECT t0.*
FROM foo t0
WHERE t0.`y` > (
SELECT avg(t1.`y`) AS `tmp`
FROM foo t1
WHERE t0.`dept_id` = t1.`dept_id`
)"""
assert result == expected
def test_where_array_correlated(self):
# Test membership in some record-dependent values, if this is supported
pass
def test_exists_semi_join_case(self):
t1, t2 = self.t1, self.t2
cond = (t1.key1 == t2.key1).any()
expr = t1[cond]
result = to_sql(expr)
expected = """SELECT t0.*
FROM foo t0
WHERE EXISTS (
SELECT 1
FROM bar t1
WHERE t0.`key1` = t1.`key1`
)"""
assert result == expected
cond2 = ((t1.key1 == t2.key1) & (t2.key2 == 'foo')).any()
expr2 = t1[cond2]
result = to_sql(expr2)
expected = """SELECT t0.*
FROM foo t0
WHERE EXISTS (
SELECT 1
FROM bar t1
WHERE t0.`key1` = t1.`key1` AND
t1.`key2` = 'foo'
)"""
assert result == expected
def test_not_exists_anti_join_case(self):
t1, t2 = self.t1, self.t2
cond = (t1.key1 == t2.key1).any()
expr = t1[-cond]
result = to_sql(expr)
expected = """SELECT t0.*
FROM foo t0
WHERE NOT EXISTS (
SELECT 1
FROM bar t1
WHERE t0.`key1` = t1.`key1`
)"""
assert result == expected
| apache-2.0 |
miaecle/deepchem | datasets/construct_pdbbind_df.py | 9 | 3406 | """
Contains methods for generating a pdbbind dataset mapping
complexes (protein + ligand) to experimental binding measurement.
"""
from __future__ import print_function
import pickle
import os
import pandas as pd
from rdkit import Chem
from glob import glob
import re
from sklearn.externals import joblib
def extract_labels(pdbbind_label_file):
"""Extract labels from pdbbind label file."""
assert os.path.isfile(pdbbind_label_file)
labels = {}
with open(pdbbind_label_file) as f:
content = f.readlines()
for line in content:
if line[0] == "#":
continue
line = line.split()
# lines in the label file have format
# PDB-code Resolution Release-Year -logKd Kd reference ligand-name
#print line[0], line[3]
labels[line[0]] = line[3]
return labels
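# Illustrative sketch (not part of the original script): the label file is
# whitespace-delimited with the columns noted above, so a line such as
# "1a4k 2.50 1995 4.22 Kd=60uM // ligand-name" (values made up here) maps the
# PDB code to the -logKd/Ki column.
def _demo_extract_labels(tmp_path="/tmp/example_pdbbind_labels.txt"):
  """Write a tiny, hypothetical label file and parse it."""
  with open(tmp_path, "w") as f:
    f.write("# comment lines are skipped\n")
    f.write("1a4k 2.50 1995 4.22 Kd=60uM // ligand-name\n")
  assert extract_labels(tmp_path) == {"1a4k": "4.22"}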
def construct_df(pdb_stem_directory, pdbbind_label_file, pdbbind_df_joblib):
"""
Takes as input a stem directory containing subdirectories with ligand
and protein pdb/mol2 files, a pdbbind_label_file containing binding
assay data for the co-crystallized ligand in each pdb file,
  and a pdbbind_df_joblib path to which will be saved a pandas DataFrame
where each row contains a pdb_id, smiles string, unique complex id,
ligand pdb as a list of strings per line in file, protein pdb as a list
of strings per line in file, ligand mol2 as a list of strings per line in
mol2 file, and a "label" containing the experimental measurement.
"""
labels = extract_labels(pdbbind_label_file)
df_rows = []
os.chdir(pdb_stem_directory)
pdb_directories = [pdb.replace('/', '') for pdb in glob('*/')]
for pdb_dir in pdb_directories:
print("About to extract ligand and protein input files")
pdb_id = os.path.basename(pdb_dir)
    ligand_pdb = None
    protein_pdb = None
    ligand_mol2 = None
for f in os.listdir(pdb_dir):
if re.search("_ligand_hyd.pdb$", f):
ligand_pdb = f
elif re.search("_protein_hyd.pdb$", f):
protein_pdb = f
elif re.search("_ligand.mol2$", f):
ligand_mol2 = f
print("Extracted Input Files:")
    print(ligand_pdb, protein_pdb, ligand_mol2)
if not ligand_pdb or not protein_pdb or not ligand_mol2:
raise ValueError("Required files not present for %s" % pdb_dir)
ligand_pdb_path = os.path.join(pdb_dir, ligand_pdb)
protein_pdb_path = os.path.join(pdb_dir, protein_pdb)
ligand_mol2_path = os.path.join(pdb_dir, ligand_mol2)
with open(protein_pdb_path, "rb") as f:
protein_pdb_lines = f.readlines()
with open(ligand_pdb_path, "rb") as f:
ligand_pdb_lines = f.readlines()
try:
with open(ligand_mol2_path, "rb") as f:
ligand_mol2_lines = f.readlines()
except:
ligand_mol2_lines = []
print("About to compute ligand smiles string.")
ligand_mol = Chem.MolFromPDBFile(ligand_pdb_path)
if ligand_mol is None:
continue
smiles = Chem.MolToSmiles(ligand_mol)
complex_id = "%s%s" % (pdb_id, smiles)
label = labels[pdb_id]
df_rows.append([pdb_id, smiles, complex_id, protein_pdb_lines,
ligand_pdb_lines, ligand_mol2_lines, label])
pdbbind_df = pd.DataFrame(df_rows, columns=('pdb_id', 'smiles', 'complex_id',
'protein_pdb', 'ligand_pdb',
'ligand_mol2', 'label'))
joblib.dump(pdbbind_df, pdbbind_df_joblib)
| mit |
jchodera/mdtraj | mdtraj/utils/__init__.py | 7 | 3668 | from __future__ import print_function, division
import time
import warnings
from mdtraj.utils.delay_import import import_
from mdtraj.utils.validation import ensure_type, cast_indices, check_random_state
from mdtraj.utils.unit import in_units_of
from mdtraj.utils.rotation import rotation_matrix_from_quaternion, uniform_quaternion
from mdtraj.utils.unitcell import (lengths_and_angles_to_box_vectors,
box_vectors_to_lengths_and_angles)
from mdtraj.utils.contextmanagers import timing, enter_temp_directory
from mdtraj.utils.zipped import open_maybe_zipped
__all__ = ["ensure_type", "import_", "in_units_of",
"lengths_and_angles_to_box_vectors",
"box_vectors_to_lengths_and_angles",
"ilen", "timing", "cast_indices", "check_random_state",
"rotation_matrix_from_quaternion", "uniform_quaternion",
"enter_temp_directory", "timing", "deprecated"]
def ilen(iterable):
"""Length of an iterator. Note, this consumes the iterator
Parameters
----------
iterable : iterable
An iterable, such as a generator, list, etc.
Returns
-------
length : int
The number of elements in the iterable
"""
return sum(1 for _ in iterable)
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty pair of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Copied from scikit-learn: sklearn/utils/__init__.py
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
| lgpl-2.1 |
smunaut/gnuradio | gr-filter/examples/channelize.py | 58 | 7003 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
self._ifs = M*self._fs # initial sampling rate
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = blocks.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in xrange(len(freqs)):
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
            # Remove the transients at the beginning and some corruption at
            # the end of the stream (the corruption is a bug, probably due to
            # corner cases).
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
mclaughlin6464/pylearn2 | pylearn2/gui/tangent_plot.py | 44 | 1730 | """
Code for plotting curves with tangent lines.
"""
__author__ = "Ian Goodfellow"
try:
from matplotlib import pyplot
except Exception:
pyplot = None
from theano.compat.six.moves import xrange
def tangent_plot(x, y, s):
"""
Plots a curve with tangent lines.
Parameters
----------
x : list
List of x coordinates.
Assumed to be sorted into ascending order, so that the tangent
lines occupy 80 percent of the horizontal space between each pair
of points.
y : list
List of y coordinates
s : list
List of slopes
"""
assert isinstance(x, list)
assert isinstance(y, list)
assert isinstance(s, list)
n = len(x)
assert len(y) == n
assert len(s) == n
if pyplot is None:
raise RuntimeError("Could not import pyplot, can't run this code.")
pyplot.plot(x, y, color='b')
if n == 0:
pyplot.show()
return
pyplot.hold(True)
# Add dummy entries so that the for loop can use the same code on every
# entry
if n == 1:
        x = [x[0] - 1.] + x + [x[0] + 1.]
else:
x = [x[0] - (x[1] - x[0])] + x + [x[-2] + (x[-1] - x[-2])]
y = [0.] + y + [0]
s = [0.] + s + [0]
for i in xrange(1, n + 1):
ld = 0.4 * (x[i] - x[i - 1])
lx = x[i] - ld
ly = y[i] - ld * s[i]
rd = 0.4 * (x[i + 1] - x[i])
rx = x[i] + rd
ry = y[i] + rd * s[i]
pyplot.plot([lx, rx], [ly, ry], color='g')
pyplot.show()
if __name__ == "__main__":
# Demo by plotting a quadratic function
import numpy as np
x = np.arange(-5., 5., .1)
y = 0.5 * (x ** 2)
x = list(x)
y = list(y)
tangent_plot(x, y, x)
| bsd-3-clause |
Morgan-Stanley/treadmill | lib/python/treadmill/cli/admin/checkout/__init__.py | 2 | 4452 | """Treadmill cell checkout.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import logging
import sqlite3
import sys
import click
import pandas as pd
from treadmill import cli
_LOGGER = logging.getLogger(__name__)
def _print_query(conn, sql, index_col=None):
"""Print query results."""
row_factory = conn.row_factory
try:
conn.row_factory = None
frame = pd.read_sql(
sql, conn, index_col=index_col
)
columns = {
col: col.replace('_', '-')
for col in frame.columns
}
frame.rename(columns=columns, inplace=True)
if not frame.empty:
pd.set_option('max_rows', None)
pd.set_option('expand_frame_repr', False)
print('---')
print(frame)
print('')
finally:
conn.row_factory = row_factory
def _print_check_result(description, data, status):
"""Print check result."""
print(' {:.<69} {}'.format(
description.format(**data),
status
))
def _run_check(conn, check, verbose, index_col):
"""Run check."""
query = check['query']
alerts = check.get('alerts', [])
metric_sql = check['metric'].format(query=query)
_LOGGER.debug('metric_sql: %s', metric_sql)
cursor = conn.execute(metric_sql)
check_failed = False
empty = True
for row in cursor:
empty = False
row = dict(zip(row.keys(), row))
for alert in alerts:
match = True
status = 'pass'
for key, prop_value in alert.get('match', {}).items():
value = row.get(key)
match = match and (value == prop_value)
if not match:
continue
for key, limit in alert['threshold'].items():
value = row.get(key)
if value >= limit:
status = 'fail'
check_failed = True
_print_check_result(
alert['description'],
row,
status
)
if empty:
for alert in alerts:
_print_check_result(
alert['description'],
{},
'pass'
)
    # Alert will be triggered, display the results that
# caused the alert.
if verbose >= 1 or check_failed:
_print_query(
conn,
query,
index_col=index_col
)
print('')
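# Illustrative sketch of the check metadata consumed by _run_check. The table,
# columns and alert below are hypothetical, not taken from a real checkout
# plugin: 'query' selects the raw rows, 'metric' wraps it, and each alert
# compares row values against 'threshold' and formats 'description' with the
# row's columns.
_EXAMPLE_CHECK = {
    'query': 'SELECT cell, down_nodes FROM nodes',
    'metric': 'SELECT cell, SUM(down_nodes) AS down_nodes FROM ({query}) GROUP BY cell',
    'alerts': [
        {
            'description': 'Down nodes in cell {cell}.',
            'threshold': {'down_nodes': 1},
        },
    ],
}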
# pylint: disable=C0103
#
# pylint does not like 'db' as variable name.
def init():
"""Top level command handler."""
@click.group(cls=cli.make_commands(__name__,
chain=True,
invoke_without_command=True))
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
@click.option('-v', '--verbose', count=True)
@click.option('--db', help='Path to output sqlite db.')
def run(verbose, db):
"""Run interactive checkout."""
del verbose
del db
@run.resultcallback()
def run_checkouts(checkouts, verbose, db):
"""Run interactive checkout."""
# Too many nested blocks, need to refactor.
#
# pylint: disable=R1702
common_args = {}
if not db:
db = ':memory:'
conn = sqlite3.connect(db)
for checkout in checkouts:
try:
metadata = checkout(conn=conn, **common_args)
index_col = metadata.get('index')
all_query = metadata['query']
conn.commit()
print(checkout.__doc__)
if verbose >= 2:
_print_query(
conn,
all_query,
index_col=index_col
)
row_factory = conn.row_factory
conn.row_factory = sqlite3.Row
for check in metadata.get('checks', []):
_run_check(conn, check, verbose, index_col)
except Exception as err: # pylint: disable=W0703
_LOGGER.exception('%s', str(err))
del run_checkouts
return run
| apache-2.0 |
MechCoder/scikit-learn | sklearn/feature_extraction/image.py | 21 | 18105 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x : integer
The size of the grid in the x direction.
n_y : integer
The size of the grid in the y direction.
n_z : integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = mask.astype(dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
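# Illustrative sketch (not part of the original module): a 2x2 image yields a
# 4-node adjacency matrix whose off-diagonal weights are the absolute
# intensity differences between neighbouring pixels.
def _demo_img_to_graph():
    img = np.array([[0, 1], [2, 3]])
    graph = img_to_graph(img)
    assert graph.shape == (4, 4)
    return graph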
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
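# Illustrative sketch (not part of the original module): a 4x4 image contains
# nine 2x2 patches, and a float max_patches keeps that fraction of them.
def _demo_compute_n_patches():
    assert _compute_n_patches(4, 4, 2, 2) == 9
    assert _compute_n_patches(4, 4, 2, 2, max_patches=0.5) == 4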
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
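# Illustrative sketch (not part of the original module, and assuming the older
# NumPy versions this module targets): extracting 2x2 patches from a 4x4 array
# returns a (3, 3, 2, 2) strided view without copying the data.
def _demo_extract_patches():
    arr = np.arange(16).reshape(4, 4)
    patches = extract_patches(arr, patch_shape=2, extraction_step=1)
    assert patches.shape == (3, 3, 2, 2)
    return patches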
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int, RandomState instance or None, optional (default=None)
Pseudo number generator state used for random sampling to use if
`max_patches` is not None. If int, random_state is the seed used by
the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is
the RandomState instance used by `np.random`.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
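# Illustrative sketch (not part of the original module): extracting every
# overlapping patch and reconstructing averages back to the original image.
def _demo_patch_round_trip():
    image = np.arange(16, dtype=np.float64).reshape(4, 4)
    patches = extract_patches_2d(image, (2, 2))
    reconstructed = reconstruct_from_patches_2d(patches, (4, 4))
    assert np.allclose(image, reconstructed)
    return reconstructed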
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
| bsd-3-clause |
uaca/deepy | examples/variational_autoencoder/visualize_vae.py | 2 | 1201 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from deepy.import_all import *
from train_vae import create_model
viz_path = os.path.join(os.path.dirname(__file__), "visualization.png")
if __name__ == '__main__':
# Load the model
model = create_model(load=True, sample=True)
# Get first image in MNIST
mnist = MnistDataset(for_autoencoder=True)
first_img = mnist.train_set()[0]
# Get the latent variable
latent_variable = model.encode(first_img)[0]
    # Sample output images by varying the first two latent variables
deltas = np.linspace(-2, 2, 15)
_, axmap = plt.subplots(len(deltas), len(deltas))
for i, delta0 in enumerate(deltas):
for j, delta1 in enumerate(deltas):
new_variable = list(latent_variable)
new_variable[0] += delta0
new_variable[1] += delta1
output_img = model.decode([np.array(new_variable, dtype=FLOATX)])
output_img = output_img[0].reshape((28, 28))
axmap[i, j].axis("off")
axmap[i, j].matshow(output_img, cmap="gray")
plt.savefig(viz_path, bbox_inches='tight', facecolor='black', dpi=60)
| mit |
eric-haibin-lin/mxnet | example/restricted-boltzmann-machine/binary_rbm_gluon.py | 9 | 7113 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random as pyrnd
import argparse
import numpy as np
import mxnet as mx
from matplotlib import pyplot as plt
from binary_rbm import BinaryRBMBlock
from binary_rbm import estimate_log_likelihood
### Helper function
def get_non_auxiliary_params(rbm):
return rbm.collect_params('^(?!.*_aux_.*).*$')
### Command line arguments
parser = argparse.ArgumentParser(description='Restricted Boltzmann machine learning MNIST')
parser.add_argument('--num-hidden', type=int, default=500, help='number of hidden units')
parser.add_argument('--k', type=int, default=30, help='number of Gibbs sampling steps used in the PCD algorithm')
parser.add_argument('--batch-size', type=int, default=80, help='batch size')
parser.add_argument('--num-epoch', type=int, default=130, help='number of epochs')
parser.add_argument('--learning-rate', type=float, default=0.1, help='learning rate for stochastic gradient descent') # The optimizer rescales this with `1 / batch_size`
parser.add_argument('--momentum', type=float, default=0.3, help='momentum for the stochastic gradient descent')
parser.add_argument('--ais-batch-size', type=int, default=100, help='batch size for AIS to estimate the log-likelihood')
parser.add_argument('--ais-num-batch', type=int, default=10, help='number of batches for AIS to estimate the log-likelihood')
parser.add_argument('--ais-intermediate-steps', type=int, default=10, help='number of intermediate distributions for AIS to estimate the log-likelihood')
parser.add_argument('--ais-burn-in-steps', type=int, default=10, help='number of burn in steps for each intermediate distributions of AIS to estimate the log-likelihood')
parser.add_argument('--cuda', action='store_true', dest='cuda', help='train on GPU with CUDA')
parser.add_argument('--no-cuda', action='store_false', dest='cuda', help='train on CPU')
parser.add_argument('--device-id', type=int, default=0, help='GPU device id')
parser.add_argument('--data-loader-num-worker', type=int, default=4, help='number of multithreading workers for the data loader')
parser.set_defaults(cuda=True)
args = parser.parse_args()
print(args)
### Global environment
mx.random.seed(pyrnd.getrandbits(32))
ctx = mx.gpu(args.device_id) if args.cuda else mx.cpu()
### Prepare data
def data_transform(data, label):
return data.astype(np.float32) / 255, label.astype(np.float32)
mnist_train_dataset = mx.gluon.data.vision.MNIST(train=True, transform=data_transform)
mnist_test_dataset = mx.gluon.data.vision.MNIST(train=False, transform=data_transform)
img_height = mnist_train_dataset[0][0].shape[0]
img_width = mnist_train_dataset[0][0].shape[1]
num_visible = img_width * img_height
# This generates arrays with shape (batch_size, height = 28, width = 28, num_channel = 1)
train_data = mx.gluon.data.DataLoader(mnist_train_dataset, args.batch_size, shuffle=True, num_workers=args.data_loader_num_worker)
test_data = mx.gluon.data.DataLoader(mnist_test_dataset, args.batch_size, shuffle=True, num_workers=args.data_loader_num_worker)
### Train
rbm = BinaryRBMBlock(num_hidden=args.num_hidden, k=args.k, for_training=True, prefix='rbm_')
rbm.initialize(mx.init.Normal(sigma=.01), ctx=ctx)
rbm.hybridize()
trainer = mx.gluon.Trainer(
get_non_auxiliary_params(rbm),
'sgd', {'learning_rate': args.learning_rate, 'momentum': args.momentum})
for epoch in range(args.num_epoch):
# Update parameters
for batch, _ in train_data:
batch = batch.as_in_context(ctx).flatten()
with mx.autograd.record():
out = rbm(batch)
out[0].backward()
trainer.step(batch.shape[0])
mx.nd.waitall() # To restrict memory usage
    # Monitor the performance of the model
params = get_non_auxiliary_params(rbm)
param_visible_layer_bias = params['rbm_visible_layer_bias'].data(ctx=ctx)
param_hidden_layer_bias = params['rbm_hidden_layer_bias'].data(ctx=ctx)
param_interaction_weight = params['rbm_interaction_weight'].data(ctx=ctx)
test_log_likelihood, _ = estimate_log_likelihood(
param_visible_layer_bias, param_hidden_layer_bias, param_interaction_weight,
args.ais_batch_size, args.ais_num_batch, args.ais_intermediate_steps, args.ais_burn_in_steps, test_data, ctx)
train_log_likelihood, _ = estimate_log_likelihood(
param_visible_layer_bias, param_hidden_layer_bias, param_interaction_weight,
args.ais_batch_size, args.ais_num_batch, args.ais_intermediate_steps, args.ais_burn_in_steps, train_data, ctx)
print("Epoch %d completed with test log-likelihood %f and train log-likelihood %f" % (epoch, test_log_likelihood, train_log_likelihood))
### Show some samples.
# Each sample is obtained by 3000 steps of Gibbs sampling starting from a real sample.
# Starting from the real data is just for convenience of implementation.
# There must be no correlation between the initial states and the resulting samples.
# You can start from random states and run the Gibbs chain for a sufficiently long time.
print("Preparing showcase")
showcase_gibbs_sampling_steps = 3000
showcase_num_samples_w = 15
showcase_num_samples_h = 15
showcase_num_samples = showcase_num_samples_w * showcase_num_samples_h
showcase_img_shape = (showcase_num_samples_h * img_height, 2 * showcase_num_samples_w * img_width)
showcase_img_column_shape = (showcase_num_samples_h * img_height, img_width)
showcase_rbm = BinaryRBMBlock(
num_hidden=args.num_hidden,
k=showcase_gibbs_sampling_steps,
for_training=False,
params=get_non_auxiliary_params(rbm))
showcase_iter = iter(mx.gluon.data.DataLoader(mnist_train_dataset, showcase_num_samples_h, shuffle=True))
showcase_img = np.zeros(showcase_img_shape)
for i in range(showcase_num_samples_w):
data_batch = next(showcase_iter)[0].as_in_context(ctx).flatten()
sample_batch = showcase_rbm(data_batch)
# Each pixel is the probability that the unit is 1.
showcase_img[:, i * img_width : (i + 1) * img_width] = data_batch.reshape(showcase_img_column_shape).asnumpy()
showcase_img[:, (showcase_num_samples_w + i) * img_width : (showcase_num_samples_w + i + 1) * img_width
] = sample_batch[0].reshape(showcase_img_column_shape).asnumpy()
s = plt.imshow(showcase_img, cmap='gray')
plt.axis('off')
plt.axvline(showcase_num_samples_w * img_width, color='y')
plt.show(s)
print("Done") | apache-2.0 |
degoldschmidt/ribeirolab-codeconversion | python/flyPAD/fp_noisedetect.py | 1 | 7895 | #!/usr/bin/env python
"""
Script for detecting high-synchrony and -frequency noise in raw capacitance signals from flyPAD data file/s
###
Usage:
"""
# import packages
import os, sys
from tkinter import *
from tkinter import messagebox, filedialog
import json as js
import h5py as h5
from datetime import datetime as dt
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
from matplotlib import rc, font_manager
import numpy as np
import scipy as sp
import scipy.signal as sg
from itertools import groupby
from helper import *
# metadata
__author__ = "Dennis Goldschmidt"
__copyright__ = "2017"
__credits__ = ["Dennis Goldschmidt"]
__license__ = "GNU GENERAL PUBLIC LICENSE v3"
__version__ = "0.1"
__maintainer__ = "Dennis Goldschmidt"
__email__ = "[email protected]"
__status__ = "In development"
def len_iter(items):
return sum(1 for _ in items)
def consecutive_one(data):
return max(len_iter(run) for val, run in groupby(data) if val)
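# Illustrative sketch (not part of the original analysis): consecutive_one
# returns the length of the longest run of truthy samples.
def _demo_consecutive_one():
    assert consecutive_one([0, 1, 1, 1, 0, 1]) == 3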
def get_median_filtered(signal, threshold=3):
signal = signal.copy()
difference = np.abs(signal - np.median(signal))
median_difference = np.median(difference)
if median_difference == 0:
s = 0
else:
s = difference / float(median_difference)
mask = s > threshold
signal[mask] = np.median(signal)
return signal
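# Illustrative sketch (not part of the original analysis): samples deviating
# from the median by more than `threshold` times the median absolute
# difference are replaced by the median, so the 100. below is removed.
def _demo_get_median_filtered():
    signal = np.array([1., 2., 1., 100., 2.])
    assert np.all(get_median_filtered(signal) <= 2.)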
def main(argv):
# withdraw main window
Tk().withdraw()
# go through list of arguments and check for existing files and dirs
files = arg2files(argv)
# open filedialog for files
allnoise = []
noised = []
outkeys = []
fs = 100.
askload = messagebox.askquestion("Detect noise data from files", "Do you want to open files to detect noise?", icon='warning')
while askload == 'yes':
if len(argv)==0:
files = filedialog.askopenfilenames(title='Choose file/s to load')
if len(files) == 0:
break
START = 0
STOP = 64
STEP = 2
for ind, _file in enumerate(files):
filedatetime = get_datetime(_file)
print(filedatetime.strftime("%d %b %H:%M:%S"))
outkeys.append(filedatetime.strftime("%d-%m %H:%M:%S"))
N = get_data_len(_file)
print(N)
t = np.arange(N)/float(fs)
this_data = get_data(_file, dur=N)
filtered_signal = np.zeros(this_data.shape)
sum_signal = np.zeros(t.shape)
thr = 200
for ch in range(START, STOP, STEP):
if N > 1000000:
print(ch)
""" This one does the magic """
ksize = 21
filtered_signal[ch+1] = sg.medfilt(this_data[ch+1], kernel_size=ksize)
filtered_signal[ch+1] -= filtered_signal[ch+1, 0] # baseline subtraction
filtered_signal = np.abs(filtered_signal) # positive changes from baseline
thr_signal = filtered_signal > thr
sum_signal = np.sum(thr_signal, axis=0)
ch_thr = 24
thr_sum_signal = sum_signal > ch_thr
min_len = 500
if np.count_nonzero(thr_sum_signal) > 0:
print(np.count_nonzero(thr_sum_signal), consecutive_one(thr_sum_signal))
if(consecutive_one(thr_sum_signal) > min_len):
print("Noise detected at", (np.nonzero(thr_sum_signal)[0])[0]/fs , "secs")
noised.append(True)
else:
print("No noise detected.")
noised.append(False)
else:
print("No noise detected.")
noised.append(False)
allnoise.append(thr_sum_signal)
# saving noise data
asksave = messagebox.askquestion("Saving noise data", "Do you want to save noise data into file?", icon='warning')
if asksave == 'yes':
savefile = filedialog.asksaveasfilename(title="Save datafile as...", defaultextension=".h5")
with h5.File(savefile, "w") as hf:
print("Writing file:", savefile)
for ind, noise in enumerate(allnoise):
print("Writing:", outkeys[ind])
dset = hf.create_dataset(outkeys[ind], data=noise, compression="lzf")
dset.attrs["noise"] = noised[ind]
break
# plotting noise data
if len(allnoise) == 0:
askload = messagebox.askquestion("Load noise data", "Do you want to load noise data from file?", icon='warning')
if askload == 'yes':
files = filedialog.askopenfilename(title='Choose file/s to load')
with h5.File(files, "r") as hf:
for ind, key in enumerate(hf.keys()):
outkeys.append(key)
noised.append(hf[key].attrs["noise"])
allnoise.append(hf[key][:])
else:
return
day=0
countd=0
tray = 0
plt.ion()
hours = []
days = []
for ind, noise in enumerate(allnoise):
#if (ind == 8 or ind == 10): ## TODO: excluding certain files this is specific
# continue
tray +=1
key = outkeys[ind]
if day == 0:
day = int(key[0:2])
if noised[ind]:
print(key, "Noise detected at", (np.nonzero(noise)[0])[0]/fs , "secs.")
else:
print(key, "No noise detected.")
secs = int(key[-2:])
mins = int(key[-5:-3])
hour = int(key[-8:-6])
#print(day, int(key[0:2]), (int(key[0:2])-day))
if len(hours) < 1 or hour == (hours[-1] + 1):
hours.append(hour)
days.append(countd)
if day != int(key[0:2]):
hours.append(hours[-1]+1)
days.append(countd)
hours.append(hours[-1]+1)
days.append(countd)
hours.append(hour)
countd += (int(key[0:2])-day)
days.append(countd)
day = int(key[0:2])
if ind == 0:
hourzero = hour
tstart = secs + mins*60 + (hour-hourzero)*3600 + countd*10*3600
tend = tstart + len(noise)/100.
time = np.linspace(tstart, tend, len(noise), endpoint=False)
plt.plot(time, 0*noise + 2*tray, 'k-', label='signal')
plt.xlabel('Day time', fontsize=8)
if type(files) is str:
plt.title('Noise analysis ' + os.path.basename(os.path.dirname(files)), fontsize=10)
else:
plt.title('Noise analysis ' + os.path.basename(os.path.dirname(files[0])), fontsize=10)
if noised[ind]:
plt.plot(time[noise==1], noise[noise==1] + 2*tray-1, 'r.', markersize=1, label='noise detected')
hours.append(hours[-1]+1)
days.append(countd)
hours.append(hours[-1]+1)
days.append(countd)
x = hours.copy()
for ind, lhour in enumerate(hours):
x[ind] = 3600*(lhour-hours[0]) + days[ind]*36000
print(hours)
labels = ["{0:d}:00".format(lhour) for lhour in hours]
plt.xticks(x, labels, rotation=45, fontsize=8)
plt.yticks([],[], fontsize=0)
plt.gca().xaxis.grid(True, linestyle='--')
plt.draw()
# saving plot
asksave = messagebox.askquestion("Saving plot of noise data", "Do you want to save the plot of noise data into a png file?", icon='warning')
if asksave == 'yes':
savefile = filedialog.asksaveasfilename(title="Save datafile as...", defaultextension=".png")
plt.savefig(savefile, dpi=300, bbox_inches='tight')
plt.close()
# if no files are given
if len(files) == 0:
print("WARNING: No valid files specified.")
if __name__ == "__main__":
startdt = dt.now()
main(sys.argv[1:])
print("Done. Runtime:", strfdelta(dt.now() - startdt, "%H:%M:%S"))
| gpl-3.0 |
scattering/sasnets | sasnets/util/utils.py | 1 | 1504 | """
Various small utility functions that are reused throughout the program.
"""
import os
import time
def plot(q, i_q):
"""
Method to plot Q vs I(Q) data for testing and verification purposes.
:param q: List of Q values
:param i_q: List of I values
:return: None
"""
from matplotlib import pyplot as plt
plt.style.use("classic")
plt.loglog(q, i_q)
ax = plt.gca()
ax.autoscale(enable=True)
plt.show()
def inepath(pathname):
"""
Returns a normalised path given a path. Checks if the path exists and
creates the path if it does not. If a directory is specified, appends the
current time as the filename.
:param pathname: The pathname to process.
:return: A normalised path, or None.
"""
sp = os.path.normpath(pathname)
if sp is not None:
if not os.path.exists(os.path.dirname(sp)):
os.makedirs(os.path.dirname(sp))
if os.path.isdir(sp):
return os.path.join(sp, str(time.time()))
else:
return sp
return None
def columnize(items, indent=""):
try:
from columnize import columnize as _columnize, default_opts
return _columnize(list(items), default_opts['displaywidth'],
lineprefix=indent)
except ImportError:
pass
try:
from sasmodels.compare import columnize as _columnize
return _columnize(items, indent=indent)
except ImportError:
pass
return "\n".join(items)
| bsd-3-clause |
bowang/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 15 | 31142 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
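# Illustrative sketch (not part of the original module): 100 samples with 10
# features and 3-class labels give [32, 10] inputs and [32, 3] one-hot outputs
# when batch_size=32.
def _demo_get_in_out_shape():
  input_shape, output_shape, batch_size = _get_in_out_shape(
      x_shape=[100, 10], y_shape=[100], n_classes=3, batch_size=32)
  assert input_shape == [32, 10]
  assert output_shape == [32, 3]
  assert batch_size == 32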
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
    x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
      supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. If `y` is
      `dict` (or an iterable that returns dicts), `n_classes` must be a `dict`
      such that `n_classes[key]` is the number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
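# Illustrative sketch (not part of the original module): a 10x3 numpy array
# split with batch_size=4 yields parts of 4, 4 and 2 rows.
def _demo_setup_predict_data_feeder():
  parts = setup_predict_data_feeder(np.zeros((10, 3)), batch_size=4)
  assert [len(p) for p in parts] == [4, 4, 2]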
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
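# A minimal sketch: check_array coerces lists/ndarrays to the requested dtype
# (avoiding a copy when the dtype already matches).
def _example_check_array():
  import numpy as np  # local import to keep the sketch self-contained
  converted = check_array([1, 2, 3], np.float32)
  return converted.dtype  # dtype('float32')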
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
    data: array-like. The collection to access.
    iloc: `int` or `list` of `int`s. Location(s) to access in `data`.
  Returns:
    The element of `data` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample which can be either an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
      y: label vector, either floats for regression or class ids for
        classification. If a matrix, it is treated as a sequence of labels.
        Can be `None` for an unsupervised setting. Also supports a dictionary
        of labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else (
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())])
if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices
# to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
if x_is_dict else _check_dtype(self._x.dtype))
# self._output_dtype == np.float32 when y is None
self._output_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
if y_is_dict else (
_check_dtype(self._y.dtype) if y is not None else np.float32))
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
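# A minimal sketch of a typical DataFeeder round trip -- build placeholders,
# then pull one feed_dict; the shapes and class count are assumed values.
def _example_data_feeder_round_trip():
  import numpy as np  # local import to keep the sketch self-contained
  x = np.random.rand(20, 4).astype(np.float32)
  y = np.random.randint(0, 3, size=20)
  feeder = DataFeeder(x, y, n_classes=3, batch_size=5)
  input_ph, output_ph = feeder.input_builder()  # TF placeholders
  feed_dict = feeder.get_feed_dict_fn()()       # one mini-batch, keyed by name
  return input_ph, output_ph, feed_dict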
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows reading data as it comes in from disk or
  somewhere else. It is common to have these iterators rotate infinitely over
  the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
      x: iterator each element of which returns one feature sample. Sample can
        be an Nd numpy matrix or a dictionary of Nd numpy matrices.
      y: iterator each element of which returns one label sample. Sample can be
        an Nd numpy matrix or a dictionary of Nd numpy matrices with 1 or many
        classes or regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        to `None`, then it is assumed that the iterator returns already batched
        elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
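# A minimal sketch of StreamingDataFeeder fed from a generator; the feature
# size and batch size are assumed values, and labels are omitted (y=None) to
# keep the example unsupervised.
def _example_streaming_data_feeder():
  import itertools
  import numpy as np  # local imports to keep the sketch self-contained
  x_iter = (np.random.rand(4).astype(np.float32) for _ in itertools.count())
  feeder = StreamingDataFeeder(x_iter, None, n_classes=None, batch_size=8)
  feeder.input_builder()                   # builds the input placeholder
  feed_dict = feeder.get_feed_dict_fn()()  # one batch of 8 streamed samples
  return feed_dict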
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
  into them. DaskDataFeeder removes the requirement to have the full dataset
  in memory and still allows random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistent-sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
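# A minimal sketch of constructing a DaskDataFeeder, assuming dask and pandas
# are installed; the frame sizes, class count and batch size are arbitrary.
def _example_dask_data_feeder():
  import numpy as np
  import pandas as pd
  import dask.dataframe as dd  # local imports keep the sketch self-contained
  x = dd.from_pandas(pd.DataFrame(np.random.rand(100, 2)), npartitions=2)
  y = dd.from_pandas(pd.DataFrame(np.random.randint(0, 2, size=(100, 1))),
                     npartitions=2)
  return DaskDataFeeder(x, y, n_classes=2, batch_size=10)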
| apache-2.0 |
deeplycloudy/lmatools | lmatools/vis/ctables.py | 1 | 105246 | from numpy import array
import matplotlib as mpl
LUTSIZE = mpl.rcParams['image.lut']
del mpl
import matplotlib.colors as colors
#These are standard National Weather Service Radar Colortables
_NWSRef_data = {
'blue': [(0.0, 0.92549019607843142, 0.92549019607843142),
(0.07142857, 0.96470588235294119, 0.96470588235294119),
(0.14285714, 0.96470588235294119, 0.96470588235294119),
(0.21428571, 0.0, 0.0),
(0.28571429, 0.0, 0.0),
(0.35714286, 0.0, 0.0),
(0.42857143, 0.0, 0.0),
(0.50000000, 0.0, 0.0),
(0.57142857, 0.0, 0.0),
(0.64285714, 0.0, 0.0),
(0.71428571, 0.0, 0.0),
(0.78571429, 0.0, 0.0),
(0.85714286, 1.0, 1.0),
(0.92857143, 0.78823529411764703, 0.78823529411764703),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.92549019607843142, 0.92549019607843142),
(0.07142857, 0.62745098039215685, 0.62745098039215685),
(0.14285714, 0.0, 0.0),
(0.21428571, 1.0, 1.0),
(0.28571429, 0.78431372549019607, 0.78431372549019607),
(0.35714286, 0.56470588235294117, 0.56470588235294117),
(0.42857143, 1.0, 1.0),
(0.50000000, 0.75294117647058822, 0.75294117647058822),
(0.57142857, 0.56470588235294117, 0.56470588235294117),
(0.64285714, 0.0, 0.0),
(0.71428571, 0.0, 0.0),
(0.78571429, 0.0, 0.0),
(0.85714286, 0.0, 0.0),
(0.92857143, 0.33333333333333331, 0.33333333333333331),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.0, 0.0),
(0.07142857, 0.0039215686274509803, 0.0039215686274509803),
(0.14285714, 0.0, 0.0),
(0.21428571, 0.0, 0.0),
(0.28571429, 0.0, 0.0),
(0.35714286, 0.0, 0.0),
(0.42857143, 1.0, 1.0),
(0.50000000, 0.90588235294117647, 0.90588235294117647),
(0.57142857, 1.0, 1.0),
(0.64285714, 1.0, 1.0),
(0.71428571, 0.83921568627450982, 0.83921568627450982),
(0.78571429, 0.75294117647058822, 0.75294117647058822),
(0.85714286, 1.0, 1.0),
(0.92857143, 0.59999999999999998, 0.59999999999999998),
(1.0, 0.0, 0.0)]}
NWSRefPrecip = colors.Normalize(5, 75)
NWSRefClearAir = colors.Normalize(-28, 28)
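# A minimal usage sketch: each segment-data dictionary in this module can be
# handed to matplotlib's LinearSegmentedColormap, and the Normalize instances
# above pin the data range (e.g. 5-75 dBZ for precipitation mode). The name
# 'NWSRef_example' is an arbitrary illustration.
def _example_build_nwsref_cmap():
    cmap = colors.LinearSegmentedColormap('NWSRef_example', _NWSRef_data,
                                          LUTSIZE)
    # e.g. ax.pcolormesh(lon, lat, refl, cmap=cmap, norm=NWSRefPrecip)
    return cmap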
_NWSVel_data = {
'blue': [(0.0, 0.62352941176470589, 0.62352941176470589),
(0.071428571428571425, 0.0, 0.0),
(0.14285714285714285, 0.0, 0.0),
(0.21428571428571427, 0.0, 0.0),
(0.2857142857142857, 0.0, 0.0),
(0.3571428571428571, 0.0, 0.0),
(0.42857142857142855, 0.0, 0.0),
(0.5, 0.46666666666666667, 0.46666666666666667),
(0.5714285714285714, 0.46666666666666667, 0.46666666666666667),
(0.64285714285714279, 0.0, 0.0),
(0.71428571428571419, 0.0, 0.0),
(0.7857142857142857, 0.0, 0.0),
(0.8571428571428571, 0.0, 0.0),
(0.92857142857142849, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.071428571428571425, 1.0, 1.0),
(0.14285714285714285, 0.90980392156862744, 0.90980392156862744),
(0.21428571428571427, 0.78431372549019607, 0.78431372549019607),
(0.2857142857142857, 0.69019607843137254, 0.69019607843137254),
(0.3571428571428571, 0.56470588235294117, 0.56470588235294117),
(0.42857142857142855, 0.4392156862745098, 0.4392156862745098),
(0.5, 0.59215686274509804, 0.59215686274509804),
(0.5714285714285714, 0.46666666666666667, 0.46666666666666667),
(0.64285714285714279, 0.0, 0.0),
(0.71428571428571419, 0.0, 0.0),
(0.7857142857142857, 0.0, 0.0),
(0.8571428571428571, 0.0, 0.0),
(0.92857142857142849, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.56470588235294117, 0.56470588235294117),
(0.071428571428571425, 0.0, 0.0),
(0.14285714285714285, 0.0, 0.0),
(0.21428571428571427, 0.0, 0.0),
(0.2857142857142857, 0.0, 0.0),
(0.3571428571428571, 0.0, 0.0),
(0.42857142857142855, 0.0, 0.0),
(0.5, 0.46666666666666667, 0.46666666666666667),
(0.5714285714285714, 0.59215686274509804, 0.59215686274509804),
(0.64285714285714279, 0.50196078431372548, 0.50196078431372548),
(0.71428571428571419, 0.62745098039215685, 0.62745098039215685),
(0.7857142857142857, 0.72156862745098038, 0.72156862745098038),
(0.8571428571428571, 0.84705882352941175, 0.84705882352941175),
(0.92857142857142849, 0.93333333333333335, 0.93333333333333335),
(1.0, 1.0, 1.0)]}
_NWS_SPW_data = {
'blue': [(0.0, 0.62352941176470589, 0.62352941176470589),
(0.16666666666666666, 0.46274509803921571, 0.46274509803921571),
(0.33333333333333331, 0.61176470588235299, 0.61176470588235299),
(0.5, 0.0, 0.0),
(0.66666666666666663, 0.0, 0.0),
(0.83333333333333326, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.16666666666666666, 0.46274509803921571, 0.46274509803921571),
(0.33333333333333331, 0.61176470588235299, 0.61176470588235299),
(0.5, 0.73333333333333328, 0.73333333333333328),
(0.66666666666666663, 0.0, 0.0),
(0.83333333333333326, 0.4392156862745098, 0.4392156862745098),
(1.0, 1.0, 1.0)],
'red': [(0.0, 0.56470588235294117, 0.56470588235294117),
(0.16666666666666666, 0.46274509803921571, 0.46274509803921571),
(0.33333333333333331, 0.61176470588235299, 0.61176470588235299),
(0.5, 0.0, 0.0),
(0.66666666666666663, 1.0, 1.0),
(0.83333333333333326, 0.81568627450980391, 0.81568627450980391),
(1.0, 1.0, 1.0)]}
#A couple of mine that I've found useful
_RefDiff_data = {
'blue': [(0.0, 1.0, 1.0),
(0.1111111111111111, 1.0, 1.0),
(0.22222222222222221, 1.0, 1.0),
(0.33333333333333331, 0.0, 0.0),
(0.44444444444444442, 0.0, 0.0),
(0.55555555555555558, 0.0, 0.0),
(0.66666666666666663, 0.0, 0.0),
(0.77777777777777768, 0.0, 0.0),
(0.88888888888888884, 0.0, 0.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.1111111111111111, 0.66666666666666663, 0.66666666666666663),
(0.22222222222222221, 1.0, 1.0),
(0.33333333333333331, 0.66666666666666663, 0.66666666666666663),
(0.44444444444444442, 1.0, 1.0),
(0.55555555555555558, 1.0, 1.0),
(0.66666666666666663, 0.66666666666666663, 0.66666666666666663),
(0.77777777777777768, 0.33333333333333331, 0.33333333333333331),
(0.88888888888888884, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.0, 0.0),
(0.1111111111111111, 0.0, 0.0),
(0.22222222222222221, 0.0, 0.0),
(0.33333333333333331, 0.0, 0.0),
(0.44444444444444442, 0.0, 0.0),
(0.55555555555555558, 1.0, 1.0),
(0.66666666666666663, 1.0, 1.0),
(0.77777777777777768, 1.0, 1.0),
(0.88888888888888884, 1.0, 1.0),
(1.0, 1.0, 1.0)]}
#These colortables come from SoloII
_Carbone11_data = {
'blue': [(0.0, 0.55686274509803924, 0.55686274509803924),
(0.10000000000000001, 0.8901960784313725, 0.8901960784313725),
(0.20000000000000001, 0.81176470588235294, 0.81176470588235294),
(0.30000000000000004, 0.12156862745098039, 0.12156862745098039),
(0.40000000000000002, 0.63529411764705879, 0.63529411764705879),
(0.5, 0.90588235294117647, 0.90588235294117647),
(0.60000000000000009, 0.050980392156862744, 0.050980392156862744),
(0.70000000000000007, 0.23529411764705882, 0.23529411764705882),
(0.80000000000000004, 0.34509803921568627, 0.34509803921568627),
(0.90000000000000002, 0.52941176470588236, 0.52941176470588236),
(1.0, 0.23529411764705882, 0.23529411764705882)],
'green': [(0.0, 0.062745098039215685, 0.062745098039215685),
(0.10000000000000001, 0.36078431372549019, 0.36078431372549019),
(0.20000000000000001, 0.68627450980392157, 0.68627450980392157),
(0.30000000000000004, 0.65098039215686276, 0.65098039215686276),
(0.40000000000000002, 0.81568627450980391, 0.81568627450980391),
(0.5, 0.90588235294117647, 0.90588235294117647),
(0.60000000000000009, 0.86274509803921573, 0.86274509803921573),
(0.70000000000000007, 0.69019607843137254, 0.69019607843137254),
(0.80000000000000004, 0.4392156862745098, 0.4392156862745098),
(0.90000000000000002, 0.4392156862745098, 0.4392156862745098),
(1.0, 0.12549019607843137, 0.12549019607843137)],
'red': [(0.0, 0.53725490196078429, 0.53725490196078429),
(0.10000000000000001, 0.46274509803921571, 0.46274509803921571),
(0.20000000000000001, 0.43137254901960786, 0.43137254901960786),
(0.30000000000000004, 0.12156862745098039, 0.12156862745098039),
(0.40000000000000002, 0.63529411764705879, 0.63529411764705879),
(0.5, 0.90588235294117647, 0.90588235294117647),
(0.60000000000000009, 0.96470588235294119, 0.96470588235294119),
(0.70000000000000007, 0.94117647058823528, 0.94117647058823528),
(0.80000000000000004, 0.63137254901960782, 0.63137254901960782),
(0.90000000000000002, 0.8901960784313725, 0.8901960784313725),
(1.0, 0.792156862745098, 0.792156862745098)]}
_Carbone17_data = {
'blue': [(0.0, 0.55686274509803924, 0.55686274509803924),
(0.0625, 0.61960784313725492, 0.61960784313725492),
(0.125, 0.88627450980392153, 0.88627450980392153),
(0.1875, 0.80784313725490198, 0.80784313725490198),
(0.25, 0.062745098039215685, 0.062745098039215685),
(0.3125, 0.12156862745098039, 0.12156862745098039),
(0.375, 0.42745098039215684, 0.42745098039215684),
(0.4375, 0.63137254901960782, 0.63137254901960782),
(0.5, 0.90588235294117647, 0.90588235294117647),
(0.5625, 0.047058823529411764, 0.047058823529411764),
(0.625, 0.10980392156862745, 0.10980392156862745),
(0.6875, 0.20784313725490197, 0.20784313725490197),
(0.75, 0.28627450980392155, 0.28627450980392155),
(0.8125, 0.3411764705882353, 0.3411764705882353),
(0.875, 0.46666666666666667, 0.46666666666666667),
(0.9375, 0.41568627450980394, 0.41568627450980394),
(1.0, 0.25098039215686274, 0.25098039215686274)],
'green': [(0.0, 0.062745098039215685, 0.062745098039215685),
(0.0625, 0.11764705882352941, 0.11764705882352941),
(0.125, 0.36078431372549019, 0.36078431372549019),
(0.1875, 0.68235294117647061, 0.68235294117647061),
(0.25, 0.54117647058823526, 0.54117647058823526),
(0.3125, 0.65098039215686276, 0.65098039215686276),
(0.375, 0.7686274509803922, 0.7686274509803922),
(0.4375, 0.81568627450980391, 0.81568627450980391),
(0.5, 0.90196078431372551, 0.90196078431372551),
(0.5625, 0.85882352941176465, 0.85882352941176465),
(0.625, 0.74901960784313726, 0.74901960784313726),
(0.6875, 0.63921568627450975, 0.63921568627450975),
(0.75, 0.53333333333333333, 0.53333333333333333),
(0.8125, 0.4392156862745098, 0.4392156862745098),
(0.875, 0.37254901960784315, 0.37254901960784315),
(0.9375, 0.27450980392156865, 0.27450980392156865),
(1.0, 0.15294117647058825, 0.15294117647058825)],
'red': [(0.0, 0.53725490196078429, 0.53725490196078429),
(0.0625, 0.29019607843137257, 0.29019607843137257),
(0.125, 0.45882352941176469, 0.45882352941176469),
(0.1875, 0.43529411764705883, 0.43529411764705883),
(0.25, 0.062745098039215685, 0.062745098039215685),
(0.3125, 0.12156862745098039, 0.12156862745098039),
(0.375, 0.42745098039215684, 0.42745098039215684),
(0.4375, 0.63137254901960782, 0.63137254901960782),
(0.5, 0.90588235294117647, 0.90588235294117647),
(0.5625, 0.98039215686274506, 0.98039215686274506),
(0.625, 0.98039215686274506, 0.98039215686274506),
(0.6875, 0.8784313725490196, 0.8784313725490196),
(0.75, 0.74901960784313726, 0.74901960784313726),
(0.8125, 0.62745098039215685, 0.62745098039215685),
(0.875, 0.87058823529411766, 0.87058823529411766),
(0.9375, 0.94117647058823528, 0.94117647058823528),
(1.0, 0.80784313725490198, 0.80784313725490198)]}
_RRate11_data = {
'blue': [(0.0, 0.50196078431372548, 0.50196078431372548),
(0.10000000000000001, 0.62745098039215685, 0.62745098039215685),
(0.20000000000000001, 0.93333333333333335, 0.93333333333333335),
(0.30000000000000004, 0.027450980392156862, 0.027450980392156862),
(0.40000000000000002, 0.34509803921568627, 0.34509803921568627),
(0.5, 0.92156862745098034, 0.92156862745098034),
(0.60000000000000009, 0.031372549019607843, 0.031372549019607843),
(0.70000000000000007, 0.15294117647058825, 0.15294117647058825),
(0.80000000000000004, 0.30588235294117649, 0.30588235294117649),
(0.90000000000000002, 0.43137254901960786, 0.43137254901960786),
(1.0, 0.23529411764705882, 0.23529411764705882)],
'green': [(0.0, 0.0, 0.0),
(0.10000000000000001, 0.17254901960784313, 0.17254901960784313),
(0.20000000000000001, 0.36078431372549019, 0.36078431372549019),
(0.30000000000000004, 0.52156862745098043, 0.52156862745098043),
(0.40000000000000002, 0.74509803921568629, 0.74509803921568629),
(0.5, 0.93333333333333335, 0.93333333333333335),
(0.60000000000000009, 0.85490196078431369, 0.85490196078431369),
(0.70000000000000007, 0.60784313725490191, 0.60784313725490191),
(0.80000000000000004, 0.38823529411764707, 0.38823529411764707),
(0.90000000000000002, 0.3843137254901961, 0.3843137254901961),
(1.0, 0.062745098039215685, 0.062745098039215685)],
'red': [(0.0, 0.50196078431372548, 0.50196078431372548),
(0.10000000000000001, 0.29803921568627451, 0.29803921568627451),
(0.20000000000000001, 0.42352941176470588, 0.42352941176470588),
(0.30000000000000004, 0.035294117647058823, 0.035294117647058823),
(0.40000000000000002, 0.36470588235294116, 0.36470588235294116),
(0.5, 0.94117647058823528, 0.94117647058823528),
(0.60000000000000009, 0.99215686274509807, 0.99215686274509807),
(0.70000000000000007, 0.93333333333333335, 0.93333333333333335),
(0.80000000000000004, 0.58823529411764708, 0.58823529411764708),
(0.90000000000000002, 0.93333333333333335, 0.93333333333333335),
(1.0, 0.83137254901960789, 0.83137254901960789)]}
_BlueBrown10_data = {
'blue': [(0.0, 0.75294117647058822, 0.75294117647058822),
(0.1111111111111111, 0.81176470588235294, 0.81176470588235294),
(0.22222222222222221, 0.87450980392156863, 0.87450980392156863),
(0.33333333333333331, 0.93725490196078431, 0.93725490196078431),
(0.44444444444444442, 1.0, 1.0),
(0.55555555555555558, 0.69411764705882351, 0.69411764705882351),
(0.66666666666666663, 0.60392156862745094, 0.60392156862745094),
(0.77777777777777768, 0.51372549019607838, 0.51372549019607838),
(0.88888888888888884, 0.42745098039215684, 0.42745098039215684),
(1.0, 0.33725490196078434, 0.33725490196078434)],
'green': [(0.0, 0.37647058823529411, 0.37647058823529411),
(0.1111111111111111, 0.51372549019607838, 0.51372549019607838),
(0.22222222222222221, 0.65098039215686276, 0.65098039215686276),
(0.33333333333333331, 0.78823529411764703, 0.78823529411764703),
(0.44444444444444442, 0.92941176470588238, 0.92941176470588238),
(0.55555555555555558, 0.90196078431372551, 0.90196078431372551),
(0.66666666666666663, 0.76078431372549016, 0.76078431372549016),
(0.77777777777777768, 0.62352941176470589, 0.62352941176470589),
(0.88888888888888884, 0.48627450980392156, 0.48627450980392156),
(1.0, 0.34509803921568627, 0.34509803921568627)],
'red': [(0.0, 0.0039215686274509803, 0.0039215686274509803),
(0.1111111111111111, 0.0039215686274509803, 0.0039215686274509803),
(0.22222222222222221, 0.0039215686274509803, 0.0039215686274509803),
(0.33333333333333331, 0.0039215686274509803, 0.0039215686274509803),
(0.44444444444444442, 0.0078431372549019607, 0.0078431372549019607),
(0.55555555555555558, 1.0, 1.0),
(0.66666666666666663, 0.90196078431372551, 0.90196078431372551),
(0.77777777777777768, 0.80392156862745101, 0.80392156862745101),
(0.88888888888888884, 0.70588235294117652, 0.70588235294117652),
(1.0, 0.60784313725490191, 0.60784313725490191)]}
_BlueBrown11_data = {
'blue': [(0.0, 0.75294117647058822, 0.75294117647058822),
(0.10000000000000001, 0.81176470588235294, 0.81176470588235294),
(0.20000000000000001, 0.87450980392156863, 0.87450980392156863),
(0.30000000000000004, 0.93725490196078431, 0.93725490196078431),
(0.40000000000000002, 1.0, 1.0),
(0.5, 0.90588235294117647, 0.90588235294117647),
(0.60000000000000009, 0.69411764705882351, 0.69411764705882351),
(0.70000000000000007, 0.60392156862745094, 0.60392156862745094),
(0.80000000000000004, 0.51372549019607838, 0.51372549019607838),
(0.90000000000000002, 0.42745098039215684, 0.42745098039215684),
(1.0, 0.33725490196078434, 0.33725490196078434)],
'green': [(0.0, 0.37647058823529411, 0.37647058823529411),
(0.10000000000000001, 0.51372549019607838, 0.51372549019607838),
(0.20000000000000001, 0.65098039215686276, 0.65098039215686276),
(0.30000000000000004, 0.78823529411764703, 0.78823529411764703),
(0.40000000000000002, 0.92941176470588238, 0.92941176470588238),
(0.5, 0.90196078431372551, 0.90196078431372551),
(0.60000000000000009, 0.90196078431372551, 0.90196078431372551),
(0.70000000000000007, 0.76078431372549016, 0.76078431372549016),
(0.80000000000000004, 0.62352941176470589, 0.62352941176470589),
(0.90000000000000002, 0.48627450980392156, 0.48627450980392156),
(1.0, 0.34509803921568627, 0.34509803921568627)],
'red': [(0.0, 0.0039215686274509803, 0.0039215686274509803),
(0.10000000000000001, 0.0039215686274509803, 0.0039215686274509803),
(0.20000000000000001, 0.0039215686274509803, 0.0039215686274509803),
(0.30000000000000004, 0.0039215686274509803, 0.0039215686274509803),
(0.40000000000000002, 0.0078431372549019607, 0.0078431372549019607),
(0.5, 0.90588235294117647, 0.90588235294117647),
(0.60000000000000009, 1.0, 1.0),
(0.70000000000000007, 0.90196078431372551, 0.90196078431372551),
(0.80000000000000004, 0.80392156862745101, 0.80392156862745101),
(0.90000000000000002, 0.70588235294117652, 0.70588235294117652),
(1.0, 0.60784313725490191, 0.60784313725490191)]}
_Theodore16_data = {
'blue': [(0.0, 0.99215686274509807, 0.99215686274509807),
(0.066666666666666666, 0.99215686274509807, 0.99215686274509807),
(0.13333333333333333, 0.82745098039215681, 0.82745098039215681),
(0.20000000000000001, 0.6705882352941176, 0.6705882352941176),
(0.26666666666666666, 0.0, 0.0),
(0.33333333333333331, 0.36470588235294116, 0.36470588235294116),
(0.40000000000000002, 0.59215686274509804, 0.59215686274509804),
(0.46666666666666667, 0.82352941176470584, 0.82352941176470584),
(0.53333333333333333, 0.58431372549019611, 0.58431372549019611),
(0.59999999999999998, 0.0039215686274509803, 0.0039215686274509803),
(0.66666666666666663, 0.062745098039215685, 0.062745098039215685),
(0.73333333333333328, 0.17647058823529413, 0.17647058823529413),
(0.80000000000000004, 0.11372549019607843, 0.11372549019607843),
(0.8666666666666667, 0.34901960784313724, 0.34901960784313724),
(0.93333333333333335, 0.24705882352941178, 0.24705882352941178),
(1.0, 0.46666666666666667, 0.46666666666666667)],
'green': [(0.0, 0.67450980392156867, 0.67450980392156867),
(0.066666666666666666, 0.55686274509803924, 0.55686274509803924),
(0.13333333333333333, 0.45098039215686275, 0.45098039215686275),
(0.20000000000000001, 0.30980392156862746, 0.30980392156862746),
(0.26666666666666666, 0.56862745098039214, 0.56862745098039214),
(0.33333333333333331, 0.70588235294117652, 0.70588235294117652),
(0.40000000000000002, 0.80000000000000004, 0.80000000000000004),
(0.46666666666666667, 0.89411764705882357, 0.89411764705882357),
(0.53333333333333333, 0.8901960784313725, 0.8901960784313725),
(0.59999999999999998, 0.79607843137254897, 0.79607843137254897),
(0.66666666666666663, 0.70196078431372544, 0.70196078431372544),
(0.73333333333333328, 0.52941176470588236, 0.52941176470588236),
(0.80000000000000004, 0.11372549019607843, 0.11372549019607843),
(0.8666666666666667, 0.35294117647058826, 0.35294117647058826),
(0.93333333333333335, 0.24705882352941178, 0.24705882352941178),
(1.0, 0.29019607843137257, 0.29019607843137257)],
'red': [(0.0, 0.67450980392156867, 0.67450980392156867),
(0.066666666666666666, 0.55686274509803924, 0.55686274509803924),
(0.13333333333333333, 0.45098039215686275, 0.45098039215686275),
(0.20000000000000001, 0.30588235294117649, 0.30588235294117649),
(0.26666666666666666, 0.12941176470588237, 0.12941176470588237),
(0.33333333333333331, 0.36470588235294116, 0.36470588235294116),
(0.40000000000000002, 0.59215686274509804, 0.59215686274509804),
(0.46666666666666667, 0.82352941176470584, 0.82352941176470584),
(0.53333333333333333, 0.93725490196078431, 0.93725490196078431),
(0.59999999999999998, 0.93725490196078431, 0.93725490196078431),
(0.66666666666666663, 0.93725490196078431, 0.93725490196078431),
(0.73333333333333328, 0.75686274509803919, 0.75686274509803919),
(0.80000000000000004, 0.64313725490196083, 0.64313725490196083),
(0.8666666666666667, 0.74117647058823533, 0.74117647058823533),
(0.93333333333333335, 0.86274509803921573, 0.86274509803921573),
(1.0, 0.97254901960784312, 0.97254901960784312)]}
_EWilson17_data = {
'blue': [(0.0, 0.97647058823529409, 0.97647058823529409),
(0.0625, 0.97647058823529409, 0.97647058823529409),
(0.125, 0.93725490196078431, 0.93725490196078431),
(0.1875, 0.97647058823529409, 0.97647058823529409),
(0.25, 0.89411764705882357, 0.89411764705882357),
(0.3125, 0.0, 0.0),
(0.375, 0.30980392156862746, 0.30980392156862746),
(0.4375, 0.0, 0.0),
(0.5, 0.93725490196078431, 0.93725490196078431),
(0.5625, 0.27058823529411763, 0.27058823529411763),
(0.625, 0.46666666666666667, 0.46666666666666667),
(0.6875, 0.0, 0.0),
(0.75, 0.0, 0.0),
(0.8125, 0.0, 0.0),
(0.875, 0.68235294117647061, 0.68235294117647061),
(0.9375, 0.46666666666666667, 0.46666666666666667),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.0625, 0.38823529411764707, 0.38823529411764707),
(0.125, 0.9882352941176471, 0.9882352941176471),
(0.1875, 0.62352941176470589, 0.62352941176470589),
(0.25, 0.0, 0.0),
(0.3125, 0.97647058823529409, 0.97647058823529409),
(0.375, 0.74117647058823533, 0.74117647058823533),
(0.4375, 0.53725490196078429, 0.53725490196078429),
(0.5, 0.93725490196078431, 0.93725490196078431),
(0.5625, 0.48627450980392156, 0.48627450980392156),
(0.625, 0.62352941176470589, 0.62352941176470589),
(0.6875, 0.48627450980392156, 0.48627450980392156),
(0.75, 0.64313725490196083, 0.64313725490196083),
(0.8125, 0.81960784313725488, 0.81960784313725488),
(0.875, 0.68235294117647061, 0.68235294117647061),
(0.9375, 0.46666666666666667, 0.46666666666666667),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.93725490196078431, 0.93725490196078431),
(0.0625, 0.68235294117647061, 0.68235294117647061),
(0.125, 0.0, 0.0),
(0.1875, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.3125, 0.0, 0.0),
(0.375, 0.30980392156862746, 0.30980392156862746),
(0.4375, 0.0, 0.0),
(0.5, 0.93725490196078431, 0.93725490196078431),
(0.5625, 0.70196078431372544, 0.70196078431372544),
(0.625, 0.74117647058823533, 0.74117647058823533),
(0.6875, 0.97647058823529409, 0.97647058823529409),
(0.75, 0.97647058823529409, 0.97647058823529409),
(0.8125, 0.93725490196078431, 0.93725490196078431),
(0.875, 0.99215686274509807, 0.99215686274509807),
(0.9375, 0.97647058823529409, 0.97647058823529409),
(1.0, 0.99215686274509807, 0.99215686274509807)]}
_Wild25_data = {
'blue': [(0.0, 1.0, 1.0),
(0.041666666666666664, 1.0, 1.0),
(0.083333333333333329, 1.0, 1.0),
(0.125, 1.0, 1.0),
(0.16666666666666666, 1.0, 1.0),
(0.20833333333333331, 1.0, 1.0),
(0.25, 1.0, 1.0),
(0.29166666666666663, 1.0, 1.0),
(0.33333333333333331, 0.066666666666666666, 0.066666666666666666),
(0.375, 0.066666666666666666, 0.066666666666666666),
(0.41666666666666663, 0.066666666666666666, 0.066666666666666666),
(0.45833333333333331, 0.066666666666666666, 0.066666666666666666),
(0.5, 0.78431372549019607, 0.78431372549019607),
(0.54166666666666663, 0.011764705882352941, 0.011764705882352941),
(0.58333333333333326, 0.37647058823529411, 0.37647058823529411),
(0.625, 0.12156862745098039, 0.12156862745098039),
(0.66666666666666663, 0.011764705882352941, 0.011764705882352941),
(0.70833333333333326, 0.74509803921568629, 0.74509803921568629),
(0.75, 0.53333333333333333, 0.53333333333333333),
(0.79166666666666663, 0.3411764705882353, 0.3411764705882353),
(0.83333333333333326, 0.0, 0.0),
(0.875, 0.0, 0.0),
(0.91666666666666663, 0.0, 0.0),
(0.95833333333333326, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.015686274509803921, 0.015686274509803921),
(0.041666666666666664, 0.015686274509803921, 0.015686274509803921),
(0.083333333333333329, 0.015686274509803921, 0.015686274509803921),
(0.125, 0.015686274509803921, 0.015686274509803921),
(0.16666666666666666, 0.015686274509803921, 0.015686274509803921),
(0.20833333333333331, 0.015686274509803921, 0.015686274509803921),
(0.25, 0.50196078431372548, 0.50196078431372548),
(0.29166666666666663, 0.75686274509803919, 0.75686274509803919),
(0.33333333333333331, 0.9882352941176471, 0.9882352941176471),
(0.375, 0.66274509803921566, 0.66274509803921566),
(0.41666666666666663, 0.51764705882352946, 0.51764705882352946),
(0.45833333333333331, 0.32549019607843138, 0.32549019607843138),
(0.5, 0.78431372549019607, 0.78431372549019607),
(0.54166666666666663, 0.28235294117647058, 0.28235294117647058),
(0.58333333333333326, 0.58431372549019611, 0.58431372549019611),
(0.625, 0.72156862745098038, 0.72156862745098038),
(0.66666666666666663, 1.0, 1.0),
(0.70833333333333326, 0.74509803921568629, 0.74509803921568629),
(0.75, 0.53333333333333333, 0.53333333333333333),
(0.79166666666666663, 0.3411764705882353, 0.3411764705882353),
(0.83333333333333326, 0.46666666666666667, 0.46666666666666667),
(0.875, 0.31372549019607843, 0.31372549019607843),
(0.91666666666666663, 0.0, 0.0),
(0.95833333333333326, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.99607843137254903, 0.99607843137254903),
(0.041666666666666664, 0.85098039215686272, 0.85098039215686272),
(0.083333333333333329, 0.70588235294117652, 0.70588235294117652),
(0.125, 0.5725490196078431, 0.5725490196078431),
(0.16666666666666666, 0.38823529411764707, 0.38823529411764707),
(0.20833333333333331, 0.015686274509803921, 0.015686274509803921),
(0.25, 0.015686274509803921, 0.015686274509803921),
(0.29166666666666663, 0.015686274509803921, 0.015686274509803921),
(0.33333333333333331, 0.015686274509803921, 0.015686274509803921),
(0.375, 0.015686274509803921, 0.015686274509803921),
(0.41666666666666663, 0.015686274509803921, 0.015686274509803921),
(0.45833333333333331, 0.015686274509803921, 0.015686274509803921),
(0.5, 0.78431372549019607, 0.78431372549019607),
(0.54166666666666663, 0.65490196078431373, 0.65490196078431373),
(0.58333333333333326, 0.85098039215686272, 0.85098039215686272),
(0.625, 1.0, 1.0),
(0.66666666666666663, 1.0, 1.0),
(0.70833333333333326, 0.99607843137254903, 0.99607843137254903),
(0.75, 0.99607843137254903, 0.99607843137254903),
(0.79166666666666663, 0.99607843137254903, 0.99607843137254903),
(0.83333333333333326, 0.99607843137254903, 0.99607843137254903),
(0.875, 0.87450980392156863, 0.87450980392156863),
(0.91666666666666663, 0.54117647058823526, 0.54117647058823526),
(0.95833333333333326, 0.82352941176470584, 0.82352941176470584),
(1.0, 1.0, 1.0)]}
_SCook18_data = {
'blue': [(0.0, 0.9137254901960784, 0.9137254901960784),
(0.058823529411764705, 0.9137254901960784, 0.9137254901960784),
(0.11764705882352941, 0.88627450980392153, 0.88627450980392153),
(0.1764705882352941, 0.85490196078431369, 0.85490196078431369),
(0.23529411764705882, 0.69019607843137254, 0.69019607843137254),
(0.29411764705882354, 0.42352941176470588, 0.42352941176470588),
(0.3529411764705882, 0.054901960784313725, 0.054901960784313725),
(0.41176470588235292, 0.011764705882352941, 0.011764705882352941),
(0.47058823529411764, 0.3843137254901961, 0.3843137254901961),
(0.52941176470588236, 0.45098039215686275, 0.45098039215686275),
(0.58823529411764708, 0.30980392156862746, 0.30980392156862746),
(0.6470588235294118, 0.18823529411764706, 0.18823529411764706),
(0.70588235294117641, 0.066666666666666666, 0.066666666666666666),
(0.76470588235294112, 0.058823529411764705, 0.058823529411764705),
(0.82352941176470584, 0.2196078431372549, 0.2196078431372549),
(0.88235294117647056, 0.3411764705882353, 0.3411764705882353),
(0.94117647058823528, 0.070588235294117646, 0.070588235294117646),
(1.0, 0.074509803921568626, 0.074509803921568626)],
'green': [(0.0, 0.070588235294117646, 0.070588235294117646),
(0.058823529411764705, 0.21176470588235294, 0.21176470588235294),
(0.11764705882352941, 0.36078431372549019, 0.36078431372549019),
(0.1764705882352941, 0.52156862745098043, 0.52156862745098043),
(0.23529411764705882, 0.71372549019607845, 0.71372549019607845),
(0.29411764705882354, 0.88235294117647056, 0.88235294117647056),
(0.3529411764705882, 0.7803921568627451, 0.7803921568627451),
(0.41176470588235292, 0.66666666666666663, 0.66666666666666663),
(0.47058823529411764, 0.58431372549019611, 0.58431372549019611),
(0.52941176470588236, 0.58039215686274515, 0.58039215686274515),
(0.58823529411764708, 0.54117647058823526, 0.54117647058823526),
(0.6470588235294118, 0.55686274509803924, 0.55686274509803924),
(0.70588235294117641, 0.58823529411764708, 0.58823529411764708),
(0.76470588235294112, 0.67450980392156867, 0.67450980392156867),
(0.82352941176470584, 0.51372549019607838, 0.51372549019607838),
(0.88235294117647056, 0.49019607843137253, 0.49019607843137253),
(0.94117647058823528, 0.070588235294117646, 0.070588235294117646),
(1.0, 0.074509803921568626, 0.074509803921568626)],
'red': [(0.0, 0.76078431372549016, 0.76078431372549016),
(0.058823529411764705, 0.57647058823529407, 0.57647058823529407),
(0.11764705882352941, 0.32549019607843138, 0.32549019607843138),
(0.1764705882352941, 0.082352941176470587, 0.082352941176470587),
(0.23529411764705882, 0.0078431372549019607, 0.0078431372549019607),
(0.29411764705882354, 0.0078431372549019607, 0.0078431372549019607),
(0.3529411764705882, 0.0039215686274509803, 0.0039215686274509803),
(0.41176470588235292, 0.0078431372549019607, 0.0078431372549019607),
(0.47058823529411764, 0.3843137254901961, 0.3843137254901961),
(0.52941176470588236, 0.70196078431372544, 0.70196078431372544),
(0.58823529411764708, 0.75294117647058822, 0.75294117647058822),
(0.6470588235294118, 0.85882352941176465, 0.85882352941176465),
(0.70588235294117641, 0.95294117647058818, 0.95294117647058818),
(0.76470588235294112, 0.97254901960784312, 0.97254901960784312),
(0.82352941176470584, 0.89803921568627454, 0.89803921568627454),
(0.88235294117647056, 0.96862745098039216, 0.96862745098039216),
(0.94117647058823528, 0.99215686274509807, 0.99215686274509807),
(1.0, 0.85490196078431369, 0.85490196078431369)]}
_PD17_data = {
'blue': [(0.0, 0.71372549019607845, 0.71372549019607845),
(0.0625, 0.60784313725490191, 0.60784313725490191),
(0.125, 0.0, 0.0),
(0.1875, 0.039215686274509803, 0.039215686274509803),
(0.25, 0.18431372549019609, 0.18431372549019609),
(0.3125, 0.1803921568627451, 0.1803921568627451),
(0.375, 0.14509803921568629, 0.14509803921568629),
(0.4375, 0.027450980392156862, 0.027450980392156862),
(0.5, 0.0, 0.0),
(0.5625, 1.0, 1.0),
(0.625, 0.93725490196078431, 0.93725490196078431),
(0.6875, 0.75294117647058822, 0.75294117647058822),
(0.75, 1.0, 1.0),
(0.8125, 0.71372549019607845, 0.71372549019607845),
(0.875, 0.97254901960784312, 0.97254901960784312),
(0.9375, 0.50196078431372548, 0.50196078431372548),
(1.0, 0.6470588235294118, 0.6470588235294118)],
'green': [(0.0, 0.81176470588235294, 0.81176470588235294),
(0.0625, 0.84705882352941175, 0.84705882352941175),
(0.125, 0.58823529411764708, 0.58823529411764708),
(0.1875, 0.46666666666666667, 0.46666666666666667),
(0.25, 0.18431372549019609, 0.18431372549019609),
(0.3125, 0.98039215686274506, 0.98039215686274506),
(0.375, 0.98039215686274506, 0.98039215686274506),
(0.4375, 0.77647058823529413, 0.77647058823529413),
(0.5, 0.5725490196078431, 0.5725490196078431),
(0.5625, 0.87450980392156863, 0.87450980392156863),
(0.625, 0.58039215686274515, 0.58039215686274515),
(0.6875, 0.32941176470588235, 0.32941176470588235),
(0.75, 0.792156862745098, 0.792156862745098),
(0.8125, 0.60392156862745094, 0.60392156862745094),
(0.875, 0.97254901960784312, 0.97254901960784312),
(0.9375, 0.62745098039215685, 0.62745098039215685),
(1.0, 0.11372549019607843, 0.11372549019607843)],
'red': [(0.0, 0.81568627450980391, 0.81568627450980391),
(0.0625, 1.0, 1.0),
(0.125, 1.0, 1.0),
(0.1875, 0.80392156862745101, 0.80392156862745101),
(0.25, 1.0, 1.0),
(0.3125, 0.98039215686274506, 0.98039215686274506),
(0.375, 0.14117647058823529, 0.14117647058823529),
(0.4375, 0.10196078431372549, 0.10196078431372549),
(0.5, 0.0, 0.0),
(0.5625, 0.30196078431372547, 0.30196078431372547),
(0.625, 0.3843137254901961, 0.3843137254901961),
(0.6875, 0.37254901960784315, 0.37254901960784315),
(0.75, 1.0, 1.0),
(0.8125, 0.96862745098039216, 0.96862745098039216),
(0.875, 0.97254901960784312, 0.97254901960784312),
(0.9375, 0.62745098039215685, 0.62745098039215685),
(1.0, 0.7686274509803922, 0.7686274509803922)]}
_Gray5_data = {
'blue': [(0.0, 0.42352941176470588, 0.42352941176470588),
(0.25, 0.53333333333333333, 0.53333333333333333),
(0.5, 0.6588235294117647, 0.6588235294117647),
(0.75, 0.81568627450980391, 0.81568627450980391),
(1.0, 0.93725490196078431, 0.93725490196078431)],
'green': [(0.0, 0.42352941176470588, 0.42352941176470588),
(0.25, 0.53333333333333333, 0.53333333333333333),
(0.5, 0.6588235294117647, 0.6588235294117647),
(0.75, 0.81568627450980391, 0.81568627450980391),
(1.0, 0.93725490196078431, 0.93725490196078431)],
'red': [(0.0, 0.42352941176470588, 0.42352941176470588),
(0.25, 0.53333333333333333, 0.53333333333333333),
(0.5, 0.6588235294117647, 0.6588235294117647),
(0.75, 0.81568627450980391, 0.81568627450980391),
(1.0, 0.93725490196078431, 0.93725490196078431)]}
_Gray9_data = {
'blue': [(0.0, 0.37647058823529411, 0.37647058823529411),
(0.125, 0.45098039215686275, 0.45098039215686275),
(0.25, 0.52549019607843139, 0.52549019607843139),
(0.375, 0.59999999999999998, 0.59999999999999998),
(0.5, 0.67450980392156867, 0.67450980392156867),
(0.625, 0.75294117647058822, 0.75294117647058822),
(0.75, 0.82352941176470584, 0.82352941176470584),
(0.875, 0.90196078431372551, 0.90196078431372551),
(1.0, 0.97647058823529409, 0.97647058823529409)],
'green': [(0.0, 0.37647058823529411, 0.37647058823529411),
(0.125, 0.45098039215686275, 0.45098039215686275),
(0.25, 0.52549019607843139, 0.52549019607843139),
(0.375, 0.59999999999999998, 0.59999999999999998),
(0.5, 0.67450980392156867, 0.67450980392156867),
(0.625, 0.75294117647058822, 0.75294117647058822),
(0.75, 0.82352941176470584, 0.82352941176470584),
(0.875, 0.90196078431372551, 0.90196078431372551),
(1.0, 0.97647058823529409, 0.97647058823529409)],
'red': [(0.0, 0.37647058823529411, 0.37647058823529411),
(0.125, 0.45098039215686275, 0.45098039215686275),
(0.25, 0.52549019607843139, 0.52549019607843139),
(0.375, 0.59999999999999998, 0.59999999999999998),
(0.5, 0.67450980392156867, 0.67450980392156867),
(0.625, 0.75294117647058822, 0.75294117647058822),
(0.75, 0.82352941176470584, 0.82352941176470584),
(0.875, 0.90196078431372551, 0.90196078431372551),
(1.0, 0.97647058823529409, 0.97647058823529409)]}
_SymGray12_data = {
'blue': [(0.0, 0.75294117647058822, 0.75294117647058822),
(0.090909090909090912, 0.80000000000000004, 0.80000000000000004),
(0.18181818181818182, 0.84313725490196079, 0.84313725490196079),
(0.27272727272727271, 0.8901960784313725, 0.8901960784313725),
(0.36363636363636365, 0.93725490196078431, 0.93725490196078431),
(0.45454545454545459, 0.98431372549019602, 0.98431372549019602),
(0.54545454545454541, 0.69019607843137254, 0.69019607843137254),
(0.63636363636363635, 0.63529411764705879, 0.63529411764705879),
(0.72727272727272729, 0.57647058823529407, 0.57647058823529407),
(0.81818181818181823, 0.52156862745098043, 0.52156862745098043),
(0.90909090909090917, 0.46666666666666667, 0.46666666666666667),
(1.0, 0.41176470588235292, 0.41176470588235292)],
'green': [(0.0, 0.75294117647058822, 0.75294117647058822),
(0.090909090909090912, 0.80000000000000004, 0.80000000000000004),
(0.18181818181818182, 0.84313725490196079, 0.84313725490196079),
(0.27272727272727271, 0.8901960784313725, 0.8901960784313725),
(0.36363636363636365, 0.93725490196078431, 0.93725490196078431),
(0.45454545454545459, 0.98431372549019602, 0.98431372549019602),
(0.54545454545454541, 0.69019607843137254, 0.69019607843137254),
(0.63636363636363635, 0.63529411764705879, 0.63529411764705879),
(0.72727272727272729, 0.57647058823529407, 0.57647058823529407),
(0.81818181818181823, 0.52156862745098043, 0.52156862745098043),
(0.90909090909090917, 0.46666666666666667, 0.46666666666666667),
(1.0, 0.41176470588235292, 0.41176470588235292)],
'red': [(0.0, 0.75294117647058822, 0.75294117647058822),
(0.090909090909090912, 0.80000000000000004, 0.80000000000000004),
(0.18181818181818182, 0.84313725490196079, 0.84313725490196079),
(0.27272727272727271, 0.8901960784313725, 0.8901960784313725),
(0.36363636363636365, 0.93725490196078431, 0.93725490196078431),
(0.45454545454545459, 0.98431372549019602, 0.98431372549019602),
(0.54545454545454541, 0.69019607843137254, 0.69019607843137254),
(0.63636363636363635, 0.63529411764705879, 0.63529411764705879),
(0.72727272727272729, 0.57647058823529407, 0.57647058823529407),
(0.81818181818181823, 0.52156862745098043, 0.52156862745098043),
(0.90909090909090917, 0.46666666666666667, 0.46666666666666667),
(1.0, 0.41176470588235292, 0.41176470588235292)]}
_Carbone42_data = {
'blue': [(0.0, 0.63921568627450975, 0.63921568627450975),
(0.024390243902439025, 0.55686274509803924, 0.55686274509803924),
(0.04878048780487805, 0.58039215686274515, 0.58039215686274515),
(0.073170731707317083, 0.6705882352941176, 0.6705882352941176),
(0.097560975609756101, 0.76470588235294112, 0.76470588235294112),
(0.12195121951219512, 0.89411764705882357, 0.89411764705882357),
(0.14634146341463417, 0.99215686274509807, 0.99215686274509807),
(0.17073170731707318, 0.99215686274509807, 0.99215686274509807),
(0.1951219512195122, 0.59999999999999998, 0.59999999999999998),
(0.21951219512195122, 0.36862745098039218, 0.36862745098039218),
(0.24390243902439024, 0.0, 0.0),
(0.26829268292682928, 0.0, 0.0),
(0.29268292682926833, 0.0, 0.0),
(0.31707317073170732, 0.058823529411764705, 0.058823529411764705),
(0.34146341463414637, 0.17254901960784313, 0.17254901960784313),
(0.36585365853658536, 0.28627450980392155, 0.28627450980392155),
(0.3902439024390244, 0.40000000000000002, 0.40000000000000002),
(0.41463414634146345, 0.51764705882352946, 0.51764705882352946),
(0.43902439024390244, 0.63137254901960782, 0.63137254901960782),
(0.46341463414634149, 0.74901960784313726, 0.74901960784313726),
(0.48780487804878048, 0.81568627450980391, 0.81568627450980391),
(0.51219512195121952, 0.69019607843137254, 0.69019607843137254),
(0.53658536585365857, 0.34901960784313724, 0.34901960784313724),
(0.56097560975609762, 0.0, 0.0),
(0.58536585365853666, 0.019607843137254902, 0.019607843137254902),
(0.6097560975609756, 0.054901960784313725, 0.054901960784313725),
(0.63414634146341464, 0.082352941176470587, 0.082352941176470587),
(0.65853658536585369, 0.11372549019607843, 0.11372549019607843),
(0.68292682926829273, 0.14509803921568629, 0.14509803921568629),
(0.70731707317073178, 0.17647058823529413, 0.17647058823529413),
(0.73170731707317072, 0.20784313725490197, 0.20784313725490197),
(0.75609756097560976, 0.23921568627450981, 0.23921568627450981),
(0.78048780487804881, 0.24705882352941178, 0.24705882352941178),
(0.80487804878048785, 0.24705882352941178, 0.24705882352941178),
(0.8292682926829269, 0.29411764705882354, 0.29411764705882354),
(0.85365853658536583, 0.37254901960784315, 0.37254901960784315),
(0.87804878048780488, 0.0, 0.0),
(0.90243902439024393, 0.18431372549019609, 0.18431372549019609),
(0.92682926829268297, 0.27058823529411763, 0.27058823529411763),
(0.95121951219512202, 0.35294117647058826, 0.35294117647058826),
(0.97560975609756095, 0.35294117647058826, 0.35294117647058826),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.019607843137254902, 0.019607843137254902),
(0.024390243902439025, 0.22352941176470589, 0.22352941176470589),
(0.04878048780487805, 0.054901960784313725, 0.054901960784313725),
(0.073170731707317083, 0.054901960784313725, 0.054901960784313725),
(0.097560975609756101, 0.054901960784313725, 0.054901960784313725),
(0.12195121951219512, 0.13725490196078433, 0.13725490196078433),
(0.14634146341463417, 0.37254901960784315, 0.37254901960784315),
(0.17073170731707318, 0.55686274509803924, 0.55686274509803924),
(0.1951219512195122, 0.58823529411764708, 0.58823529411764708),
(0.21951219512195122, 0.52156862745098043, 0.52156862745098043),
(0.24390243902439024, 0.44313725490196076, 0.44313725490196076),
(0.26829268292682928, 0.49019607843137253, 0.49019607843137253),
(0.29268292682926833, 0.53725490196078429, 0.53725490196078429),
(0.31707317073170732, 0.58431372549019611, 0.58431372549019611),
(0.34146341463414637, 0.63137254901960782, 0.63137254901960782),
(0.36585365853658536, 0.67843137254901964, 0.67843137254901964),
(0.3902439024390244, 0.72156862745098038, 0.72156862745098038),
(0.41463414634146345, 0.7686274509803922, 0.7686274509803922),
(0.43902439024390244, 0.81568627450980391, 0.81568627450980391),
(0.46341463414634149, 0.86274509803921573, 0.86274509803921573),
(0.48780487804878048, 0.8901960784313725, 0.8901960784313725),
(0.51219512195121952, 0.8901960784313725, 0.8901960784313725),
(0.53658536585365857, 0.85882352941176465, 0.85882352941176465),
(0.56097560975609762, 0.81176470588235294, 0.81176470588235294),
(0.58536585365853666, 0.76470588235294112, 0.76470588235294112),
(0.6097560975609756, 0.71764705882352942, 0.71764705882352942),
(0.63414634146341464, 0.6705882352941176, 0.6705882352941176),
(0.65853658536585369, 0.62352941176470589, 0.62352941176470589),
(0.68292682926829273, 0.57647058823529407, 0.57647058823529407),
(0.70731707317073178, 0.52941176470588236, 0.52941176470588236),
(0.73170731707317072, 0.4823529411764706, 0.4823529411764706),
(0.75609756097560976, 0.43529411764705883, 0.43529411764705883),
(0.78048780487804881, 0.38823529411764707, 0.38823529411764707),
(0.80487804878048785, 0.3411764705882353, 0.3411764705882353),
(0.8292682926829269, 0.32549019607843138, 0.32549019607843138),
(0.85365853658536583, 0.30980392156862746, 0.30980392156862746),
(0.87804878048780488, 0.0, 0.0),
(0.90243902439024393, 0.082352941176470587, 0.082352941176470587),
(0.92682926829268297, 0.14509803921568629, 0.14509803921568629),
(0.95121951219512202, 0.20784313725490197, 0.20784313725490197),
(0.97560975609756095, 0.27058823529411763, 0.27058823529411763),
(1.0, 0.011764705882352941, 0.011764705882352941)],
'red': [(0.0, 0.46666666666666667, 0.46666666666666667),
(0.024390243902439025, 0.40000000000000002, 0.40000000000000002),
(0.04878048780487805, 0.16078431372549021, 0.16078431372549021),
(0.073170731707317083, 0.22352941176470589, 0.22352941176470589),
(0.097560975609756101, 0.28627450980392155, 0.28627450980392155),
(0.12195121951219512, 0.34901960784313724, 0.34901960784313724),
(0.14634146341463417, 0.41176470588235292, 0.41176470588235292),
(0.17073170731707318, 0.44313725490196076, 0.44313725490196076),
(0.1951219512195122, 0.27843137254901962, 0.27843137254901962),
(0.21951219512195122, 0.18431372549019609, 0.18431372549019609),
(0.24390243902439024, 0.0039215686274509803, 0.0039215686274509803),
(0.26829268292682928, 0.0, 0.0),
(0.29268292682926833, 0.0, 0.0),
(0.31707317073170732, 0.058823529411764705, 0.058823529411764705),
(0.34146341463414637, 0.17254901960784313, 0.17254901960784313),
(0.36585365853658536, 0.28627450980392155, 0.28627450980392155),
(0.3902439024390244, 0.40000000000000002, 0.40000000000000002),
(0.41463414634146345, 0.51764705882352946, 0.51764705882352946),
(0.43902439024390244, 0.63137254901960782, 0.63137254901960782),
(0.46341463414634149, 0.74901960784313726, 0.74901960784313726),
(0.48780487804878048, 0.81568627450980391, 0.81568627450980391),
(0.51219512195121952, 0.92549019607843142, 0.92549019607843142),
(0.53658536585365857, 0.93725490196078431, 0.93725490196078431),
(0.56097560975609762, 0.93725490196078431, 0.93725490196078431),
(0.58536585365853666, 0.93725490196078431, 0.93725490196078431),
(0.6097560975609756, 0.93725490196078431, 0.93725490196078431),
(0.63414634146341464, 0.92549019607843142, 0.92549019607843142),
(0.65853658536585369, 0.87058823529411766, 0.87058823529411766),
(0.68292682926829273, 0.81568627450980391, 0.81568627450980391),
(0.70731707317073178, 0.75686274509803919, 0.75686274509803919),
(0.73170731707317072, 0.70196078431372544, 0.70196078431372544),
(0.75609756097560976, 0.6470588235294118, 0.6470588235294118),
(0.78048780487804881, 0.58823529411764708, 0.58823529411764708),
(0.80487804878048785, 0.53333333333333333, 0.53333333333333333),
(0.8292682926829269, 0.4823529411764706, 0.4823529411764706),
(0.85365853658536583, 0.62745098039215685, 0.62745098039215685),
(0.87804878048780488, 0.62352941176470589, 0.62352941176470589),
(0.90243902439024393, 0.71764705882352942, 0.71764705882352942),
(0.92682926829268297, 0.81176470588235294, 0.81176470588235294),
(0.95121951219512202, 0.8784313725490196, 0.8784313725490196),
(0.97560975609756095, 0.94509803921568625, 0.94509803921568625),
(1.0, 1.0, 1.0)]}
#These come from Data Graphics Research
#http://geography.uoregon.edu/datagraphics/color_scales.htm
_BrBu12_data = {
'blue': [(0.0, 0.0, 0.0),
(0.090909090909090912, 0.0, 0.0),
(0.18181818181818182, 0.20999999999999999, 0.20999999999999999),
(0.27272727272727271, 0.47999999999999998, 0.47999999999999998),
(0.36363636363636365, 0.59499999999999997, 0.59499999999999997),
(0.45454545454545459, 0.80800000000000005, 0.80800000000000005),
(0.54545454545454541, 1.0, 1.0),
(0.63636363636363635, 1.0, 1.0),
(0.72727272727272729, 1.0, 1.0),
(0.81818181818181823, 1.0, 1.0),
(0.90909090909090917, 0.80000000000000004, 0.80000000000000004),
(1.0, 0.59999999999999998, 0.59999999999999998)],
'green': [(0.0, 0.10000000000000001, 0.10000000000000001),
(0.090909090909090912, 0.187, 0.187),
(0.18181818181818182, 0.379, 0.379),
(0.27272727272727271, 0.60799999999999998, 0.60799999999999998),
(0.36363636363636365, 0.68799999999999994, 0.68799999999999994),
(0.45454545454545459, 0.85499999999999998, 0.85499999999999998),
(0.54545454545454541, 0.99299999999999999, 0.99299999999999999),
(0.63636363636363635, 0.97299999999999998, 0.97299999999999998),
(0.72727272727272729, 0.93999999999999995, 0.93999999999999995),
(0.81818181818181823, 0.89300000000000002, 0.89300000000000002),
(0.90909090909090917, 0.66700000000000004, 0.66700000000000004),
(1.0, 0.47999999999999998, 0.47999999999999998)],
'red': [(0.0, 0.20000000000000001, 0.20000000000000001),
(0.090909090909090912, 0.40000000000000002, 0.40000000000000002),
(0.18181818181818182, 0.59999999999999998, 0.59999999999999998),
(0.27272727272727271, 0.80000000000000004, 0.80000000000000004),
(0.36363636363636365, 0.84999999999999998, 0.84999999999999998),
(0.45454545454545459, 0.94999999999999996, 0.94999999999999996),
(0.54545454545454541, 0.80000000000000004, 0.80000000000000004),
(0.63636363636363635, 0.59999999999999998, 0.59999999999999998),
(0.72727272727272729, 0.40000000000000002, 0.40000000000000002),
(0.81818181818181823, 0.20000000000000001, 0.20000000000000001),
(0.90909090909090917, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_GrMg16_data = {
'blue': [(0.0, 0.0, 0.0),
(0.066666666666666666, 0.0, 0.0),
(0.13333333333333333, 0.0, 0.0),
(0.20000000000000001, 0.0, 0.0),
(0.26666666666666666, 0.316, 0.316),
(0.33333333333333331, 0.52600000000000002, 0.52600000000000002),
(0.40000000000000002, 0.73699999999999999, 0.73699999999999999),
(0.46666666666666667, 1.0, 1.0),
(0.53333333333333333, 1.0, 1.0),
(0.59999999999999998, 1.0, 1.0),
(0.66666666666666663, 1.0, 1.0),
(0.73333333333333328, 1.0, 1.0),
(0.80000000000000004, 0.94699999999999995, 0.94699999999999995),
(0.8666666666666667, 0.73699999999999999, 0.73699999999999999),
(0.93333333333333335, 0.52600000000000002, 0.52600000000000002),
(1.0, 0.316, 0.316)],
'green': [(0.0, 0.316, 0.316),
(0.066666666666666666, 0.52600000000000002, 0.52600000000000002),
(0.13333333333333333, 0.73699999999999999, 0.73699999999999999),
(0.20000000000000001, 0.94699999999999995, 0.94699999999999995),
(0.26666666666666666, 1.0, 1.0),
(0.33333333333333331, 1.0, 1.0),
(0.40000000000000002, 1.0, 1.0),
(0.46666666666666667, 1.0, 1.0),
(0.53333333333333333, 0.94699999999999995, 0.94699999999999995),
(0.59999999999999998, 0.73699999999999999, 0.73699999999999999),
(0.66666666666666663, 0.52600000000000002, 0.52600000000000002),
(0.73333333333333328, 0.316, 0.316),
(0.80000000000000004, 0.0, 0.0),
(0.8666666666666667, 0.0, 0.0),
(0.93333333333333335, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.0, 0.0),
(0.066666666666666666, 0.0, 0.0),
(0.13333333333333333, 0.0, 0.0),
(0.20000000000000001, 0.0, 0.0),
(0.26666666666666666, 0.316, 0.316),
(0.33333333333333331, 0.52600000000000002, 0.52600000000000002),
(0.40000000000000002, 0.73699999999999999, 0.73699999999999999),
(0.46666666666666667, 1.0, 1.0),
(0.53333333333333333, 1.0, 1.0),
(0.59999999999999998, 1.0, 1.0),
(0.66666666666666663, 1.0, 1.0),
(0.73333333333333328, 1.0, 1.0),
(0.80000000000000004, 0.94699999999999995, 0.94699999999999995),
(0.8666666666666667, 0.73699999999999999, 0.73699999999999999),
(0.93333333333333335, 0.52600000000000002, 0.52600000000000002),
(1.0, 0.316, 0.316)]}
_BrBu10_data = {
'blue': [(0.0, 0.0, 0.0),
(0.1111111111111111, 0.20999999999999999, 0.20999999999999999),
(0.22222222222222221, 0.47999999999999998, 0.47999999999999998),
(0.33333333333333331, 0.59499999999999997, 0.59499999999999997),
(0.44444444444444442, 0.80800000000000005, 0.80800000000000005),
(0.55555555555555558, 1.0, 1.0),
(0.66666666666666663, 1.0, 1.0),
(0.77777777777777768, 1.0, 1.0),
(0.88888888888888884, 1.0, 1.0),
(1.0, 0.80000000000000004, 0.80000000000000004)],
'green': [(0.0, 0.187, 0.187),
(0.1111111111111111, 0.379, 0.379),
(0.22222222222222221, 0.60799999999999998, 0.60799999999999998),
(0.33333333333333331, 0.68799999999999994, 0.68799999999999994),
(0.44444444444444442, 0.85499999999999998, 0.85499999999999998),
(0.55555555555555558, 0.99299999999999999, 0.99299999999999999),
(0.66666666666666663, 0.97299999999999998, 0.97299999999999998),
(0.77777777777777768, 0.93999999999999995, 0.93999999999999995),
(0.88888888888888884, 0.89300000000000002, 0.89300000000000002),
(1.0, 0.66700000000000004, 0.66700000000000004)],
'red': [(0.0, 0.40000000000000002, 0.40000000000000002),
(0.1111111111111111, 0.59999999999999998, 0.59999999999999998),
(0.22222222222222221, 0.80000000000000004, 0.80000000000000004),
(0.33333333333333331, 0.84999999999999998, 0.84999999999999998),
(0.44444444444444442, 0.94999999999999996, 0.94999999999999996),
(0.55555555555555558, 0.80000000000000004, 0.80000000000000004),
(0.66666666666666663, 0.59999999999999998, 0.59999999999999998),
(0.77777777777777768, 0.40000000000000002, 0.40000000000000002),
(0.88888888888888884, 0.20000000000000001, 0.20000000000000001),
(1.0, 0.0, 0.0)]}
_Bu10_data = {
'blue': [(0.0, 1.0, 1.0),
(0.1111111111111111, 1.0, 1.0),
(0.22222222222222221, 1.0, 1.0),
(0.33333333333333331, 1.0, 1.0),
(0.44444444444444442, 1.0, 1.0),
(0.55555555555555558, 1.0, 1.0),
(0.66666666666666663, 1.0, 1.0),
(0.77777777777777768, 1.0, 1.0),
(0.88888888888888884, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 1.0, 1.0),
(0.1111111111111111, 0.98299999999999998, 0.98299999999999998),
(0.22222222222222221, 0.94999999999999996, 0.94999999999999996),
(0.33333333333333331, 0.90000000000000002, 0.90000000000000002),
(0.44444444444444442, 0.83299999999999996, 0.83299999999999996),
(0.55555555555555558, 0.75, 0.75),
(0.66666666666666663, 0.65000000000000002, 0.65000000000000002),
(0.77777777777777768, 0.53300000000000003, 0.53300000000000003),
(0.88888888888888884, 0.40000000000000002, 0.40000000000000002),
(1.0, 0.25, 0.25)],
'red': [(0.0, 0.90000000000000002, 0.90000000000000002),
(0.1111111111111111, 0.80000000000000004, 0.80000000000000004),
(0.22222222222222221, 0.69999999999999996, 0.69999999999999996),
(0.33333333333333331, 0.59999999999999998, 0.59999999999999998),
(0.44444444444444442, 0.5, 0.5),
(0.55555555555555558, 0.40000000000000002, 0.40000000000000002),
(0.66666666666666663, 0.29999999999999999, 0.29999999999999999),
(0.77777777777777768, 0.20000000000000001, 0.20000000000000001),
(0.88888888888888884, 0.10000000000000001, 0.10000000000000001),
(1.0, 0.0, 0.0)]}
_BuDOr12_data = {
'blue': [(0.0, 0.59999999999999998, 0.59999999999999998),
(0.090909090909090912, 0.80000000000000004, 0.80000000000000004),
(0.18181818181818182, 1.0, 1.0),
(0.27272727272727271, 1.0, 1.0),
(0.36363636363636365, 1.0, 1.0),
(0.45454545454545459, 1.0, 1.0),
(0.54545454545454541, 0.80000000000000004, 0.80000000000000004),
(0.63636363636363635, 0.59999999999999998, 0.59999999999999998),
(0.72727272727272729, 0.40000000000000002, 0.40000000000000002),
(0.81818181818181823, 0.20000000000000001, 0.20000000000000001),
(0.90909090909090917, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.56000000000000005, 0.56000000000000005),
(0.090909090909090912, 0.76800000000000002, 0.76800000000000002),
(0.18181818181818182, 0.97999999999999998, 0.97999999999999998),
(0.27272727272727271, 0.98999999999999999, 0.98999999999999999),
(0.36363636363636365, 0.997, 0.997),
(0.45454545454545459, 1.0, 1.0),
(0.54545454545454541, 0.90000000000000002, 0.90000000000000002),
(0.63636363636363635, 0.79300000000000004, 0.79300000000000004),
(0.72727272727272729, 0.68000000000000005, 0.68000000000000005),
(0.81818181818181823, 0.56000000000000005, 0.56000000000000005),
(0.90909090909090917, 0.34699999999999998, 0.34699999999999998),
(1.0, 0.25, 0.25)],
'red': [(0.0, 0.12, 0.12),
(0.090909090909090912, 0.32000000000000001, 0.32000000000000001),
(0.18181818181818182, 0.59999999999999998, 0.59999999999999998),
(0.27272727272727271, 0.69999999999999996, 0.69999999999999996),
(0.36363636363636365, 0.80000000000000004, 0.80000000000000004),
(0.45454545454545459, 0.90000000000000002, 0.90000000000000002),
(0.54545454545454541, 1.0, 1.0),
(0.63636363636363635, 1.0, 1.0),
(0.72727272727272729, 1.0, 1.0),
(0.81818181818181823, 1.0, 1.0),
(0.90909090909090917, 0.80000000000000004, 0.80000000000000004),
(1.0, 0.59999999999999998, 0.59999999999999998)]}
_StepSeq25_data = {
'blue': [(0.0, 0.059999999999999998, 0.059999999999999998),
(0.041666666666666664, 0.17499999999999999, 0.17499999999999999),
(0.083333333333333329, 0.32000000000000001, 0.32000000000000001),
(0.125, 0.495, 0.495),
(0.16666666666666666, 0.69999999999999996, 0.69999999999999996),
(0.20833333333333331, 0.059999999999999998, 0.059999999999999998),
(0.25, 0.17499999999999999, 0.17499999999999999),
(0.29166666666666663, 0.32000000000000001, 0.32000000000000001),
(0.33333333333333331, 0.495, 0.495),
(0.375, 0.69999999999999996, 0.69999999999999996),
(0.41666666666666663, 0.059999999999999998, 0.059999999999999998),
(0.45833333333333331, 0.17499999999999999, 0.17499999999999999),
(0.5, 0.32000000000000001, 0.32000000000000001),
(0.54166666666666663, 0.495, 0.495),
(0.58333333333333326, 0.69999999999999996, 0.69999999999999996),
(0.625, 0.59999999999999998, 0.59999999999999998),
(0.66666666666666663, 0.69999999999999996, 0.69999999999999996),
(0.70833333333333326, 0.80000000000000004, 0.80000000000000004),
(0.75, 0.90000000000000002, 0.90000000000000002),
(0.79166666666666663, 1.0, 1.0),
(0.83333333333333326, 0.59999999999999998, 0.59999999999999998),
(0.875, 0.69999999999999996, 0.69999999999999996),
(0.91666666666666663, 0.80000000000000004, 0.80000000000000004),
(0.95833333333333326, 0.90000000000000002, 0.90000000000000002),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.059999999999999998, 0.059999999999999998),
(0.041666666666666664, 0.17499999999999999, 0.17499999999999999),
(0.083333333333333329, 0.32000000000000001, 0.32000000000000001),
(0.125, 0.495, 0.495),
(0.16666666666666666, 0.69999999999999996, 0.69999999999999996),
(0.20833333333333331, 0.33000000000000002, 0.33000000000000002),
(0.25, 0.438, 0.438),
(0.29166666666666663, 0.56000000000000005, 0.56000000000000005),
(0.33333333333333331, 0.69699999999999995, 0.69699999999999995),
(0.375, 0.84999999999999998, 0.84999999999999998),
(0.41666666666666663, 0.59999999999999998, 0.59999999999999998),
(0.45833333333333331, 0.69999999999999996, 0.69999999999999996),
(0.5, 0.80000000000000004, 0.80000000000000004),
(0.54166666666666663, 0.90000000000000002, 0.90000000000000002),
(0.58333333333333326, 1.0, 1.0),
(0.625, 0.41999999999999998, 0.41999999999999998),
(0.66666666666666663, 0.52500000000000002, 0.52500000000000002),
(0.70833333333333326, 0.64000000000000001, 0.64000000000000001),
(0.75, 0.76500000000000001, 0.76500000000000001),
(0.79166666666666663, 0.90000000000000002, 0.90000000000000002),
(0.83333333333333326, 0.059999999999999998, 0.059999999999999998),
(0.875, 0.17499999999999999, 0.17499999999999999),
(0.91666666666666663, 0.32000000000000001, 0.32000000000000001),
(0.95833333333333326, 0.495, 0.495),
(1.0, 0.69999999999999996, 0.69999999999999996)],
'red': [(0.0, 0.59999999999999998, 0.59999999999999998),
(0.041666666666666664, 0.69999999999999996, 0.69999999999999996),
(0.083333333333333329, 0.80000000000000004, 0.80000000000000004),
(0.125, 0.90000000000000002, 0.90000000000000002),
(0.16666666666666666, 1.0, 1.0),
(0.20833333333333331, 0.59999999999999998, 0.59999999999999998),
(0.25, 0.69999999999999996, 0.69999999999999996),
(0.29166666666666663, 0.80000000000000004, 0.80000000000000004),
(0.33333333333333331, 0.90000000000000002, 0.90000000000000002),
(0.375, 1.0, 1.0),
(0.41666666666666663, 0.41999999999999998, 0.41999999999999998),
(0.45833333333333331, 0.52500000000000002, 0.52500000000000002),
(0.5, 0.64000000000000001, 0.64000000000000001),
(0.54166666666666663, 0.76500000000000001, 0.76500000000000001),
(0.58333333333333326, 0.90000000000000002, 0.90000000000000002),
(0.625, 0.059999999999999998, 0.059999999999999998),
(0.66666666666666663, 0.17499999999999999, 0.17499999999999999),
(0.70833333333333326, 0.32000000000000001, 0.32000000000000001),
(0.75, 0.495, 0.495),
(0.79166666666666663, 0.69999999999999996, 0.69999999999999996),
(0.83333333333333326, 0.14999999999999999, 0.14999999999999999),
(0.875, 0.26200000000000001, 0.26200000000000001),
(0.91666666666666663, 0.40000000000000002, 0.40000000000000002),
(0.95833333333333326, 0.56200000000000006, 0.56200000000000006),
(1.0, 0.75, 0.75)]}
_RdYlBu11b_data = {
'blue': [(0.0, 0.13, 0.13),
(0.10000000000000001, 0.19600000000000001, 0.19600000000000001),
(0.20000000000000001, 0.37, 0.37),
(0.30000000000000004, 0.45000000000000001, 0.45000000000000001),
(0.40000000000000002, 0.59999999999999998, 0.59999999999999998),
(0.5, 0.75, 0.75),
(0.60000000000000009, 1.0, 1.0),
(0.70000000000000007, 1.0, 1.0),
(0.80000000000000004, 1.0, 1.0),
(0.90000000000000002, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.10000000000000001, 0.14999999999999999, 0.14999999999999999),
(0.20000000000000001, 0.42999999999999999, 0.42999999999999999),
(0.30000000000000004, 0.68000000000000005, 0.68000000000000005),
(0.40000000000000002, 0.88, 0.88),
(0.5, 1.0, 1.0),
(0.60000000000000009, 1.0, 1.0),
(0.70000000000000007, 0.96999999999999997, 0.96999999999999997),
(0.80000000000000004, 0.84999999999999998, 0.84999999999999998),
(0.90000000000000002, 0.63, 0.63),
(1.0, 0.29999999999999999, 0.29999999999999999)],
'red': [(0.0, 0.65000000000000002, 0.65000000000000002),
(0.10000000000000001, 0.84999999999999998, 0.84999999999999998),
(0.20000000000000001, 0.96999999999999997, 0.96999999999999997),
(0.30000000000000004, 1.0, 1.0),
(0.40000000000000002, 1.0, 1.0),
(0.5, 1.0, 1.0),
(0.60000000000000009, 0.88, 0.88),
(0.70000000000000007, 0.67000000000000004, 0.67000000000000004),
(0.80000000000000004, 0.45000000000000001, 0.45000000000000001),
(0.90000000000000002, 0.25, 0.25),
(1.0, 0.14999999999999999, 0.14999999999999999)]}
_Bu7_data = {
'blue': [(0.0, 1.0, 1.0),
(0.16666666666666666, 1.0, 1.0),
(0.33333333333333331, 1.0, 1.0),
(0.5, 1.0, 1.0),
(0.66666666666666663, 1.0, 1.0),
(0.83333333333333326, 0.80000000000000004, 0.80000000000000004),
(1.0, 0.59999999999999998, 0.59999999999999998)],
'green': [(0.0, 1.0, 1.0),
(0.16666666666666666, 0.99299999999999999, 0.99299999999999999),
(0.33333333333333331, 0.97299999999999998, 0.97299999999999998),
(0.5, 0.93999999999999995, 0.93999999999999995),
(0.66666666666666663, 0.89300000000000002, 0.89300000000000002),
(0.83333333333333326, 0.66700000000000004, 0.66700000000000004),
(1.0, 0.47999999999999998, 0.47999999999999998)],
'red': [(0.0, 1.0, 1.0),
(0.16666666666666666, 0.80000000000000004, 0.80000000000000004),
(0.33333333333333331, 0.59999999999999998, 0.59999999999999998),
(0.5, 0.40000000000000002, 0.40000000000000002),
(0.66666666666666663, 0.20000000000000001, 0.20000000000000001),
(0.83333333333333326, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuOr12_data = {
'blue': [(0.0, 1.0, 1.0),
(0.090909090909090912, 1.0, 1.0),
(0.18181818181818182, 1.0, 1.0),
(0.27272727272727271, 1.0, 1.0),
(0.36363636363636365, 1.0, 1.0),
(0.45454545454545459, 1.0, 1.0),
(0.54545454545454541, 0.80000000000000004, 0.80000000000000004),
(0.63636363636363635, 0.59999999999999998, 0.59999999999999998),
(0.72727272727272729, 0.40000000000000002, 0.40000000000000002),
(0.81818181818181823, 0.20000000000000001, 0.20000000000000001),
(0.90909090909090917, 0.10000000000000001, 0.10000000000000001),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.16700000000000001, 0.16700000000000001),
(0.090909090909090912, 0.40000000000000002, 0.40000000000000002),
(0.18181818181818182, 0.59999999999999998, 0.59999999999999998),
(0.27272727272727271, 0.80000000000000004, 0.80000000000000004),
(0.36363636363636365, 0.93300000000000005, 0.93300000000000005),
(0.45454545454545459, 1.0, 1.0),
(0.54545454545454541, 1.0, 1.0),
(0.63636363636363635, 0.93300000000000005, 0.93300000000000005),
(0.72727272727272729, 0.80000000000000004, 0.80000000000000004),
(0.81818181818181823, 0.59999999999999998, 0.59999999999999998),
(0.90909090909090917, 0.40000000000000002, 0.40000000000000002),
(1.0, 0.16700000000000001, 0.16700000000000001)],
'red': [(0.0, 0.0, 0.0),
(0.090909090909090912, 0.10000000000000001, 0.10000000000000001),
(0.18181818181818182, 0.20000000000000001, 0.20000000000000001),
(0.27272727272727271, 0.40000000000000002, 0.40000000000000002),
(0.36363636363636365, 0.59999999999999998, 0.59999999999999998),
(0.45454545454545459, 0.80000000000000004, 0.80000000000000004),
(0.54545454545454541, 1.0, 1.0),
(0.63636363636363635, 1.0, 1.0),
(0.72727272727272729, 1.0, 1.0),
(0.81818181818181823, 1.0, 1.0),
(0.90909090909090917, 1.0, 1.0),
(1.0, 1.0, 1.0)]}
_BuGr14_data = {
'blue': [(0.0, 1.0, 1.0),
(0.076923076923076927, 1.0, 1.0),
(0.15384615384615385, 1.0, 1.0),
(0.23076923076923078, 1.0, 1.0),
(0.30769230769230771, 1.0, 1.0),
(0.38461538461538464, 1.0, 1.0),
(0.46153846153846156, 1.0, 1.0),
(0.53846153846153855, 0.90000000000000002, 0.90000000000000002),
(0.61538461538461542, 0.80000000000000004, 0.80000000000000004),
(0.69230769230769229, 0.69999999999999996, 0.69999999999999996),
(0.76923076923076927, 0.59999999999999998, 0.59999999999999998),
(0.84615384615384626, 0.40000000000000002, 0.40000000000000002),
(0.92307692307692313, 0.20000000000000001, 0.20000000000000001),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.076923076923076927, 0.20000000000000001, 0.20000000000000001),
(0.15384615384615385, 0.40000000000000002, 0.40000000000000002),
(0.23076923076923078, 0.59999999999999998, 0.59999999999999998),
(0.30769230769230771, 0.69999999999999996, 0.69999999999999996),
(0.38461538461538464, 0.80000000000000004, 0.80000000000000004),
(0.46153846153846156, 0.90000000000000002, 0.90000000000000002),
(0.53846153846153855, 1.0, 1.0),
(0.61538461538461542, 1.0, 1.0),
(0.69230769230769229, 1.0, 1.0),
(0.76923076923076927, 1.0, 1.0),
(0.84615384615384626, 1.0, 1.0),
(0.92307692307692313, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'red': [(0.0, 0.0, 0.0),
(0.076923076923076927, 0.20000000000000001, 0.20000000000000001),
(0.15384615384615385, 0.40000000000000002, 0.40000000000000002),
(0.23076923076923078, 0.59999999999999998, 0.59999999999999998),
(0.30769230769230771, 0.69999999999999996, 0.69999999999999996),
(0.38461538461538464, 0.80000000000000004, 0.80000000000000004),
(0.46153846153846156, 0.90000000000000002, 0.90000000000000002),
(0.53846153846153855, 0.90000000000000002, 0.90000000000000002),
(0.61538461538461542, 0.80000000000000004, 0.80000000000000004),
(0.69230769230769229, 0.69999999999999996, 0.69999999999999996),
(0.76923076923076927, 0.59999999999999998, 0.59999999999999998),
(0.84615384615384626, 0.40000000000000002, 0.40000000000000002),
(0.92307692307692313, 0.20000000000000001, 0.20000000000000001),
(1.0, 0.0, 0.0)]}
_BuDRd18_data = {
'blue': [(0.0, 0.84999999999999998, 0.84999999999999998),
(0.058823529411764705, 0.96999999999999997, 0.96999999999999997),
(0.11764705882352941, 1.0, 1.0),
(0.1764705882352941, 1.0, 1.0),
(0.23529411764705882, 1.0, 1.0),
(0.29411764705882354, 1.0, 1.0),
(0.3529411764705882, 1.0, 1.0),
(0.41176470588235292, 1.0, 1.0),
(0.47058823529411764, 1.0, 1.0),
(0.52941176470588236, 0.92000000000000004, 0.92000000000000004),
(0.58823529411764708, 0.73999999999999999, 0.73999999999999999),
(0.6470588235294118, 0.59999999999999998, 0.59999999999999998),
(0.70588235294117641, 0.46000000000000002, 0.46000000000000002),
(0.76470588235294112, 0.34000000000000002, 0.34000000000000002),
(0.82352941176470584, 0.23999999999999999, 0.23999999999999999),
(0.88235294117647056, 0.20999999999999999, 0.20999999999999999),
(0.94117647058823528, 0.187, 0.187),
(1.0, 0.13, 0.13)],
'green': [(0.0, 0.0, 0.0),
(0.058823529411764705, 0.112, 0.112),
(0.11764705882352941, 0.34200000000000003, 0.34200000000000003),
(0.1764705882352941, 0.53100000000000003, 0.53100000000000003),
(0.23529411764705882, 0.69199999999999995, 0.69199999999999995),
(0.29411764705882354, 0.82899999999999996, 0.82899999999999996),
(0.3529411764705882, 0.92000000000000004, 0.92000000000000004),
(0.41176470588235292, 0.97799999999999998, 0.97799999999999998),
(0.47058823529411764, 1.0, 1.0),
(0.52941176470588236, 1.0, 1.0),
(0.58823529411764708, 0.94799999999999995, 0.94799999999999995),
(0.6470588235294118, 0.83999999999999997, 0.83999999999999997),
(0.70588235294117641, 0.67600000000000005, 0.67600000000000005),
(0.76470588235294112, 0.47199999999999998, 0.47199999999999998),
(0.82352941176470584, 0.23999999999999999, 0.23999999999999999),
(0.88235294117647056, 0.155, 0.155),
(0.94117647058823528, 0.085000000000000006, 0.085000000000000006),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.14199999999999999, 0.14199999999999999),
(0.058823529411764705, 0.097000000000000003, 0.097000000000000003),
(0.11764705882352941, 0.16, 0.16),
(0.1764705882352941, 0.23999999999999999, 0.23999999999999999),
(0.23529411764705882, 0.34000000000000002, 0.34000000000000002),
(0.29411764705882354, 0.46000000000000002, 0.46000000000000002),
(0.3529411764705882, 0.59999999999999998, 0.59999999999999998),
(0.41176470588235292, 0.73999999999999999, 0.73999999999999999),
(0.47058823529411764, 0.92000000000000004, 0.92000000000000004),
(0.52941176470588236, 1.0, 1.0),
(0.58823529411764708, 1.0, 1.0),
(0.6470588235294118, 1.0, 1.0),
(0.70588235294117641, 1.0, 1.0),
(0.76470588235294112, 1.0, 1.0),
(0.82352941176470584, 1.0, 1.0),
(0.88235294117647056, 0.96999999999999997, 0.96999999999999997),
(0.94117647058823528, 0.84999999999999998, 0.84999999999999998),
(1.0, 0.65000000000000002, 0.65000000000000002)]}
_BuDOr18_data = {
'blue': [(0.0, 0.40000000000000002, 0.40000000000000002),
(0.058823529411764705, 0.59999999999999998, 0.59999999999999998),
(0.11764705882352941, 0.80000000000000004, 0.80000000000000004),
(0.1764705882352941, 1.0, 1.0),
(0.23529411764705882, 1.0, 1.0),
(0.29411764705882354, 1.0, 1.0),
(0.3529411764705882, 1.0, 1.0),
(0.41176470588235292, 1.0, 1.0),
(0.47058823529411764, 1.0, 1.0),
(0.52941176470588236, 1.0, 1.0),
(0.58823529411764708, 0.80000000000000004, 0.80000000000000004),
(0.6470588235294118, 0.59999999999999998, 0.59999999999999998),
(0.70588235294117641, 0.40000000000000002, 0.40000000000000002),
(0.76470588235294112, 0.20000000000000001, 0.20000000000000001),
(0.82352941176470584, 0.0, 0.0),
(0.88235294117647056, 0.0, 0.0),
(0.94117647058823528, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.40000000000000002, 0.40000000000000002),
(0.058823529411764705, 0.59999999999999998, 0.59999999999999998),
(0.11764705882352941, 0.80000000000000004, 0.80000000000000004),
(0.1764705882352941, 1.0, 1.0),
(0.23529411764705882, 1.0, 1.0),
(0.29411764705882354, 1.0, 1.0),
(0.3529411764705882, 1.0, 1.0),
(0.41176470588235292, 1.0, 1.0),
(0.47058823529411764, 1.0, 1.0),
(0.52941176470588236, 1.0, 1.0),
(0.58823529411764708, 0.90000000000000002, 0.90000000000000002),
(0.6470588235294118, 0.79300000000000004, 0.79300000000000004),
(0.70588235294117641, 0.68000000000000005, 0.68000000000000005),
(0.76470588235294112, 0.56000000000000005, 0.56000000000000005),
(0.82352941176470584, 0.433, 0.433),
(0.88235294117647056, 0.33300000000000002, 0.33300000000000002),
(0.94117647058823528, 0.23999999999999999, 0.23999999999999999),
(1.0, 0.153, 0.153)],
'red': [(0.0, 0.0, 0.0),
(0.058823529411764705, 0.0, 0.0),
(0.11764705882352941, 0.0, 0.0),
(0.1764705882352941, 0.0, 0.0),
(0.23529411764705882, 0.20000000000000001, 0.20000000000000001),
(0.29411764705882354, 0.40000000000000002, 0.40000000000000002),
(0.3529411764705882, 0.59999999999999998, 0.59999999999999998),
(0.41176470588235292, 0.69999999999999996, 0.69999999999999996),
(0.47058823529411764, 0.80000000000000004, 0.80000000000000004),
(0.52941176470588236, 0.90000000000000002, 0.90000000000000002),
(0.58823529411764708, 1.0, 1.0),
(0.6470588235294118, 1.0, 1.0),
(0.70588235294117641, 1.0, 1.0),
(0.76470588235294112, 1.0, 1.0),
(0.82352941176470584, 1.0, 1.0),
(0.88235294117647056, 0.80000000000000004, 0.80000000000000004),
(0.94117647058823528, 0.59999999999999998, 0.59999999999999998),
(1.0, 0.40000000000000002, 0.40000000000000002)]}
_BuOr8_data = {
'blue': [(0.0, 1.0, 1.0),
(0.14285714285714285, 1.0, 1.0),
(0.2857142857142857, 1.0, 1.0),
(0.42857142857142855, 1.0, 1.0),
(0.5714285714285714, 0.80000000000000004, 0.80000000000000004),
(0.71428571428571419, 0.59999999999999998, 0.59999999999999998),
(0.8571428571428571, 0.29999999999999999, 0.29999999999999999),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.5, 0.5),
(0.14285714285714285, 0.76700000000000002, 0.76700000000000002),
(0.2857142857142857, 0.93300000000000005, 0.93300000000000005),
(0.42857142857142855, 1.0, 1.0),
(0.5714285714285714, 1.0, 1.0),
(0.71428571428571419, 0.93300000000000005, 0.93300000000000005),
(0.8571428571428571, 0.76700000000000002, 0.76700000000000002),
(1.0, 0.5, 0.5)],
'red': [(0.0, 0.0, 0.0),
(0.14285714285714285, 0.29999999999999999, 0.29999999999999999),
(0.2857142857142857, 0.59999999999999998, 0.59999999999999998),
(0.42857142857142855, 0.80000000000000004, 0.80000000000000004),
(0.5714285714285714, 1.0, 1.0),
(0.71428571428571419, 1.0, 1.0),
(0.8571428571428571, 1.0, 1.0),
(1.0, 1.0, 1.0)]}
_Cat12_data = {
'blue': [(0.0, 0.5, 0.5),
(0.090909090909090912, 0.0, 0.0),
(0.18181818181818182, 0.59999999999999998, 0.59999999999999998),
(0.27272727272727271, 0.20000000000000001, 0.20000000000000001),
(0.36363636363636365, 0.55000000000000004, 0.55000000000000004),
(0.45454545454545459, 0.0, 0.0),
(0.54545454545454541, 1.0, 1.0),
(0.63636363636363635, 1.0, 1.0),
(0.72727272727272729, 1.0, 1.0),
(0.81818181818181823, 1.0, 1.0),
(0.90909090909090917, 0.75, 0.75),
(1.0, 0.20000000000000001, 0.20000000000000001)],
'green': [(0.0, 0.75, 0.75),
(0.090909090909090912, 0.5, 0.5),
(0.18181818181818182, 1.0, 1.0),
(0.27272727272727271, 1.0, 1.0),
(0.36363636363636365, 1.0, 1.0),
(0.45454545454545459, 1.0, 1.0),
(0.54545454545454541, 0.93000000000000005, 0.93000000000000005),
(0.63636363636363635, 0.69999999999999996, 0.69999999999999996),
(0.72727272727272729, 0.75, 0.75),
(0.81818181818181823, 0.29999999999999999, 0.29999999999999999),
(0.90909090909090917, 0.59999999999999998, 0.59999999999999998),
(1.0, 0.10000000000000001, 0.10000000000000001)],
'red': [(0.0, 1.0, 1.0),
(0.090909090909090912, 1.0, 1.0),
(0.18181818181818182, 1.0, 1.0),
(0.27272727272727271, 1.0, 1.0),
(0.36363636363636365, 0.69999999999999996, 0.69999999999999996),
(0.45454545454545459, 0.20000000000000001, 0.20000000000000001),
(0.54545454545454541, 0.65000000000000002, 0.65000000000000002),
(0.63636363636363635, 0.10000000000000001, 0.10000000000000001),
(0.72727272727272729, 0.80000000000000004, 0.80000000000000004),
(0.81818181818181823, 0.40000000000000002, 0.40000000000000002),
(0.90909090909090917, 1.0, 1.0),
(1.0, 0.90000000000000002, 0.90000000000000002)]}
_BuGy8_data = {
'blue': [(0.0, 0.80000000000000004, 0.80000000000000004),
(0.14285714285714285, 1.0, 1.0),
(0.2857142857142857, 1.0, 1.0),
(0.42857142857142855, 1.0, 1.0),
(0.5714285714285714, 0.90000000000000002, 0.90000000000000002),
(0.71428571428571419, 0.59999999999999998, 0.59999999999999998),
(0.8571428571428571, 0.40000000000000002, 0.40000000000000002),
(1.0, 0.20000000000000001, 0.20000000000000001)],
'green': [(0.0, 0.59999999999999998, 0.59999999999999998),
(0.14285714285714285, 0.90000000000000002, 0.90000000000000002),
(0.2857142857142857, 1.0, 1.0),
(0.42857142857142855, 1.0, 1.0),
(0.5714285714285714, 0.90000000000000002, 0.90000000000000002),
(0.71428571428571419, 0.59999999999999998, 0.59999999999999998),
(0.8571428571428571, 0.40000000000000002, 0.40000000000000002),
(1.0, 0.20000000000000001, 0.20000000000000001)],
'red': [(0.0, 0.0, 0.0),
(0.14285714285714285, 0.40000000000000002, 0.40000000000000002),
(0.2857142857142857, 0.59999999999999998, 0.59999999999999998),
(0.42857142857142855, 0.80000000000000004, 0.80000000000000004),
(0.5714285714285714, 0.90000000000000002, 0.90000000000000002),
(0.71428571428571419, 0.59999999999999998, 0.59999999999999998),
(0.8571428571428571, 0.40000000000000002, 0.40000000000000002),
(1.0, 0.20000000000000001, 0.20000000000000001)]}
_BuOrR14_data = {
'blue': [(0.0, 1.0, 1.0),
(0.076923076923076927, 1.0, 1.0),
(0.15384615384615385, 1.0, 1.0),
(0.23076923076923078, 1.0, 1.0),
(0.30769230769230771, 1.0, 1.0),
(0.38461538461538464, 1.0, 1.0),
(0.46153846153846156, 1.0, 1.0),
(0.53846153846153855, 0.80000000000000004, 0.80000000000000004),
(0.61538461538461542, 0.59999999999999998, 0.59999999999999998),
(0.69230769230769229, 0.0, 0.0),
(0.76923076923076927, 0.0, 0.0),
(0.84615384615384626, 0.0, 0.0),
(0.92307692307692313, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.35299999999999998, 0.35299999999999998),
(0.076923076923076927, 0.46700000000000003, 0.46700000000000003),
(0.15384615384615385, 0.56699999999999995, 0.56699999999999995),
(0.23076923076923078, 0.69999999999999996, 0.69999999999999996),
(0.30769230769230771, 0.83299999999999996, 0.83299999999999996),
(0.38461538461538464, 0.93300000000000005, 0.93300000000000005),
(0.46153846153846156, 0.97999999999999998, 0.97999999999999998),
(0.53846153846153855, 1.0, 1.0),
(0.61538461538461542, 1.0, 1.0),
(0.69230769230769229, 1.0, 1.0),
(0.76923076923076927, 0.80000000000000004, 0.80000000000000004),
(0.84615384615384626, 0.59999999999999998, 0.59999999999999998),
(0.92307692307692313, 0.40000000000000002, 0.40000000000000002),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.029999999999999999, 0.029999999999999999),
(0.076923076923076927, 0.20000000000000001, 0.20000000000000001),
(0.15384615384615385, 0.34999999999999998, 0.34999999999999998),
(0.23076923076923078, 0.55000000000000004, 0.55000000000000004),
(0.30769230769230771, 0.75, 0.75),
(0.38461538461538464, 0.90000000000000002, 0.90000000000000002),
(0.46153846153846156, 0.96999999999999997, 0.96999999999999997),
(0.53846153846153855, 1.0, 1.0),
(0.61538461538461542, 1.0, 1.0),
(0.69230769230769229, 1.0, 1.0),
(0.76923076923076927, 1.0, 1.0),
(0.84615384615384626, 1.0, 1.0),
(0.92307692307692313, 1.0, 1.0),
(1.0, 1.0, 1.0)]}
_BuOr10_data = {
'blue': [(0.0, 1.0, 1.0),
(0.1111111111111111, 1.0, 1.0),
(0.22222222222222221, 1.0, 1.0),
(0.33333333333333331, 1.0, 1.0),
(0.44444444444444442, 1.0, 1.0),
(0.55555555555555558, 0.80000000000000004, 0.80000000000000004),
(0.66666666666666663, 0.59999999999999998, 0.59999999999999998),
(0.77777777777777768, 0.40000000000000002, 0.40000000000000002),
(0.88888888888888884, 0.20000000000000001, 0.20000000000000001),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.33300000000000002, 0.33300000000000002),
(0.1111111111111111, 0.59999999999999998, 0.59999999999999998),
(0.22222222222222221, 0.80000000000000004, 0.80000000000000004),
(0.33333333333333331, 0.93300000000000005, 0.93300000000000005),
(0.44444444444444442, 1.0, 1.0),
(0.55555555555555558, 1.0, 1.0),
(0.66666666666666663, 0.93300000000000005, 0.93300000000000005),
(0.77777777777777768, 0.80000000000000004, 0.80000000000000004),
(0.88888888888888884, 0.59999999999999998, 0.59999999999999998),
(1.0, 0.33300000000000002, 0.33300000000000002)],
'red': [(0.0, 0.0, 0.0),
(0.1111111111111111, 0.20000000000000001, 0.20000000000000001),
(0.22222222222222221, 0.40000000000000002, 0.40000000000000002),
(0.33333333333333331, 0.59999999999999998, 0.59999999999999998),
(0.44444444444444442, 0.80000000000000004, 0.80000000000000004),
(0.55555555555555558, 1.0, 1.0),
(0.66666666666666663, 1.0, 1.0),
(0.77777777777777768, 1.0, 1.0),
(0.88888888888888884, 1.0, 1.0),
(1.0, 1.0, 1.0)]}
_BuDRd12_data = {
'blue': [(0.0, 0.84999999999999998, 0.84999999999999998),
(0.090909090909090912, 1.0, 1.0),
(0.18181818181818182, 1.0, 1.0),
(0.27272727272727271, 1.0, 1.0),
(0.36363636363636365, 1.0, 1.0),
(0.45454545454545459, 1.0, 1.0),
(0.54545454545454541, 0.75, 0.75),
(0.63636363636363635, 0.59999999999999998, 0.59999999999999998),
(0.72727272727272729, 0.45000000000000001, 0.45000000000000001),
(0.81818181818181823, 0.37, 0.37),
(0.90909090909090917, 0.19600000000000001, 0.19600000000000001),
(1.0, 0.13, 0.13)],
'green': [(0.0, 0.042999999999999997, 0.042999999999999997),
(0.090909090909090912, 0.30599999999999999, 0.30599999999999999),
(0.18181818181818182, 0.63, 0.63),
(0.27272727272727271, 0.85299999999999998, 0.85299999999999998),
(0.36363636363636365, 0.97299999999999998, 0.97299999999999998),
(0.45454545454545459, 1.0, 1.0),
(0.54545454545454541, 1.0, 1.0),
(0.63636363636363635, 0.88, 0.88),
(0.72727272727272729, 0.67900000000000005, 0.67900000000000005),
(0.81818181818181823, 0.42999999999999999, 0.42999999999999999),
(0.90909090909090917, 0.14999999999999999, 0.14999999999999999),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.16400000000000001, 0.16400000000000001),
(0.090909090909090912, 0.14999999999999999, 0.14999999999999999),
(0.18181818181818182, 0.25, 0.25),
(0.27272727272727271, 0.45000000000000001, 0.45000000000000001),
(0.36363636363636365, 0.67000000000000004, 0.67000000000000004),
(0.45454545454545459, 0.88, 0.88),
(0.54545454545454541, 1.0, 1.0),
(0.63636363636363635, 1.0, 1.0),
(0.72727272727272729, 1.0, 1.0),
(0.81818181818181823, 0.96999999999999997, 0.96999999999999997),
(0.90909090909090917, 0.84999999999999998, 0.84999999999999998),
(1.0, 0.65000000000000002, 0.65000000000000002)]}
_REF_pmarsh_data = {
'blue' : [(0.00,0.24,0.24),
(0.10,0.75,0.75),
(0.15,1.00,1.00),
(0.20,1.00,1.00),
(0.25,0.00,0.00),
(0.35,0.00,0.00),
(0.40,0.00,0.00),
(0.50,0.25,0.25),
(0.60,0.00,0.00),
(0.65,0.00,0.00),
(0.75,1.00,1.00),
(0.87,1.00,1.00),
(1.00,1.00,1.00)],
'green': [(0.00,0.24,0.24),
(0.10,0.70,0.70),
(0.15,0.50,0.50),
(0.20,0.00,0.00),
(0.25,0.50,0.50),
(0.35,1.00,1.00),
(0.40,1.00,1.00),
(0.50,0.25,0.25),
(0.60,0.00,0.00),
(0.65,0.00,0.00),
(0.75,0.00,0.00),
(0.87,1.00,1.00),
(1.00,1.00,1.00)],
'red' : [(0.00,0.24,0.24),
(0.10,0.70,0.70),
(0.15,0.50,0.50),
(0.20,0.00,0.00),
(0.25,0.00,0.00),
(0.35,0.00,0.00),
(0.40,1.00,1.00),
(0.50,1.00,1.00),
(0.60,0.50,0.50),
(0.65,0.50,0.50),
(0.75,1.00,1.00),
(0.87,1.00,1.00),
(1.00,1.00,1.00)]}
_REF_default_data = {
'blue' : [(0.000,1.000,1.000),
(0.076,1.000,1.000),
(0.153,0.804,0.804),
(0.230,0.000,0.000),
(0.307,0.000,0.000),
(0.384,0.000,0.000),
(0.461,0.000,0.000),
(0.538,0.043,0.043),
(0.615,0.000,0.000),
(0.692,0.000,0.000),
(0.769,0.000,0.000),
(0.846,0.000,0.000),
(0.923,0.537,0.537),
(1.000,0.078,0.078)],
'green': [(0.000,1.000,1.000),
(0.076,0.566,0.566),
(0.153,0.000,0.000),
(0.230,0.988,0.988),
(0.307,0.933,0.933),
(0.384,0.545,0.545),
(0.461,1.000,1.000),
(0.538,0.525,0.525),
(0.615,0.549,0.549),
(0.692,0.000,0.000),
(0.769,0.000,0.000),
(0.846,0.000,0.000),
(0.923,0.070,0.070),
(1.000,0.196,0.196)],
'red' : [(0.000,0.000,0.000),
(0.076,0.118,0.118),
(0.153,0.000,0.000),
(0.230,0.482,0.482),
(0.307,0.000,0.000),
(0.384,0.000,0.000),
(0.461,1.000,1.000),
(0.538,0.726,0.726),
(0.615,1.000,1.000),
(0.692,1.000,1.000),
(0.769,0.804,0.804),
(0.846,0.549,0.549),
(0.923,0.933,0.933),
(1.000,0.604,0.604)]}
_Not_PosDef_Default_data = {
'blue' : [(0.000,1.000,1.000),
(0.052,0.000,0.000),
(0.105,0.850,0.850),
(0.157,0.970,0.970),
(0.210,1.000,1.000),
(0.263,1.000,1.000),
(0.315,1.000,1.000),
(0.368,1.000,1.000),
(0.421,1.000,1.000),
(0.473,1.000,1.000),
(0.526,1.000,1.000),
(0.578,0.920,0.920),
(0.631,0.740,0.740),
(0.684,0.600,0.600),
(0.736,0.460,0.460),
(0.789,0.340,0.340),
(0.842,0.240,0.240),
(0.894,0.210,0.210),
(0.947,0.187,0.187),
(1.000,0.130,0.130)],
'green': [(0.000,1.000,1.000),
(0.052,0.000,0.000),
(0.105,0.000,0.000),
(0.157,0.112,0.112),
(0.210,0.342,0.342),
(0.263,0.531,0.531),
(0.315,0.692,0.692),
(0.368,0.829,0.829),
(0.421,0.920,0.920),
(0.473,0.978,0.978),
(0.526,1.000,1.000),
(0.578,1.000,1.000),
(0.631,0.948,0.948),
(0.684,0.840,0.840),
(0.736,0.676,0.676),
(0.789,0.472,0.472),
(0.842,0.240,0.240),
(0.894,0.155,0.155),
(0.947,0.085,0.085),
(1.000,0.000,0.000)],
'red' : [(0.000,1.000,1.000),
(0.052,0.000,0.000),
(0.105,0.142,0.142),
(0.157,0.097,0.097),
(0.210,0.160,0.160),
(0.263,0.240,0.240),
(0.315,0.340,0.340),
(0.368,0.460,0.460),
(0.421,0.600,0.600),
(0.473,0.740,0.740),
(0.526,0.880,0.880),
(0.578,1.000,1.000),
(0.631,1.000,1.000),
(0.684,1.000,1.000),
(0.736,1.000,1.000),
(0.789,1.000,1.000),
(0.842,1.000,1.000),
(0.894,0.970,0.970),
(0.947,0.850,0.850),
(1.000,0.650,0.650)]}
_Positive_Definite_data = {
'blue' : [(0.000,1.000,1.000),
(0.100,0.000,0.000),
(0.200,1.000,1.000),
(0.300,0.740,0.740),
(0.400,0.600,0.600),
(0.500,0.460,0.460),
(0.600,0.340,0.340),
(0.700,0.240,0.240),
(0.800,0.210,0.210),
(0.900,0.187,0.187),
(1.000,0.130,0.130)],
'green': [(0.000,1.000,1.000),
(0.100,0.000,0.000),
(0.200,1.000,1.000),
(0.300,0.948,0.948),
(0.400,0.840,0.840),
(0.500,0.676,0.676),
(0.600,0.472,0.472),
(0.700,0.240,0.240),
(0.800,0.155,0.155),
(0.900,0.085,0.085),
(1.000,0.000,0.000)],
'red' : [(0.000,1.000,1.000),
(0.100,0.000,0.000),
(0.200,1.000,1.000),
(0.300,1.000,1.000),
(0.400,1.000,1.000),
(0.500,1.000,1.000),
(0.600,1.000,1.000),
(0.700,1.000,1.000),
(0.800,0.970,0.970),
(0.900,0.850,0.850),
(1.000,0.650,0.650)]}
datad = {}
current_locals = locals()
current_local_keys = list(current_locals.keys())
for name in current_local_keys:
if name.endswith('_data'):
newname = name[1:-5]
#Put data for colortable into dictionary under new name
datad[newname] = current_locals[name]
#Create colortable from data and place it in local namespace under new name
current_locals[newname] = colors.LinearSegmentedColormap(newname, current_locals[name],
LUTSIZE)
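# Hedged illustration (not part of the original module): the loop above stores the
# raw segment data in datad and injects a ready-made colormap object for each table
# into the module namespace via locals(), which at module scope is the module dict.
# The helper below only reads those names; it assumes 'BuGy8' (defined earlier in
# this file) was registered successfully.
def _example_registered_tables():
    """Return the sorted table names and an RGBA sample from the 'BuGy8' map."""
    names = sorted(datad.keys())        # names with the leading '_' and '_data' stripped
    sample = globals()['BuGy8'](0.5)    # colormap object created by the loop above
    return names, sample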
#Stolen shamelessly from matplotlib.cm
def get_cmap(name, lut=None):
if lut is None: lut = LUTSIZE
#If lut is < 0, then return the table with only levels originally defined
if lut < 0:
lut = len(datad[name]['red'])
return colors.LinearSegmentedColormap(name, datad[name], lut)
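# Hedged usage sketch (assumes matplotlib is installed and that 'BuDRd12' is one of
# the tables registered in datad above); the _example name is illustrative only and
# not part of the original module.
def _example_get_cmap_usage():
    """Sample one table at both the interpolated and the level-exact resolution."""
    smooth = get_cmap('BuDRd12')          # interpolated to LUTSIZE entries
    exact = get_cmap('BuDRd12', lut=-1)   # negative lut keeps only the originally defined levels
    return smooth(0.25), exact(0.25)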
#Taken from the matplotlib cookbook
def cmap_map(function,cmap):
""" Applies function (which should operate on vectors of shape 3:
    [r, g, b]), on colormap cmap. This routine will break any discontinuous
points in a colormap.
Example usage:
light_jet = cmap_map(lambda x: x/2+0.5, cm.jet)
"""
    cdict = cmap._segmentdata
    step_dict = {}
    # First get the list of points where the segments start or end
    for key in ('red', 'green', 'blue'):
        step_dict[key] = [x[0] for x in cdict[key]]
    step_list = array(sorted(set().union(*step_dict.values())))
    # Then compute the LUT, and apply the function to the LUT
    reduced_cmap = lambda step: array(cmap(step)[0:3])
    old_LUT = array([reduced_cmap(step) for step in step_list])
    new_LUT = array([function(rgb) for rgb in old_LUT])
    # Now try to make a minimal segment definition of the new LUT
    cdict = {}
    for i, key in enumerate(('red', 'green', 'blue')):
        this_cdict = {}
        for j, step in enumerate(step_list):
            if step in step_dict[key]:
                this_cdict[step] = new_LUT[j, i]
            elif new_LUT[j, i] != old_LUT[j, i]:
                this_cdict[step] = new_LUT[j, i]
        # Each segment entry is (position, value_below, value_above)
        colorvector = sorted(x + (x[1],) for x in this_cdict.items())
        cdict[key] = colorvector
    return colors.LinearSegmentedColormap('colormap', cdict, 1024)
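# Hedged sketch of the cookbook idiom from the cmap_map docstring, applied to a
# table defined in this module; assumes matplotlib.colors is available as above.
def _example_cmap_map_usage():
    """Return a lightened variant of the BuOr8 colortable."""
    return cmap_map(lambda x: x / 2 + 0.5, get_cmap('BuOr8'))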
if __name__ == '__main__':
import numpy, pylab
a=numpy.outer(numpy.arange(0,1,0.01),numpy.ones(10))
pylab.figure(figsize=(10,7))
pylab.subplots_adjust(top=0.8,bottom=0.05,left=0.01,right=0.99)
maps=[m for m in datad.keys() if not m.endswith("_r")]
maps.sort()
l=len(maps)+1
i=1
for m in maps:
pylab.subplot(1,l,i)
pylab.axis("off")
pylab.imshow(a,aspect='auto',cmap=locals()[m],origin="lower")
pylab.title(m,rotation=90,fontsize=10)
i=i+1
pylab.show()
| bsd-2-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/tests/test_internals.py | 1 | 20693 | # pylint: disable=W0102
import nose
import numpy as np
from pandas import Index, MultiIndex, DataFrame, Series
from pandas.sparse.array import SparseArray
from pandas.core.internals import *
import pandas.core.internals as internals
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, randn)
from pandas.compat import zip, u
def assert_block_equal(left, right):
assert_almost_equal(left.values, right.values)
assert(left.dtype == right.dtype)
assert(left.items.equals(right.items))
assert(left.ref_items.equals(right.ref_items))
def get_float_mat(n, k, dtype):
return np.repeat(np.atleast_2d(np.arange(k, dtype=dtype)), n, axis=0)
TEST_COLS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 's1', 's2']
N = 10
def get_float_ex(cols=['a', 'c', 'e'], dtype = np.float_):
floats = get_float_mat(N, len(cols), dtype = dtype).T
return make_block(floats, cols, TEST_COLS)
def get_complex_ex(cols=['h']):
complexes = (get_float_mat(N, 1, dtype = np.float_).T * 1j).astype(np.complex128)
return make_block(complexes, cols, TEST_COLS)
def get_obj_ex(cols=['b', 'd']):
mat = np.empty((N, 2), dtype=object)
mat[:, 0] = 'foo'
mat[:, 1] = 'bar'
return make_block(mat.T, cols, TEST_COLS)
def get_bool_ex(cols=['f']):
mat = np.ones((N, 1), dtype=bool)
return make_block(mat.T, cols, TEST_COLS)
def get_int_ex(cols=['g'], dtype = np.int_):
mat = randn(N, 1).astype(dtype)
return make_block(mat.T, cols, TEST_COLS)
def get_dt_ex(cols=['h']):
mat = randn(N, 1).astype(int).astype('M8[ns]')
return make_block(mat.T, cols, TEST_COLS)
def get_sparse_ex1():
sa1 = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
return make_block(sa1, ['s1'], TEST_COLS)
def get_sparse_ex2():
sa2 = SparseArray([0, 0, 2, 3, 4, 0, 6, 7, 0, 8], fill_value=0)
return make_block(sa2, ['s2'], TEST_COLS)
def create_blockmanager(blocks):
l = []
for b in blocks:
l.extend(b.items)
items = Index(l)
for b in blocks:
b.ref_items = items
index_sz = blocks[0].shape[1]
return BlockManager(blocks, [items, np.arange(index_sz)])
def create_singleblockmanager(blocks):
l = []
for b in blocks:
l.extend(b.items)
items = Index(l)
for b in blocks:
b.ref_items = items
return SingleBlockManager(blocks, [items])
class TestBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.fblock = get_float_ex()
self.cblock = get_complex_ex()
self.oblock = get_obj_ex()
self.bool_block = get_bool_ex()
self.int_block = get_int_ex()
def test_constructor(self):
int32block = get_int_ex(['a'],dtype = np.int32)
self.assert_(int32block.dtype == np.int32)
def test_pickle(self):
import pickle
def _check(blk):
pickled = pickle.dumps(blk)
unpickled = pickle.loads(pickled)
assert_block_equal(blk, unpickled)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_ref_locs(self):
assert_almost_equal(self.fblock.ref_locs, [0, 2, 4])
def test_attrs(self):
self.assert_(self.fblock.shape == self.fblock.values.shape)
self.assert_(self.fblock.dtype == self.fblock.values.dtype)
self.assert_(len(self.fblock) == len(self.fblock.values))
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = ['e', 'a', 'b', 'd', 'f']
ablock = make_block(avals, ['e', 'b'], ref_cols)
bblock = make_block(bvals, ['a', 'd'], ref_cols)
merged = ablock.merge(bblock)
exvals = np.vstack((avals, bvals))
excols = ['e', 'b', 'a', 'd']
eblock = make_block(exvals, excols, ref_cols)
eblock = eblock.reindex_items_from(ref_cols)
assert_block_equal(merged, eblock)
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
self.assert_(cop is not self.fblock)
assert_block_equal(self.fblock, cop)
def test_items(self):
cols = self.fblock.items
self.assert_(np.array_equal(cols, ['a', 'c', 'e']))
cols2 = self.fblock.items
self.assert_(cols is cols2)
def test_assign_ref_items(self):
new_cols = Index(['foo', 'bar', 'baz', 'quux', 'hi'])
self.fblock.set_ref_items(new_cols)
self.assert_(np.array_equal(self.fblock.items,
['foo', 'baz', 'hi']))
def test_reindex_index(self):
pass
def test_reindex_items_from(self):
new_cols = Index(['e', 'b', 'c', 'f'])
reindexed = self.fblock.reindex_items_from(new_cols)
assert_almost_equal(reindexed.ref_locs, [0, 2])
self.assertEquals(reindexed.values.shape[0], 2)
self.assert_((reindexed.values[0] == 2).all())
self.assert_((reindexed.values[1] == 1).all())
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.delete('a')
assert_almost_equal(newb.ref_locs, [2, 4])
self.assert_((newb.values[0] == 1).all())
newb = self.fblock.delete('c')
assert_almost_equal(newb.ref_locs, [0, 4])
self.assert_((newb.values[1] == 2).all())
newb = self.fblock.delete('e')
assert_almost_equal(newb.ref_locs, [0, 2])
self.assert_((newb.values[1] == 1).all())
self.assertRaises(Exception, self.fblock.delete, 'b')
def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
raise nose.SkipTest("skipping for now")
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
bs = list(self.fblock.split_block_at('c'))
self.assertEqual(len(bs), 2)
self.assertTrue(np.array_equal(bs[0].items, ['a']))
self.assertTrue(np.array_equal(bs[1].items, ['e']))
bs = list(self.fblock.split_block_at('e'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['a', 'c']))
bblock = get_bool_ex(['f'])
bs = list(bblock.split_block_at('f'))
self.assertEqual(len(bs), 0)
def test_unicode_repr(self):
mat = np.empty((N, 2), dtype=object)
mat[:, 0] = 'foo'
mat[:, 1] = 'bar'
cols = ['b', u("\u05d0")]
str_repr = repr(make_block(mat.T, cols, TEST_COLS))
def test_get(self):
pass
def test_set(self):
pass
def test_fillna(self):
pass
def test_repr(self):
pass
class TestBlockManager(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.blocks = [get_float_ex(),
get_obj_ex(),
get_bool_ex(),
get_int_ex(),
get_complex_ex()]
all_items = [b.items for b in self.blocks]
items = sorted(all_items[0].append(all_items[1:]))
items = Index(items)
for b in self.blocks:
b.ref_items = items
self.mgr = BlockManager(self.blocks, [items, np.arange(N)])
def test_constructor_corner(self):
pass
def test_attrs(self):
self.assertEquals(self.mgr.nblocks, len(self.mgr.blocks))
self.assertEquals(len(self.mgr), len(self.mgr.items))
def test_is_mixed_dtype(self):
self.assert_(self.mgr.is_mixed_type)
mgr = create_blockmanager([get_bool_ex(['a']), get_bool_ex(['b'])])
self.assert_(not mgr.is_mixed_type)
def test_is_indexed_like(self):
self.assert_(self.mgr._is_indexed_like(self.mgr))
mgr2 = self.mgr.reindex_axis(np.arange(N - 1), axis=1)
self.assert_(not self.mgr._is_indexed_like(mgr2))
def test_block_id_vector_item_dtypes(self):
expected = [0, 1, 0, 1, 0, 2, 3, 4]
result = self.mgr.block_id_vector
assert_almost_equal(expected, result)
result = self.mgr.item_dtypes
# as the platform may not exactly match this, pseudo match
expected = ['float64', 'object', 'float64', 'object', 'float64',
'bool', 'int64', 'complex128']
for e, r in zip(expected, result):
np.dtype(e).kind == np.dtype(r).kind
def test_duplicate_item_failure(self):
items = Index(['a', 'a'])
blocks = [get_bool_ex(['a']), get_float_ex(['a'])]
for b in blocks:
b.ref_items = items
        # test trying to create _ref_locs without ref_locs set on the blocks
self.assertRaises(AssertionError, BlockManager, blocks, [items, np.arange(N)])
blocks[0].set_ref_locs([0])
blocks[1].set_ref_locs([1])
mgr = BlockManager(blocks, [items, np.arange(N)])
mgr.iget(1)
# invalidate the _ref_locs
for b in blocks:
b._ref_locs = None
mgr._ref_locs = None
mgr._items_map = None
self.assertRaises(AssertionError, mgr._set_ref_locs, do_refs=True)
def test_contains(self):
self.assert_('a' in self.mgr)
self.assert_('baz' not in self.mgr)
def test_pickle(self):
import pickle
pickled = pickle.dumps(self.mgr)
mgr2 = pickle.loads(pickled)
# same result
assert_frame_equal(DataFrame(self.mgr), DataFrame(mgr2))
# share ref_items
self.assert_(mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items)
# GH2431
self.assertTrue(hasattr(mgr2, "_is_consolidated"))
self.assertTrue(hasattr(mgr2, "_known_consolidated"))
# reset to False on load
self.assertFalse(mgr2._is_consolidated)
self.assertFalse(mgr2._known_consolidated)
def test_get(self):
pass
def test_get_scalar(self):
for item in self.mgr.items:
for i, index in enumerate(self.mgr.axes[1]):
res = self.mgr.get_scalar((item, index))
exp = self.mgr.get(item)[i]
assert_almost_equal(res, exp)
def test_set(self):
pass
def test_set_change_dtype(self):
self.mgr.set('baz', np.zeros(N, dtype=bool))
self.mgr.set('baz', np.repeat('foo', N))
self.assert_(self.mgr.get('baz').dtype == np.object_)
mgr2 = self.mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
self.assert_(mgr2.get('baz').dtype == np.object_)
mgr2.set('quux', randn(N).astype(int))
self.assert_(mgr2.get('quux').dtype == np.int_)
mgr2.set('quux', randn(N))
self.assert_(mgr2.get('quux').dtype == np.float_)
def test_copy(self):
shallow = self.mgr.copy(deep=False)
        # we don't guarantee block ordering
for blk in self.mgr.blocks:
found = False
for cp_blk in shallow.blocks:
if cp_blk.values is blk.values:
found = True
break
self.assert_(found == True)
def test_sparse(self):
mgr = create_blockmanager([get_sparse_ex1(),get_sparse_ex2()])
# what to test here?
self.assert_(mgr.as_matrix().dtype == np.float64)
def test_sparse_mixed(self):
mgr = create_blockmanager([get_sparse_ex1(),get_sparse_ex2(),get_float_ex()])
self.assert_(len(mgr.blocks) == 3)
self.assert_(isinstance(mgr,BlockManager))
# what to test here?
def test_as_matrix_float(self):
mgr = create_blockmanager([get_float_ex(['c'],np.float32), get_float_ex(['d'],np.float16), get_float_ex(['e'],np.float64)])
self.assert_(mgr.as_matrix().dtype == np.float64)
mgr = create_blockmanager([get_float_ex(['c'],np.float32), get_float_ex(['d'],np.float16)])
self.assert_(mgr.as_matrix().dtype == np.float32)
def test_as_matrix_int_bool(self):
mgr = create_blockmanager([get_bool_ex(['a']), get_bool_ex(['b'])])
self.assert_(mgr.as_matrix().dtype == np.bool_)
mgr = create_blockmanager([get_int_ex(['a'],np.int64), get_int_ex(['b'],np.int64), get_int_ex(['c'],np.int32), get_int_ex(['d'],np.int16), get_int_ex(['e'],np.uint8) ])
self.assert_(mgr.as_matrix().dtype == np.int64)
mgr = create_blockmanager([get_int_ex(['c'],np.int32), get_int_ex(['d'],np.int16), get_int_ex(['e'],np.uint8) ])
self.assert_(mgr.as_matrix().dtype == np.int32)
def test_as_matrix_datetime(self):
mgr = create_blockmanager([get_dt_ex(['h']), get_dt_ex(['g'])])
self.assert_(mgr.as_matrix().dtype == 'M8[ns]')
def test_astype(self):
# coerce all
mgr = create_blockmanager([get_float_ex(['c'],np.float32), get_float_ex(['d'],np.float16), get_float_ex(['e'],np.float64)])
for t in ['float16','float32','float64','int32','int64']:
tmgr = mgr.astype(t)
self.assert_(tmgr.as_matrix().dtype == np.dtype(t))
# mixed
mgr = create_blockmanager([get_obj_ex(['a','b']),get_bool_ex(['c']),get_dt_ex(['d']),get_float_ex(['e'],np.float32), get_float_ex(['f'],np.float16), get_float_ex(['g'],np.float64)])
for t in ['float16','float32','float64','int32','int64']:
tmgr = mgr.astype(t, raise_on_error = False).get_numeric_data()
self.assert_(tmgr.as_matrix().dtype == np.dtype(t))
def test_convert(self):
def _compare(old_mgr, new_mgr):
""" compare the blocks, numeric compare ==, object don't """
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
self.assert_(len(old_blocks) == len(new_blocks))
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
self.assert_(found == True)
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
self.assert_(found == True)
# noops
mgr = create_blockmanager([get_int_ex(['f']), get_float_ex(['g'])])
new_mgr = mgr.convert()
_compare(mgr,new_mgr)
mgr = create_blockmanager([get_obj_ex(['a','b']), get_int_ex(['f']), get_float_ex(['g'])])
new_mgr = mgr.convert()
_compare(mgr,new_mgr)
        # there could actually be multiple dtypes resulting
def _check(new_mgr,block_type, citems):
items = set()
for b in new_mgr.blocks:
if isinstance(b,block_type):
for i in list(b.items):
items.add(i)
self.assert_(items == set(citems))
# convert
mat = np.empty((N, 3), dtype=object)
mat[:, 0] = '1'
mat[:, 1] = '2.'
mat[:, 2] = 'foo'
b = make_block(mat.T, ['a','b','foo'], TEST_COLS)
mgr = create_blockmanager([b, get_int_ex(['f']), get_float_ex(['g'])])
new_mgr = mgr.convert(convert_numeric = True)
_check(new_mgr,FloatBlock,['b','g'])
_check(new_mgr,IntBlock,['a','f'])
mgr = create_blockmanager([b, get_int_ex(['f'],np.int32), get_bool_ex(['bool']), get_dt_ex(['dt']),
get_int_ex(['i'],np.int64), get_float_ex(['g'],np.float64), get_float_ex(['h'],np.float16)])
new_mgr = mgr.convert(convert_numeric = True)
_check(new_mgr,FloatBlock,['b','g','h'])
_check(new_mgr,IntBlock,['a','f','i'])
_check(new_mgr,ObjectBlock,['foo'])
_check(new_mgr,BoolBlock,['bool'])
_check(new_mgr,DatetimeBlock,['dt'])
def test_interleave(self):
pass
def test_interleave_non_unique_cols(self):
df = DataFrame([
[Timestamp('20130101'), 3.5],
[Timestamp('20130102'), 4.5]],
columns=['x', 'x'],
index=[1, 2])
df_unique = df.copy()
df_unique.columns = ['x', 'y']
np.testing.assert_array_equal(df_unique.values, df.values)
def test_consolidate(self):
pass
def test_consolidate_ordering_issues(self):
self.mgr.set('f', randn(N))
self.mgr.set('d', randn(N))
self.mgr.set('b', randn(N))
self.mgr.set('g', randn(N))
self.mgr.set('h', randn(N))
cons = self.mgr.consolidate()
self.assertEquals(cons.nblocks, 1)
self.assert_(cons.blocks[0].items.equals(cons.items))
def test_reindex_index(self):
pass
def test_reindex_items(self):
def _check_cols(before, after, cols):
for col in cols:
assert_almost_equal(after.get(col), before.get(col))
# not consolidated
vals = randn(N)
self.mgr.set('g', vals)
reindexed = self.mgr.reindex_items(['g', 'c', 'a', 'd'])
self.assertEquals(reindexed.nblocks, 2)
assert_almost_equal(reindexed.get('g'), vals.squeeze())
_check_cols(self.mgr, reindexed, ['c', 'a', 'd'])
def test_xs(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mgr.set_axis(1, index)
result = self.mgr.xs('bar', axis=1)
expected = self.mgr.get_slice(slice(3, 5), axis=1)
assert_frame_equal(DataFrame(result), DataFrame(expected))
def test_get_numeric_data(self):
int_ser = Series(np.array([0, 1, 2]))
float_ser = Series(np.array([0., 1., 2.]))
complex_ser = Series(np.array([0j, 1j, 2j]))
str_ser = Series(np.array(['a', 'b', 'c']))
bool_ser = Series(np.array([True, False, True]))
obj_ser = Series(np.array([1, 'a', 5]))
dt_ser = Series(tm.makeDateIndex(3))
# check types
df = DataFrame({'int': int_ser, 'float': float_ser,
'complex': complex_ser, 'str': str_ser,
'bool': bool_ser, 'obj': obj_ser,
'dt': dt_ser})
xp = DataFrame({'int': int_ser, 'float': float_ser,
'complex': complex_ser, 'bool': bool_ser})
rs = DataFrame(df._data.get_numeric_data())
assert_frame_equal(xp, rs)
xp = DataFrame({'bool': bool_ser})
rs = DataFrame(df._data.get_bool_data())
assert_frame_equal(xp, rs)
rs = DataFrame(df._data.get_bool_data())
df.ix[0, 'bool'] = not df.ix[0, 'bool']
self.assertEqual(rs.ix[0, 'bool'], df.ix[0, 'bool'])
rs = DataFrame(df._data.get_bool_data(copy=True))
df.ix[0, 'bool'] = not df.ix[0, 'bool']
self.assertEqual(rs.ix[0, 'bool'], not df.ix[0, 'bool'])
def test_missing_unicode_key(self):
df = DataFrame({"a": [1]})
try:
df.ix[:, u("\u05d0")] # should not raise UnicodeEncodeError
except KeyError:
pass # this is the expected exception
def test_equals(self):
# unique items
index = Index(list('abcdef'))
block1 = make_block(np.arange(12).reshape(3,4), list('abc'), index)
block2 = make_block(np.arange(12).reshape(3,4)*10, list('def'), index)
block1.ref_items = block2.ref_items = index
bm1 = BlockManager([block1, block2], [index, np.arange(block1.shape[1])])
bm2 = BlockManager([block2, block1], [index, np.arange(block1.shape[1])])
self.assert_(bm1.equals(bm2))
# non-unique items
index = Index(list('aaabbb'))
block1 = make_block(np.arange(12).reshape(3,4), list('aaa'), index,
placement=[0,1,2])
block2 = make_block(np.arange(12).reshape(3,4)*10, list('bbb'), index,
placement=[3,4,5])
block1.ref_items = block2.ref_items = index
bm1 = BlockManager([block1, block2], [index, np.arange(block1.shape[1])])
bm2 = BlockManager([block2, block1], [index, np.arange(block1.shape[1])])
self.assert_(bm1.equals(bm2))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
kevin-intel/scikit-learn | sklearn/utils/tests/test_encode.py | 11 | 7331 | import pickle
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.utils._encode import _unique
from sklearn.utils._encode import _encode
from sklearn.utils._encode import _check_unknown
@pytest.mark.parametrize(
"values, expected",
[(np.array([2, 1, 3, 1, 3], dtype='int64'),
np.array([1, 2, 3], dtype='int64')),
(np.array(['b', 'a', 'c', 'a', 'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object)),
(np.array(['b', 'a', 'c', 'a', 'c']),
np.array(['a', 'b', 'c']))],
ids=['int64', 'object', 'str'])
def test_encode_util(values, expected):
uniques = _unique(values)
assert_array_equal(uniques, expected)
encoded = _encode(values, uniques=uniques)
assert_array_equal(encoded, np.array([1, 0, 2, 0, 2]))
def test_encode_with_check_unknown():
# test for the check_unknown parameter of _encode()
uniques = np.array([1, 2, 3])
values = np.array([1, 2, 3, 4])
# Default is True, raise error
with pytest.raises(ValueError,
match='y contains previously unseen labels'):
_encode(values, uniques=uniques, check_unknown=True)
    # don't raise an error if False
_encode(values, uniques=uniques, check_unknown=False)
# parameter is ignored for object dtype
uniques = np.array(['a', 'b', 'c'], dtype=object)
values = np.array(['a', 'b', 'c', 'd'], dtype=object)
with pytest.raises(ValueError,
match='y contains previously unseen labels'):
_encode(values, uniques=uniques, check_unknown=False)
def _assert_check_unknown(values, uniques, expected_diff, expected_mask):
diff = _check_unknown(values, uniques)
assert_array_equal(diff, expected_diff)
diff, valid_mask = _check_unknown(values, uniques, return_mask=True)
assert_array_equal(diff, expected_diff)
assert_array_equal(valid_mask, expected_mask)
@pytest.mark.parametrize("values, uniques, expected_diff, expected_mask", [
(np.array([1, 2, 3, 4]),
np.array([1, 2, 3]),
[4],
[True, True, True, False]),
(np.array([2, 1, 4, 5]),
np.array([2, 5, 1]),
[4],
[True, True, False, True]),
(np.array([2, 1, np.nan]),
np.array([2, 5, 1]),
[np.nan],
[True, True, False]),
(np.array([2, 1, 4, np.nan]),
np.array([2, 5, 1, np.nan]),
[4],
[True, True, False, True]),
(np.array([2, 1, 4, np.nan]),
np.array([2, 5, 1]),
[4, np.nan],
[True, True, False, False]),
(np.array([2, 1, 4, 5]),
np.array([2, 5, 1, np.nan]),
[4],
[True, True, False, True]),
(np.array(['a', 'b', 'c', 'd'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
np.array(['d'], dtype=object),
[True, True, True, False]),
(np.array(['d', 'c', 'a', 'b'], dtype=object),
np.array(['a', 'c', 'b'], dtype=object),
np.array(['d'], dtype=object),
[False, True, True, True]),
(np.array(['a', 'b', 'c', 'd']),
np.array(['a', 'b', 'c']),
np.array(['d']),
[True, True, True, False]),
(np.array(['d', 'c', 'a', 'b']),
np.array(['a', 'c', 'b']),
np.array(['d']),
[False, True, True, True]),
])
def test_check_unknown(values, uniques, expected_diff, expected_mask):
_assert_check_unknown(values, uniques, expected_diff, expected_mask)
@pytest.mark.parametrize("missing_value", [None, np.nan, float('nan')])
@pytest.mark.parametrize('pickle_uniques', [True, False])
def test_check_unknown_missing_values(missing_value, pickle_uniques):
# check for check_unknown with missing values with object dtypes
values = np.array(['d', 'c', 'a', 'b', missing_value], dtype=object)
uniques = np.array(['c', 'a', 'b', missing_value], dtype=object)
if pickle_uniques:
uniques = pickle.loads(pickle.dumps(uniques))
expected_diff = ['d']
expected_mask = [False, True, True, True, True]
_assert_check_unknown(values, uniques, expected_diff, expected_mask)
values = np.array(['d', 'c', 'a', 'b', missing_value], dtype=object)
uniques = np.array(['c', 'a', 'b'], dtype=object)
if pickle_uniques:
uniques = pickle.loads(pickle.dumps(uniques))
expected_diff = ['d', missing_value]
expected_mask = [False, True, True, True, False]
_assert_check_unknown(values, uniques, expected_diff, expected_mask)
values = np.array(['a', missing_value], dtype=object)
uniques = np.array(['a', 'b', 'z'], dtype=object)
if pickle_uniques:
uniques = pickle.loads(pickle.dumps(uniques))
expected_diff = [missing_value]
expected_mask = [True, False]
_assert_check_unknown(values, uniques, expected_diff, expected_mask)
@pytest.mark.parametrize('missing_value', [np.nan, None, float('nan')])
@pytest.mark.parametrize('pickle_uniques', [True, False])
def test_unique_util_missing_values_objects(missing_value, pickle_uniques):
# check for _unique and _encode with missing values with object dtypes
values = np.array(['a', 'c', 'c', missing_value, 'b'], dtype=object)
expected_uniques = np.array(['a', 'b', 'c', missing_value], dtype=object)
uniques = _unique(values)
if missing_value is None:
assert_array_equal(uniques, expected_uniques)
else: # missing_value == np.nan
assert_array_equal(uniques[:-1], expected_uniques[:-1])
assert np.isnan(uniques[-1])
if pickle_uniques:
uniques = pickle.loads(pickle.dumps(uniques))
encoded = _encode(values, uniques=uniques)
assert_array_equal(encoded, np.array([0, 2, 2, 3, 1]))
def test_unique_util_missing_values_numeric():
# Check missing values in numerical values
values = np.array([3, 1, np.nan, 5, 3, np.nan], dtype=float)
expected_uniques = np.array([1, 3, 5, np.nan], dtype=float)
expected_inverse = np.array([1, 0, 3, 2, 1, 3])
uniques = _unique(values)
assert_array_equal(uniques, expected_uniques)
uniques, inverse = _unique(values, return_inverse=True)
assert_array_equal(uniques, expected_uniques)
assert_array_equal(inverse, expected_inverse)
encoded = _encode(values, uniques=uniques)
assert_array_equal(encoded, expected_inverse)
def test_unique_util_with_all_missing_values():
# test for all types of missing values for object dtype
values = np.array([np.nan, 'a', 'c', 'c', None, float('nan'),
None], dtype=object)
uniques = _unique(values)
assert_array_equal(uniques[:-1], ['a', 'c', None])
# last value is nan
assert np.isnan(uniques[-1])
expected_inverse = [3, 0, 1, 1, 2, 3, 2]
_, inverse = _unique(values, return_inverse=True)
assert_array_equal(inverse, expected_inverse)
def test_check_unknown_with_both_missing_values():
# test for both types of missing values for object dtype
values = np.array([np.nan, 'a', 'c', 'c', None, np.nan,
None], dtype=object)
diff = _check_unknown(values,
known_values=np.array(['a', 'c'], dtype=object))
assert diff[0] is None
assert np.isnan(diff[1])
diff, valid_mask = _check_unknown(
values, known_values=np.array(['a', 'c'], dtype=object),
return_mask=True)
assert diff[0] is None
assert np.isnan(diff[1])
assert_array_equal(valid_mask,
[False, True, True, True, False, False, False])
| bsd-3-clause |
CyclingNinja/SST_doppler_calc | SST_doppler.py | 1 | 7233 | from __future__ import print_function, division
from sunkitsst.read_cubes import read_cubes
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling import models, fitting
from scipy.optimize import minimize
#from scipy.signal import argrelextrema
from scipy.special import gammaln
from skimage import img_as_float
from astropy.constants import c
# need to import the _fitter_to_model_params helper function
from astropy.modeling.fitting import _fitter_to_model_params
imfile = '/data_swat/arlimb/crispex.6563.icube'
spfile = '/data_swat/arlimb/crispex.6563.sp.icube'
wave_ind = np.loadtxt('spect_ind.txt')
imheader, icube, spheader, spcube = read_cubes(imfile,spfile)
SST_cad = 2.5
SST_pix = 0.059
l_core = 6562.8
class PoissonlikeDistr(object):
# Daniela: you actually want to input an astropy model here
# not a function
def __init__(self, x, y, model):
self.x = x
self.y = y
self.model = model
return
# Daniela: evaluate needs to take a list of parameters as input
def evaluate(self, params):
# Daniela: set the input parameters in the astropy model using the
# list of parameters in params
_fitter_to_model_params(self.model, params)
# Daniela: make the mean model
        mean_model = self.model(self.x)
# Daniela: not sure what your 'x' in this log-likelihood is, but it should be
# the mean model
loglike = np.sum(-mean_model + self.y*np.log(mean_model) - gammaln(self.y + 1))
return loglike
# Daniela: __call__ needs to take self and params as input
def __call__(self, params):
# Daniela: __call__ should return the log-likelihood
return self.evaluate(params)
small_cube = np.array(icube[100:,:, 600:,450:570])
small_cube = img_as_float(small_cube)
dop_arr = np.zeros(small_cube[0, 0, :, :].shape)
param_arr = np.zeros(small_cube[:, 0, :, :].shape)
plt.ioff()
for T in range(small_cube.shape[0]):
# define the box to do it in
for xi in range(small_cube[0].shape[1]):
for yi in range(small_cube[0].shape[2]):
# flip the y axis data points
y = small_cube[T,:, xi, yi]
ysg = y[:]
ysg -= np.min(y)
x = wave_ind
# SINGLE GAUSSIAN FITTING
            # this definitely works (ish)
ysg = ysg*-1 + np.max(y)
# Daniela: is there a reason why there are round brackets around the Gaussian model?
gaus_sing = models.Gaussian1D(amplitude=np.max(ysg), mean=x[19], stddev=np.std(ysg))
# Daniela: instantiate the log-likelihood object;
# please check whether it uses the right arrays for the data
loglike_sing = PoissonlikeDistr(x, ysg, gaus_sing)
# initial parameters
init_params_s = [np.max(ysg), x[19], np.std(ysg)]
# Daniela: for maximum likelihood fitting, we need to define the *negative*
# log-likelihood:
neg_loglike_sing = lambda x: -loglike_sing(x)
# Daniela: here's the optimization:
opt_sing = minimize(neg_loglike_sing, init_params_s,
method="L-BFGS-B", tol=1.e-10)
# Daniela: print the negative log-likelihood:
#print("The value of the negative log-likelihood: " + str(opt_sing.fun))
# Daniela: the parameters at the maximum of the likelihood is in opt.x:
fit_pars = opt_sing.x
# Daniela : now we can put the parameters back into the Gaussian model
_fitter_to_model_params(gaus_sing, fit_pars)
# Bayesian information criterion
# see also: https://en.wikipedia.org/wiki/Bayesian_information_criterion
# bic = -2*loglike + n_params * log(n_datapoints)
# note to myself: opt.fun is -loglike, so we'll just use that here
bic_sing = 2.*opt_sing.fun + fit_pars.shape[0]*np.log(x.shape[0])
# Daniela: from here on, you can do the same for the model with two Gaussians
# Then you can compare the two BICs for a slightly hacky way of model
# comparison
# DOUBLE GAUSSIAN FITTING
ydg = y[:]
Imax = np.max(ydg)
gaus_double = (models.Gaussian1D(amplitude=Imax, mean=x[12], stddev=0.2) +
models.Gaussian1D(amplitude=Imax, mean=x[24], stddev=0.2))
init_params_double = [np.max(ydg), x[12], np.std(ydg),
np.max(ydg), x[24], np.std(ydg)]
loglike_double = PoissonlikeDistr(x, ysg, gaus_double)
neg_loglike_doub = lambda x: -loglike_double(x)
opt_doub = minimize(neg_loglike_doub, init_params_double,
method="L-BFGS-B", tol=1.e-10)
loglike_doub = PoissonlikeDistr(x, ydg, gaus_double)
fit_pars_dg = opt_doub.x
_fitter_to_model_params(gaus_double, fit_pars_dg)
            bic_doub = 2.*opt_doub.fun + fit_pars_dg.shape[0]*np.log(x.shape[0])
# use the bic values to assign to fit again and calc the doppler array
if bic_doub < bic_sing:
fit_sing_g_2 = fitting.LevMarLSQFitter()
gs2 = fit_sing_g_2(gaus_sing, x, ysg)
gsg = lambda x: -1 * gs2(x)
ysg = ysg*-1
t_mean = gs2.mean.value
else:
fit_doub_g_2 = fitting.LevMarLSQFitter()
ydg = y[:]
gd2 = fit_doub_g_2(gaus_double, x, ydg)
res = minimize(gd2, [6562.8],
method='L-BFGS-B',
bounds=[[x[19 - 5], x[19 + 5]],])
t_mean = res.x
dop_arr[xi,yi] = t_mean
np.save('/storage2/jet/dop_arrs/dop_arr_{:03d}.npy'.format(T), dop_arr)
        print('/storage2/jet/dop_arrs/dop_arr_{:03d}.npy'.format(T))
# # revert to an interpolation to find the minima
# # need to keep the regular orientation of the y dist
# if fit_g2.fit_info['param_cov'] is None:
#
# ydg = y[:]
# Imax = np.max(ydg)
#
# g_init = (models.Gaussian1D(amplitude=Imax, mean=x[12], stddev=0.2) +
# models.Gaussian1D(amplitude=Imax, mean=x[24], stddev=0.2))
# fit_gdg = fitting.LevMarLSQFitter()
# gdg = fit_gdg(g_init, x, ydg)
#
# res = minimize(gdg, [6562.8], method='L-BFGS-B', bounds=[[x[19 - 5], x[19 + 5]],])
# t_mean = res.x
# if ((t_mean[0] - l_core) > 1) | ((t_mean[0] - l_core) < -1):
# t_mean = l_core
# dop_arr[T,xi,yi] = t_mean
#np.save('/storage2/jet/SST/dopplergram.npy', dop_arr)
## Plot the data with the best-fit model
#plt.figure(figsize=(8,5))
#plt.plot(x, y, 'ko')
#plt.plot(x, g(x), label='Gaussian')
#plt.xlabel('Position')
#plt.ylabel('Flux')
#
#plt.plot(res.x, g(res.x), 'ro')
#plt.show()
| mit |
brev/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colors.py | 69 | 31676 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
'palevioletred' : '#AFEEEE',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs
for k, v in cnames.items():
if k.find('gray')>=0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given a len 3 rgb tuple of 0-1 floats, return the hex string'
return '#%02x%02x%02x' % tuple([round(val*255) for val in rgb])
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, basestring):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "%s"' % s)
return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])
class ColorConverter:
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b' : (0.0, 0.0, 1.0),
'g' : (0.0, 0.5, 0.0),
'r' : (1.0, 0.0, 0.0),
'c' : (0.0, 0.75, 0.75),
'm' : (0.75, 0, 0.75),
'y' : (0.75, 0.75, 0),
'k' : (0.0, 0.0, 0.0),
'w' : (1.0, 1.0, 1.0),
}
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a float, like '0.4', indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
try: return self.cache[arg]
except KeyError: pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try: return self.cache[arg]
except KeyError: pass
except TypeError:
raise ValueError(
'to_rgb: arg "%s" is unhashable even inside a tuple'
% (str(arg),))
try:
if cbook.is_string_like(arg):
color = self.colors.get(arg, None)
if color is None:
str1 = cnames.get(arg, arg)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(arg)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = tuple([fl]*3)
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is %d; must be 3 or 4'%len(arg))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
raise ValueError('cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError), exc:
raise ValueError('to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if [x for x in arg if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                        raise ValueError('number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], arg[3] * alpha
r,g,b = arg[:3]
if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
r,g,b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r,g,b,alpha
except (TypeError, ValueError), exc:
raise ValueError('to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
if c.lower() == 'none':
return np.zeros((0,4), dtype=np.float_)
except AttributeError:
pass
if len(c) == 0:
return np.zeros((0,4), dtype=np.float_)
try:
result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
result = np.zeros((len(c), 4))
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha) # change in place
return np.asarray(result, np.float_)
colorConverter = ColorConverter()
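# Editorial usage sketch (hedged, not part of the original module): the single
# ``colorConverter`` instance accepts every color format listed in the module
# docstring above. The helper below is illustrative only and is never called by
# the module itself.
def _colorconverter_examples():
    specs = ['r', '0.75', '#eeefff', (0.3, 0.6, 0.9), 'burlywood']
    return [colorConverter.to_rgba(spec, alpha=0.5) for spec in specs]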
def makeMappingArray(N, data):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
sawtooth) where y0 represents the value of y for values of x
<= to that given, and y1 is the value to be used for x > than
that given). The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
try:
adata = np.array(data)
except:
raise TypeError("data must be convertable to an array")
shape = adata.shape
    if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:,0]
y0 = adata[:,1]
y1 = adata[:,2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x)-x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N-1)
lut = np.zeros((N,), np.float)
xind = np.arange(float(N))
ind = np.searchsorted(x, xind)[1:-1]
lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1]))
* (y0[ind] - y1[ind-1]) + y1[ind-1])
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = np.clip(lut, 0.0, 1.0)
#lut = where(lut > 1., 1., lut)
#lut = where(lut < 0., 0., lut)
return lut
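# Editorial sketch (hedged): a minimal illustration of the x, y0, y1 mapping format
# described in the docstring above. The two-point ramp below is an assumption chosen
# purely for illustration; it yields array([0., 0.25, 0.5, 0.75, 1.]).
def _makeMappingArray_example():
    ramp = [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)]
    return makeMappingArray(5, ramp)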
class Colormap:
"""Base class for all scalar to rgb mappings
Important methods:
* :meth:`set_bad`
* :meth:`set_under`
* :meth:`set_over`
"""
def __init__(self, name, N=256):
"""
Public class attributes:
:attr:`N` : number of rgb quantization levels
:attr:`name` : name of colormap
"""
self.name = name
self.N = N
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = N
self._i_over = N+1
self._i_bad = N+2
self._isinit = False
def __call__(self, X, alpha=1.0, bytes=False):
"""
*X* is either a scalar or an array (of any dimension).
If scalar, a tuple of rgba values is returned, otherwise
an array with the new shape = oldshape+(4,). If the X-values
are integers, then they are used as indices into the array.
If they are floating point, then they must be in the
interval (0.0, 1.0).
Alpha must be a scalar.
If bytes is False, the rgba values will be floats on a
0-1 scale; if True, they will be uint8, 0-255.
"""
if not self._isinit: self._init()
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
self._lut[:-3, -1] = alpha
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.asarray(X)
xa = xma.filled(0)
mask_bad = ma.getmask(xma)
if xa.dtype.char in np.typecodes['Float']:
np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1.
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
if NP_CLIP_OUT:
np.clip(xa * self.N, -1, self.N, out=xa)
else:
xa = np.clip(xa * self.N, -1, self.N)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
np.putmask(xa, xa>self.N-1, self._i_over)
np.putmask(xa, xa<0, self._i_under)
if mask_bad is not None and mask_bad.shape == xa.shape:
np.putmask(xa, mask_bad, self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut
rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
# twice as fast as lut[xa];
# using the clip or wrap mode and providing an
# output array speeds it up a little more.
if vtype == 'scalar':
rgba = tuple(rgba[0,:])
return rgba
def set_bad(self, color = 'k', alpha = 1.0):
'''Set color to be used for masked values.
'''
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_under(self, color = 'k', alpha = 1.0):
'''Set color to be used for low out-of-range values.
Requires norm.clip = False
'''
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_over(self, color = 'k', alpha = 1.0):
'''Set color to be used for high out-of-range values.
Requires norm.clip = False
'''
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N-1]
self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
'''Generate the lookup table, self._lut'''
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit: self._init()
return (np.alltrue(self._lut[:,0] == self._lut[:,1])
and np.alltrue(self._lut[:,0] == self._lut[:,2]))
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:func:`makeMappingArray`
"""
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red'])
self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green'])
self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue'])
self._isinit = True
self._set_extremes()
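# Editorial sketch (hedged): instantiating the class from the cdict shown in the
# __init__ docstring above and sampling it at three points. This helper is
# illustrative only and is not used elsewhere in the module.
def _linear_segmented_example():
    cdict = {'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)],
             'green': [(0.0, 0.0, 0.0), (0.25, 0.0, 0.0), (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)],
             'blue':  [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)]}
    cmap = LinearSegmentedColormap('example', cdict, N=256)
    return cmap(0.0), cmap(0.5), cmap(1.0)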
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name = 'from_list', N = None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 floating point array (*N* rgb values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try: gray = float(self.colors)
except TypeError: pass
else: self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgb = np.array([colorConverter.to_rgb(c)
for c in self.colors], np.float)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3, :-1] = rgb
self._lut[:-3, -1] = 1
self._isinit = True
self._set_extremes()
class Normalize:
"""
Normalize a given value to the 0-1 range
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
self.vmin = ma.minimum(A)
self.vmax = ma.maximum(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None: self.vmin = ma.minimum(A)
if self.vmax is None: self.vmax = ma.maximum(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
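# Editorial sketch (hedged): a scalar is mapped linearly onto the 0-1 range and can
# be mapped back with inverse(); the vmin/vmax values below are illustrative only.
def _normalize_example():
    norm = Normalize(vmin=0.0, vmax=10.0)
    return norm(5.0), norm.inverse(0.5)   # -> (0.5, 5.0)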
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin<=0:
raise ValueError("values must all be positive")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax/vmin), val)
else:
return vmin * pow((vmax/vmin), value)
class BoundaryNorm(Normalize):
'''
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
'''
def __init__(self, boundaries, ncolors, clip=False):
'''
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped to -1 if low and ncolors
if high; these are converted to valid indices by
:meth:`Colormap.__call__` .
'''
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N-1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, x, clip=None):
if clip is None:
clip = self.clip
x = ma.asarray(x)
mask = ma.getmaskarray(x)
xx = x.filled(self.vmax+1)
if clip:
            xx = np.clip(xx, self.vmin, self.vmax)
iret = np.zeros(x.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx>=b] = i
if self._interp:
iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
iret[xx<self.vmin] = -1
iret[xx>=self.vmax] = self.Ncmap
ret = ma.array(iret, mask=mask)
if ret.shape == () and not mask:
ret = int(ret) # assume python scalar
return ret
def inverse(self, value):
return ValueError("BoundaryNorm is not invertible")
class NoNorm(Normalize):
'''
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
'''
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
# compatibility with earlier class names that violated convention:
normalize = Normalize
no_norm = NoNorm
| agpl-3.0 |
akalicki/genomics-csi | quality-assessment/group3_report2_question7.py | 1 | 2940 | #!/usr/bin/env python
"""
Usage:
samtools mpileup --max-depth 8000 <aln.bam> -f <ref.fasta> |
python group3_report2_question7.py
<aln.bam> should be an (indexed) alignment file and <ref.fasta> the (indexed)
human genome reference sequence.
Reads a BAM alignment file as processed by BWA and poretools, compares it to the
reference sequence, and tallies matches, mismatches, and indels to build a
confusion matrix.
"""
import sys
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
def get_confusion(f):
"""Get confusion matrix from mileup output"""
confusion = defaultdict(int)
for line in f:
info = line.split('\t')
ref_base = info[2]
read_bases = info[4]
add_dict = get_new_entries(ref_base, read_bases)
for key in add_dict:
confusion[key] += add_dict[key]
return confusion
bases = ['A','C','G','T']
def get_new_entries(ref_base, read_bases):
"""Return matching, mismatching, and indels for given base strings"""
ref_base = ref_base.upper()
entries = defaultdict(int)
i = 0
while i < len(read_bases):
char = read_bases[i].upper()
if char in ['.', ',']: # match
entries[(ref_base, ref_base)] += 1
elif char in bases: # mismatch
entries[(ref_base, char)] += 1
elif char in ['+', '-']: # indel
try:
num_indels = int(read_bases[i + 1])
except ValueError:
i += 1
continue
for j in range(num_indels):
indel = read_bases[i + j + 2].upper()
if indel not in bases:
continue
if char == '+': # insertion
entries[('-', indel)] += 1
else: # deletion
entries[(indel, '-')] += 1
i += num_indels + 2
continue
i += 1
return entries
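# Editorial sketch (hedged, not part of the original script): a tiny worked example
# of get_new_entries on a made-up pileup column. The read-base string ".,A+2AG" is
# an assumption: two matches, one mismatch to A, and a 2-base insertion "AG", giving
# {('C', 'C'): 2, ('C', 'A'): 1, ('-', 'A'): 1, ('-', 'G'): 1}.
def _example_entries():
    return get_new_entries('c', '.,A+2AG')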
def confusion_to_nucleotides(confusion):
"""Takes a confusion array, and prints histogram of nucleotide composition
for insertions and deletions"""
    insertions = [ confusion[('-', c)] for c in bases ]
    deletions = [ confusion[(c, '-')] for c in bases ]
create_barchart(bases, insertions, "Nucleotide composition of insertions", 'g')
create_barchart(bases, deletions, "Nucleotide composition of deletions", 'r')
def create_barchart(values, counts, title, col):
"""Create a pyplot barchart for given x and y values"""
ind = np.arange(len(values))
width = 0.8
fig, ax = plt.subplots()
ax.bar(ind, counts, width, color=col)
ax.set_xlabel("Bases")
ax.set_ylabel("Count")
ax.set_title(title)
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(values)
plt.show()
if __name__ == '__main__':
confusion = get_confusion(sys.stdin)
confusion_to_nucleotides(confusion)
print(confusion)
| gpl-2.0 |
mhdella/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses the only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error
print("Residual sum of squares: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
sybenzvi/3ML | threeML/io/triangle.py | 1 | 16115 | # -*- coding: utf-8 -*-
#Copyright (c) 2013 Daniel Foreman-Mackey
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
#This version has been modified by G.Vianello to adapt it and customize it to 3ML
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, unicode_literals
__all__ = ["corner", "hist2d", "error_ellipse"]
__version__ = "0.0.6"
__author__ = "Dan Foreman-Mackey ([email protected])"
__copyright__ = "Copyright 2013 Daniel Foreman-Mackey"
__contributors__ = [
# Alphabetical by first name.
"Adrian Price-Whelan @adrn",
"Brendon Brewer @eggplantbren",
"Ekta Patel @ekta1224",
"Emily Rice @emilurice",
"Geoff Ryan @geoffryan",
"Kyle Barbary @kbarbary",
"Phil Marshall @drphilmarshall",
"Pierre Gratier @pirg",
]
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import Ellipse
import matplotlib.cm as cm
def corner(xs, weights=None, labels=None, extents=None, truths=None,
truth_color="#4682b4", scale_hist=True, quantiles=[],
verbose=False, plot_contours=True, plot_datapoints=True,
fig=None, **kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like (nsamples, ndim)
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis are the dimensions of
the space.
weights : array_like (nsamples,)
The weight of each sample. If `None` (default), samples are given
equal weight.
labels : iterable (ndim,) (optional)
A list of names for the dimensions.
extents : iterable (ndim,) (optional)
A list where each element is either a length 2 tuple containing
lower and upper bounds (extents) or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,) (optional)
A list of reference values to indicate on the plots.
truth_color : str (optional)
A ``matplotlib`` style color for the ``truths`` makers.
scale_hist : bool (optional)
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable (optional)
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool (optional)
If true, print the values of the computed quantiles.
plot_contours : bool (optional)
Draw contours for dense regions of the plot.
plot_datapoints : bool (optional)
Draw the individual data points.
fig : matplotlib.Figure (optional)
Overplot onto the provided figure object.
"""
# Deal with 1D sample lists.
xs = np.atleast_1d(xs)
if len(xs.shape) == 1:
xs = np.atleast_2d(xs)
else:
assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
xs = xs.T
assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
"dimensions than samples!"
if weights is not None:
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError('weights must be 1-D')
if xs.shape[1] != weights.shape[0]:
raise ValueError('lengths of weights must match number of samples')
# backwards-compatibility
plot_contours = kwargs.get("smooth", plot_contours)
K = len(xs)
factor = 2.0 # size of one side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.05 * factor # size of top/right margin
whspace = 0.05 # w/hspace size
plotdim = factor * K + factor * (K - 1.) * whspace
dim = lbdim + plotdim + trdim
if fig is None:
fig, axes = pl.subplots(K, K, figsize=(dim, dim))
else:
try:
axes = np.array(fig.axes).reshape((K, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"dimensions K={1}".format(len(fig.axes), K))
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
if extents is None:
extents = [[x.min(), x.max()] for x in xs]
# Check for parameters that never change.
m = np.array([e[0] == e[1] for e in extents], dtype=bool)
if np.any(m):
raise ValueError(("It looks like the parameter(s) in column(s) "
"{0} have no dynamic range. Please provide an "
"`extent` argument.")
.format(", ".join(map("{0}".format,
np.arange(len(m))[m]))))
else:
# If any of the extents are percentiles, convert them to ranges.
for i in range(len(extents)):
try:
emin, emax = extents[i]
except TypeError:
q = [0.5 - 0.5*extents[i], 0.5 + 0.5*extents[i]]
extents[i] = quantile(xs[i], q, weights=weights)
######################
# Added by GV
######################
#Figure out which parameters are to be made in log scale
logbins = kwargs.get("logbins", None)
if logbins is None:
#No logbins (default)
logbins = map( lambda x: False, xs )
else:
#Check that logbins is the right size
if len( logbins ) != len( xs ):
raise RuntimeError("if you specify logbins, it must have the same length of the number of parameters")
pass
######################
for i, x in enumerate(xs):
ax = axes[i, i]
# Plot the histograms.
#########################
# Modified by GV
#########################
nbins = kwargs.get("bins", 50)
if logbins[i]:
mybins = np.logspace( np.log10(x.min()), np.log10(x.max()), nbins )
else:
mybins = np.linspace( x.min(), x.max(), nbins )
n, b, p = ax.hist(x, weights=weights, bins=mybins,
range=extents[i], histtype="step",
color=kwargs.get("color", "k") )
if truths is not None:
ax.axvline(truths[i], color=truth_color)
#Plot priors
priors = kwargs.get("priors", None)
if priors is not None:
thisP = priors[i]
bc = (b[:-1] + b[1:] ) / 2.0
pvals = np.array( map(lambda x: 10.0**thisP( x ), bc) )
#Renorm to one
pvals = pvals / pvals.max()
#Plot the prior with the same maximum as the data
ax.plot(bc, pvals * n.max() , color='red', linestyle=':' )
#########################
# Plot quantiles if wanted.
if len(quantiles) > 0:
qvalues = quantile(x, quantiles, weights=weights)
for q in qvalues:
ax.axvline(q, ls="dashed", color=kwargs.get("color", "k"))
if verbose:
print("Quantiles:")
print(zip(quantiles, qvalues))
# Set up the axes.
ax.set_xlim(extents[i])
if scale_hist:
maxn = np.max(n)
ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
else:
ax.set_ylim(0, 1.1 * np.max(n))
ax.set_yticklabels([])
ax.xaxis.set_major_locator(MaxNLocator(5))
# Not so DRY.
if i < K - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[i])
ax.xaxis.set_label_coords(0.5, -0.3)
for j, y in enumerate(xs):
ax = axes[i, j]
if j > i:
ax.set_visible(False)
ax.set_frame_on(False)
continue
elif j == i:
continue
hist2d(y, x, ax=ax, extent=[extents[j], extents[i]],
plot_contours=plot_contours,
plot_datapoints=plot_datapoints,
weights=weights, **kwargs)
if truths is not None:
ax.plot(truths[j], truths[i], "s", color=truth_color)
ax.axvline(truths[j], color=truth_color)
ax.axhline(truths[i], color=truth_color)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
if i < K - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[j])
ax.xaxis.set_label_coords(0.5, -0.3)
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[i])
ax.yaxis.set_label_coords(-0.3, 0.5)
######################
# Added by GV
######################
nax = len( logbins )
for i,ll in enumerate( logbins ):
if ll:
#All plots on the i-th line need a
#log y-axis
for j in range( nax ):
#Do not log scale the y scale of the
#histogram
if j < i:
axes[i,j].set_yscale("symlog", linthreshy=1e-4)
#Remove tick labels from all but the first
#plot
if j > 0:
axes[i,j].set_yticklabels([])
#All plots on the j-th column need
#a log x-axis
for j in range( nax ):
axes[j,i].set_xscale("symlog", linthreshx=1e-4)
#Remove tick labels from all but the last
#plot
if j < nax - 1:
axes[j,i].set_xticklabels([])
return fig
def quantile(x, q, weights=None):
"""
Like numpy.percentile, but:
* Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
* scalar q not supported (q must be iterable)
* optional weights on x
"""
if weights is None:
return np.percentile(x, [100. * qi for qi in q])
else:
idx = np.argsort(x)
xsorted = x[idx]
cdf = np.add.accumulate(weights[idx])
cdf /= cdf[-1]
return np.interp(q, cdf, xsorted).tolist()
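# Illustrative usage (added comment; a minimal sketch, not part of the original
# module): with uniform weights the weighted branch should agree with plain
# percentiles up to interpolation error, e.g.
#
#     x = np.random.randn(1000)
#     w = np.ones_like(x)
#     quantile(x, [0.16, 0.5, 0.84], weights=w)  # ~ np.percentile(x, [16, 50, 84])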
def error_ellipse(mu, cov, ax=None, factor=1.0, **kwargs):
"""
Plot the error ellipse at a point given its covariance matrix.
"""
# some sane defaults
facecolor = kwargs.pop('facecolor', 'none')
edgecolor = kwargs.pop('edgecolor', 'k')
x, y = mu
U, S, V = np.linalg.svd(cov)
theta = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
ellipsePlot = Ellipse(xy=[x, y],
width=2 * np.sqrt(S[0]) * factor,
height=2 * np.sqrt(S[1]) * factor,
angle=theta,
facecolor=facecolor, edgecolor=edgecolor, **kwargs)
if ax is None:
ax = pl.gca()
ax.add_patch(ellipsePlot)
return ellipsePlot
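# Illustrative usage (added comment; a hedged sketch, not from the original
# source): draw the 1-sigma and 2-sigma ellipses of a sample covariance.
#
#     data = np.random.multivariate_normal([0., 0.], [[1., 0.6], [0.6, 2.]], 500)
#     mu, cov = data.mean(axis=0), np.cov(data.T)
#     error_ellipse(mu, cov, factor=1.0)
#     error_ellipse(mu, cov, factor=2.0)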
def hist2d(x, y, *args, **kwargs):
"""
Plot a 2-D histogram of samples.
"""
ax = kwargs.pop("ax", pl.gca())
extent = kwargs.pop("extent", [[x.min(), x.max()], [y.min(), y.max()]])
bins = kwargs.pop("bins", 50)
color = kwargs.pop("color", "k")
linewidths = kwargs.pop("linewidths", None)
plot_datapoints = kwargs.get("plot_datapoints", True)
plot_contours = kwargs.get("plot_contours", True)
cmap = cm.get_cmap("gray")
cmap._init()
cmap._lut[:-3, :-1] = 0.
cmap._lut[:-3, -1] = np.linspace(1, 0, cmap.N)
X = np.linspace(extent[0][0], extent[0][1], bins + 1)
Y = np.linspace(extent[1][0], extent[1][1], bins + 1)
try:
H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=(X, Y),
weights=kwargs.get('weights', None))
except ValueError:
raise ValueError("It looks like at least one of your sample columns "
"have no dynamic range. You could try using the "
"`extent` argument.")
#V = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
V = [0.68268949213708585, 0.95449973610364158, 0.99730020393673979]
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
for i, v0 in enumerate(V):
try:
V[i] = Hflat[sm <= v0][-1]
        except IndexError:
V[i] = Hflat[0]
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
X, Y = X[:-1], Y[:-1]
if plot_datapoints:
ax.plot(x, y, "o", color=color, ms=1.5, zorder=-1, alpha=0.1,
rasterized=True)
if plot_contours:
ax.contourf(X1, Y1, H.T, [V[-1], H.max()],
cmap=LinearSegmentedColormap.from_list("cmap",
([1] * 3,
[1] * 3),
N=2), antialiased=False)
if plot_contours:
ax.pcolor(X, Y, H.max() - H.T, cmap=cmap)
ax.contour(X1, Y1, H.T, V, colors=color, linewidths=linewidths)
data = np.vstack([x, y])
mu = np.mean(data, axis=1)
cov = np.cov(data)
if kwargs.pop("plot_ellipse", False):
error_ellipse(mu, cov, ax=ax, edgecolor="r", ls="dashed")
ax.set_xlim(extent[0])
ax.set_ylim(extent[1])
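# Minimal self-contained demo added for illustration (an assumption about how the
# helpers above are meant to be used, not part of the original module); it relies
# only on names defined in this file (np, pl, hist2d).
if __name__ == "__main__":
    np.random.seed(0)
    demo_x = np.random.randn(2000)
    demo_y = 0.5 * demo_x + 0.1 * np.random.randn(2000)
    demo_ax = pl.figure().add_subplot(111)
    # 2-D histogram with contours, data points and the fitted error ellipse
    hist2d(demo_x, demo_y, ax=demo_ax, bins=40, plot_ellipse=True)
    pl.savefig("hist2d_demo.png")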
| bsd-3-clause |
foolcage/fooltrader | fooltrader/proxy/__init__.py | 1 | 2219 | # -*- coding: utf-8 -*-
import os
import pandas as pd
from fooltrader import settings
# Get the list of archived proxies
def get_proxy_dir():
return os.path.join(settings.FOOLTRADER_STORE_PATH, "proxy")
def get_proxy_path(protocol='http'):
return os.path.join(get_proxy_dir(), "{}_proxy.csv".format(protocol))
def get_checked_proxy_dir(part_name=None):
if part_name:
return os.path.join(get_proxy_dir(), 'checked', 'tmp')
else:
return os.path.join(get_proxy_dir(), 'checked')
def get_checked_proxy_path(protocol='http', part_name=None):
if not os.path.exists(get_checked_proxy_dir(part_name)):
os.makedirs(get_checked_proxy_dir(part_name))
if part_name:
return os.path.join(get_checked_proxy_dir(part_name), "{}_{}_proxy.csv".format(protocol, part_name))
else:
return os.path.join(get_checked_proxy_dir(), "{}_proxy.csv".format(protocol))
def get_sorted_proxy_dir(domain):
return os.path.join(get_proxy_dir(), domain)
def get_sorted_proxy_path(domain, protocol='http', part_name=None):
if not os.path.exists(get_sorted_proxy_dir(domain)):
os.makedirs(get_sorted_proxy_dir(domain))
if part_name:
return os.path.join(get_sorted_proxy_dir(domain), "tmp", "{}_{}_proxy.csv".format(protocol, part_name))
else:
return os.path.join(get_sorted_proxy_dir(domain), "{}_proxy.csv".format(protocol))
def get_checked_proxy(domain=None, protocol='http'):
if domain and os.path.exists(get_sorted_proxy_path(domain, protocol=protocol)):
return pd.read_csv(get_sorted_proxy_path(domain, protocol))
if os.path.exists(get_checked_proxy_path(protocol)):
return pd.read_csv(get_checked_proxy_path(protocol))
def get_proxy(protocol='http'):
if os.path.exists(get_proxy_path(protocol)):
return pd.read_csv(get_proxy_path(protocol))
else:
return pd.DataFrame()
def save_proxy(proxies, protocol='http'):
proxy_df = get_proxy(protocol)
proxy_df = proxy_df.append(proxies)
    proxy_df = proxy_df.drop_duplicates(subset=['url'], keep='last')
proxy_df.to_csv(get_proxy_path(protocol), index=False)
if not os.path.exists(get_proxy_dir()):
os.makedirs(get_proxy_dir())
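# Illustrative usage added as a sketch (not part of the original module); it assumes
# settings.FOOLTRADER_STORE_PATH points to a writable directory and that proxy records
# carry at least a 'url' column, matching the de-duplication key used in save_proxy.
if __name__ == '__main__':
    sample_proxies = pd.DataFrame([{'url': 'http://127.0.0.1:8080'}])
    save_proxy(sample_proxies, protocol='http')
    print(get_proxy(protocol='http'))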
| mit |
mavenlin/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 71 | 12923 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for k, v in list(feeder.input_dtype.items()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for k, v in list(inp.items()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self.assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
| apache-2.0 |
mantidproject/mantid | qt/python/mantidqt/project/test/test_plotsloader.py | 3 | 15349 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
import unittest # noqa
from unittest import mock # noqa
import matplotlib
import matplotlib.axis
import matplotlib.pyplot as plt # noqa
import matplotlib.figure # noqa
import matplotlib.text # noqa
from matplotlib.ticker import LogFormatterSciNotation, ScalarFormatter, NullFormatter
matplotlib.use('AGG')
from mantidqt.project.plotsloader import PlotsLoader # noqa
import mantid.plots.axesfunctions # noqa
from mantid.api import AnalysisDataService as ADS # noqa
from mantid.dataobjects import Workspace2D # noqa
def pass_func():
pass
class PlotsLoaderTest(unittest.TestCase):
def setUp(self):
self.plots_loader = PlotsLoader()
plt.plot = mock.MagicMock()
mantid.plots.axesfunctions.plot = mock.MagicMock()
self.dictionary = {u'legend': {u'exists': False}, u'lines': [],
u'properties': {u'axisOn': True, u'bounds': (0.0, 0.0, 0.0, 0.0), u'dynamic': True,
u'frameOn': True, u'visible': True,
u'xAxisProperties': {u'fontSize': 10.0,
u'gridStyle': {u'gridOn': False},
u'majorTickFormat': None,
u'majorTickFormatter': 'ScalarFormatter',
u'majorTickLocator': 'AutoLocator',
u'majorTickLocatorValues': None,
u'minorTickFormat': None,
u'minorTickFormatter': 'NullFormatter',
u'minorTickLocator': 'NullLocator',
u'minorTickLocatorValues': None,
u'visible': True},
u'xAxisScale': u'linear', u'xLim': (0.0, 1.0),
u'yAxisProperties': {u'fontSize': 10.0,
u'gridStyle': {u'gridOn': False},
u'majorTickFormat': None,
u'majorTickFormatter': 'ScalarFormatter',
u'majorTickLocator': 'AutoLocator',
u'majorTickLocatorValues': None,
u'minorTickFormat': None,
u'minorTickFormatter': 'NullFormatter',
u'minorTickLocator': 'NullLocator',
u'minorTickLocatorValues': None,
u'visible': True},
u'yAxisScale': u'linear', u'yLim': (0.0, 1.0), u'showMinorGrid': False,
u"xAutoScale": False, u"yAutoScale": False,
u'tickParams': {
'xaxis': {
'major': {
'bottom': True,
'top': True,
'labelbottom': True,
'labeltop': True,
'direction': 'inout',
'width': 1,
'size': 6},
'minor': {
'bottom': True,
'top': True,
'labelbottom': True,
'labeltop': True,
'direction': 'inout',
'width': 1,
'size': 3}},
'yaxis': {
'major': {
'left': True,
'right': True,
'labelleft': True,
'labelright': True,
'direction': 'inout',
'width': 1, 'size': 6},
'minor': {
'left': True,
'right': True,
'labelleft': True,
'labelright': True,
'direction': 'inout',
'width': 1,
'size': 3}}},
u'spineWidths': {'left': 0.4, 'right': 0.4, 'bottom': 0.4, 'top': 0.4}},
u'textFromArtists': {}, u'texts': [], u'title': u'', u'xAxisTitle': u'', u'yAxisTitle': u''}
def test_load_plots_does_the_right_calls(self):
self.plots_loader.make_fig = mock.MagicMock()
self.plots_loader.load_plots(["plot1", "plot2"])
self.assertEqual(self.plots_loader.make_fig.call_count, 2)
@mock.patch("matplotlib.figure.Figure.show")
def test_make_fig_makes_the_right_calls(self, pass_func):
ws = Workspace2D()
ADS.add("ws", ws)
plot_dict = {"label": "plot", "creationArguments": [[
{"workspaces": "ws", "wkspIndex": 0},
{"function": "axhline", "args": [10, 0, 1], "kwargs": {}},
{"function": "axvline", "args": [], "kwargs": {"x": 0, "ymin": 0, "ymax": 1}}
]]}
self.plots_loader.workspace_plot_func = mock.MagicMock()
self.plots_loader.plot_func = mock.MagicMock()
self.plots_loader.restore_figure_data = mock.MagicMock()
self.plots_loader.make_fig(plot_dict)
self.assertEqual(self.plots_loader.workspace_plot_func.call_count, 1)
self.assertEqual(self.plots_loader.plot_func.call_count, 2)
self.assertEqual(self.plots_loader.restore_figure_data.call_count, 1)
def test_restore_fig_properties(self):
matplotlib.figure.Figure.set_figheight = mock.MagicMock()
matplotlib.figure.Figure.set_figwidth = mock.MagicMock()
matplotlib.figure.Figure.set_dpi = mock.MagicMock()
self.plots_loader.restore_fig_properties(matplotlib.figure.Figure(), {"figHeight": 1, "figWidth": 1, "dpi": 1})
self.assertEqual(matplotlib.figure.Figure.set_figheight.call_count, 1)
self.assertEqual(matplotlib.figure.Figure.set_figwidth.call_count, 1)
self.assertEqual(matplotlib.figure.Figure.set_dpi.call_count, 1)
def test_restore_fig_axes(self):
self.plots_loader.update_properties = mock.MagicMock()
self.plots_loader.update_lines = mock.MagicMock()
self.plots_loader.create_text_from_dict = mock.MagicMock()
self.plots_loader.update_legend = mock.MagicMock()
fig = matplotlib.figure.Figure()
self.plots_loader.restore_fig_axes(matplotlib.axes.Axes(fig=fig, rect=[0, 0, 0, 0]), self.dictionary)
self.assertEqual(self.plots_loader.update_properties.call_count, 1)
self.assertEqual(self.plots_loader.update_lines.call_count, 0)
self.assertEqual(self.plots_loader.create_text_from_dict.call_count, 0)
self.assertEqual(self.plots_loader.update_legend.call_count, 1)
def test_update_properties_limits(self):
dic = self.dictionary[u"properties"]
dic.pop("spineWidths", None) # Not needed for this test and causes error on mock.
mock_ax = mock.Mock()
plots_loader = self.plots_loader
with mock.patch.object(plots_loader, "update_axis", mock.Mock()):
plots_loader.update_properties(mock_ax, dic)
mock_ax.set_xlim.assert_called_once_with(dic['xLim'])
            mock_ax.set_ylim.assert_called_once_with(dic['yLim'])
def test_update_properties_limits_autoscale(self):
dic = self.dictionary[u"properties"]
dic.pop("spineWidths", None) # Not needed for this test and causes error on mock.
dic.update({"xAutoScale": True, "yAutoScale": True})
mock_ax = mock.Mock()
plots_loader = self.plots_loader
with mock.patch.object(plots_loader, "update_axis", mock.Mock()):
plots_loader.update_properties(mock_ax, dic)
mock_ax.autoscale.assert_has_calls([mock.call(True, axis="x"), mock.call(True, axis="y")])
mock_ax.set_xlim.assert_not_called()
            mock_ax.set_ylim.assert_not_called()
def test_update_properties_sets_tick_params_and_spine_widths(self):
dic = self.dictionary[u"properties"]
mock_ax = mock.Mock()
mock_ax.spines = {
"left": mock.Mock(),
"right": mock.Mock(),
"top": mock.Mock(),
"bottom": mock.Mock(),
}
plots_loader = self.plots_loader
with mock.patch.object(plots_loader, "update_axis", mock.Mock()):
plots_loader.update_properties(mock_ax, dic)
mock_ax.xaxis.set_tick_params.assert_has_calls([
mock.call(which="major", **dic["tickParams"]["xaxis"]["major"]),
mock.call(which="minor", **dic["tickParams"]["xaxis"]["minor"])
])
mock_ax.yaxis.set_tick_params.assert_has_calls([
mock.call(which="major", **dic["tickParams"]["yaxis"]["major"]),
mock.call(which="minor", **dic["tickParams"]["yaxis"]["minor"])
])
for (spine_name, mock_spine) in mock_ax.spines.items():
mock_spine.set_linewidth.assert_called_with(dic["spineWidths"][spine_name])
def test_create_text_from_dict(self):
fig = matplotlib.figure.Figure()
ax = matplotlib.axes.Axes(fig=fig, rect=[0, 0, 0, 0])
ax.text = mock.MagicMock()
self.plots_loader.create_text_from_dict(ax=ax, dic={"text": "text", "position": (1, 1), "useTeX": 1,
"style": {"alpha": 1, "textSize": 1, "color": 1,
"hAlign": 1, "vAlign": 1, "rotation": 1,
"zOrder": 1}})
self.assertEqual(ax.text.call_count, 1)
ax.text.assert_called_once_with(fontdict={u'zorder': 1, u'fontsize': 1, u'color': 1, u'alpha': 1,
u'rotation': 1, u'verticalalignment': 1, u'usetex': 1,
u'horizontalalignment': 1}, s=u'text', x=1, y=1)
@mock.patch("matplotlib.figure.Figure.show")
def test_load_plot_from_dict(self, pass_func):
# The fact this runs is the test
self.plots_loader.load_plots([self.dictionary])
def test_update_axis_ticks_format(self):
fig, ax = plt.subplots()
x_axis = ax.xaxis
x_axis.set_major_formatter(LogFormatterSciNotation())
x_axis.set_minor_formatter(LogFormatterSciNotation())
PlotsLoader.update_axis_ticks(x_axis, self.dictionary['properties']['xAxisProperties'])
self.assertIsInstance(x_axis.get_major_formatter(), ScalarFormatter)
self.assertIsInstance(x_axis.get_minor_formatter(), NullFormatter)
@mock.patch("matplotlib.colors.LogNorm", autospec=True)
def test_restore_normalise_obj_from_dict_creates_correct_norm_instance_from_supported_norm(self, mock_LogNorm):
norm_dict = {'type': 'LogNorm', 'vmin': 1, 'vmax': 2, 'clip': True}
_ = self.plots_loader.restore_normalise_obj_from_dict(norm_dict)
mock_LogNorm.assert_called_once_with(norm_dict['vmin'], norm_dict['vmax'], norm_dict['clip'])
def test_restore_normalise_obj_from_dict_returns_none_with_unsupported_norm(self):
norm_dict = {'type': 'unsupported_norm', 'vmin': 1, 'vmax': 2, 'clip': True}
return_value = self.plots_loader.restore_normalise_obj_from_dict(norm_dict)
self.assertIsNone(return_value)
@mock.patch("matplotlib.colors.Normalize", autospec=True)
def test_restore_normalise_obj_from_dict_returns_Normalize_type_with_unspecified_norm(self, mock_Normalize):
"""If the type of the norm is unspecified, the method should return a Normalize object,
which is the most general norm and is subclassed by LogNorm, etc."""
norm_dict = {'vmin': 1, 'vmax': 2, 'clip': True}
_ = self.plots_loader.restore_normalise_obj_from_dict(norm_dict)
mock_Normalize.assert_called_once_with(norm_dict['vmin'], norm_dict['vmax'], norm_dict['clip'])
def test_update_properties_without_spineWidths(self):
# Test that old versions of the .mtdproj file that don't include spine widths
# still load. The fact this runs is the test
props = self.dictionary['properties']
props.pop("spineWidths")
mock_ax = mock.Mock()
plots_loader = self.plots_loader
with mock.patch.object(plots_loader, "update_axis", mock.Mock()):
plots_loader.update_properties(mock_ax, props)
def test_update_axis_with_old_tick_position(self):
# Test that old versions of the .mtdproj file that represented which side of the
# plot had ticks differently.
mock_xaxis = mock.MagicMock(spec=matplotlib.axis.XAxis)
mock_yaxis = mock.MagicMock(spec=matplotlib.axis.YAxis)
mock_ax = mock.MagicMock()
mock_ax.xaxis = mock_xaxis
mock_ax.yaxis = mock_yaxis
dic = self.dictionary["properties"]
dic.pop("tickParams")
dic["xAxisProperties"]["position"] = "top"
dic["yAxisProperties"]["position"] = "right"
self.plots_loader.update_properties(mock_ax, dic)
mock_ax.set_tick_params.assert_not_called()
mock_xaxis.tick_top.assert_called()
mock_yaxis.tick_right.assert_called()
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
nushio3/UFCORIN | script/review-forecast.py | 1 | 3318 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import astropy.time as time
import datetime
import pickle
import subprocess
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
class Forecast:
pass
def discrete_t(t):
epoch = datetime.datetime(2011,1,1)
dt = t - epoch
return epoch + datetime.timedelta(seconds = int(dt.total_seconds()/720)*720)
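# Worked example (illustrative comment, not in the original script): 720 s = 12 min,
# and the 2011-01-01 epoch falls on a 12-minute boundary, so times are snapped down
# to 12-minute marks, e.g.
#     discrete_t(datetime.datetime(2016, 5, 11, 0, 17, 45))
#     -> datetime.datetime(2016, 5, 11, 0, 12, 0)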
filename = 'review-forecast.png'
fig, ax = plt.subplots() # plt.subplots(figsize=mpl.figure.figaspect(0.3))
ax.set_yscale('log')
demo_mode = False
now = time.Time(datetime.datetime.now(),format='datetime',scale='utc').tai.datetime
if demo_mode:
now = time.Time(datetime.datetime(2016,5,11),format='datetime',scale='utc').tai.datetime
t = now
ts = [now]
for i in range(1):
t -= datetime.timedelta(days=28)
ts.append(t)
ts.reverse()
pats = []
for t in ts:
for d10 in range(4):
pats.append('archive/{:04}/{:02}/{}*/*'.format(t.year,t.month,d10))
goes_curve_max = {}
f = None
for pat in pats:
print "loading " + pat
proc = subprocess.Popen('ls ' + pat, shell = True, stdout=subprocess.PIPE)
for fn in proc.stdout:
with(open(fn.strip(),'r')) as fp:
try:
f = pickle.load(fp)
                # suppress daily forecast bar
# ax.plot(f.pred_curve_t, f.pred_curve_y, color=(0,0.7,0), lw=0.1)
ax.plot(f.pred_max_t[23][0], f.pred_max_y[23][0], 'mo', markersize=2.0, markeredgecolor='r', zorder = 300)
except:
continue
if f is None:
continue
for i in range(len(f.goes_curve_t)):
t = f.goes_curve_t[i]
y = f.goes_curve_y[i]
for j in range(-1,120):
t2 = discrete_t(t - datetime.timedelta(seconds=j*720))
try:
y2 = goes_curve_max[t2]
goes_curve_max[t2] = max(y2, y)
except:
goes_curve_max[t2] = y
ax.plot(f.goes_curve_t, f.goes_curve_y, color=(0.66,0.66,1), lw=1.5, zorder = 200)
ax.plot(f.goes_curve_t, f.goes_curve_y, color=(0,0,1), lw=1, zorder = 201)
gmdata = sorted(goes_curve_max.items())
ax.plot([kv[0] for kv in gmdata], [kv[1] for kv in gmdata], color=(1,0.75,0.75), lw=2, zorder = 100)
days = mdates.DayLocator() # every day
daysFmt = mdates.DateFormatter('%Y-%m-%d')
hours = mdates.HourLocator()
ax.xaxis.set_major_locator(days)
ax.xaxis.set_major_formatter(daysFmt)
ax.xaxis.set_minor_locator(hours)
ax.grid()
fig.autofmt_xdate()
ax.set_title('GOES Forecast till {}(TAI)'.format(now.strftime('%Y-%m-%d %H:%M:%S')))
ax.set_xlabel('International Atomic Time')
ax.set_ylabel(u'GOES Long[1-8Å] Xray Flux (W/m²)')
if demo_mode:
ax.set_xlim([now-datetime.timedelta(days=9), now+datetime.timedelta(days=1)])
ax.set_ylim([5e-8, 1e-5])
else:
ax.set_xlim([now-datetime.timedelta(days=16), now+datetime.timedelta(days=1)])
ax.set_ylim([5e-8, 1e-3])
plt.text(now+datetime.timedelta(days=1), 5e-4, 'X-class', rotation=90)
plt.text(now+datetime.timedelta(days=1), 5e-5, 'M-class', rotation=90)
plt.text(now+datetime.timedelta(days=1), 5e-6, 'C-class', rotation=90)
plt.text(now+datetime.timedelta(days=1), 5e-7, 'B-class', rotation=90)
plt.savefig(filename, dpi=200)
plt.close('all')
| mit |
dmires/ThinkStats2 | code/brfss.py | 69 | 4708 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import sys
import pandas
import numpy as np
import thinkstats2
import thinkplot
def Summarize(df, column, title):
"""Print summary statistics male, female and all."""
items = [
('all', df[column]),
('male', df[df.sex == 1][column]),
('female', df[df.sex == 2][column]),
]
print(title)
print('key\tn\tmean\tvar\tstd\tcv')
for key, series in items:
mean, var = series.mean(), series.var()
std = math.sqrt(var)
cv = std / mean
t = key, len(series), mean, var, std, cv
print('%s\t%d\t%4.2f\t%4.2f\t%4.2f\t%4.4f' % t)
def CleanBrfssFrame(df):
"""Recodes BRFSS variables.
df: DataFrame
"""
# clean age
df.age.replace([7, 9], float('NaN'), inplace=True)
# clean height
df.htm3.replace([999], float('NaN'), inplace=True)
# clean weight
df.wtkg2.replace([99999], float('NaN'), inplace=True)
df.wtkg2 /= 100.0
# clean weight a year ago
df.wtyrago.replace([7777, 9999], float('NaN'), inplace=True)
df['wtyrago'] = df.wtyrago.apply(lambda x: x/2.2 if x < 9000 else x-9000)
def ReadBrfss(filename='CDBRFS08.ASC.gz', compression='gzip', nrows=None):
"""Reads the BRFSS data.
filename: string
compression: string
nrows: int number of rows to read, or None for all
returns: DataFrame
"""
var_info = [
('age', 101, 102, int),
('sex', 143, 143, int),
('wtyrago', 127, 130, int),
('finalwt', 799, 808, int),
('wtkg2', 1254, 1258, int),
('htm3', 1251, 1253, int),
]
columns = ['name', 'start', 'end', 'type']
variables = pandas.DataFrame(var_info, columns=columns)
variables.end += 1
dct = thinkstats2.FixedWidthVariables(variables, index_base=1)
df = dct.ReadFixedWidth(filename, compression=compression, nrows=nrows)
CleanBrfssFrame(df)
return df
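# Illustrative note (added comment, not in the original source): with index_base=1,
# the (start, end) pairs in var_info above are 1-based, inclusive column positions in
# the fixed-width ASCII records; e.g. ('age', 101, 102, int) reads the two characters
# in columns 101-102 of each line and casts them to int.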
def MakeNormalModel(weights):
"""Plots a CDF with a Normal model.
weights: sequence
"""
cdf = thinkstats2.Cdf(weights, label='weights')
mean, var = thinkstats2.TrimmedMeanVar(weights)
std = math.sqrt(var)
print('n, mean, std', len(weights), mean, std)
xmin = mean - 4 * std
xmax = mean + 4 * std
xs, ps = thinkstats2.RenderNormalCdf(mean, std, xmin, xmax)
thinkplot.Plot(xs, ps, label='model', linewidth=4, color='0.8')
thinkplot.Cdf(cdf)
def MakeNormalPlot(weights):
"""Generates a normal probability plot of birth weights.
weights: sequence
"""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xs = [-5, 5]
xs, ys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(xs, ys, color='0.8', label='model')
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='weights')
def MakeFigures(df):
"""Generates CDFs and normal prob plots for weights and log weights."""
weights = df.wtkg2.dropna()
log_weights = np.log10(weights)
# plot weights on linear and log scales
thinkplot.PrePlot(cols=2)
MakeNormalModel(weights)
thinkplot.Config(xlabel='adult weight (kg)', ylabel='CDF')
thinkplot.SubPlot(2)
MakeNormalModel(log_weights)
thinkplot.Config(xlabel='adult weight (log10 kg)')
thinkplot.Save(root='brfss_weight')
# make normal probability plots on linear and log scales
thinkplot.PrePlot(cols=2)
MakeNormalPlot(weights)
thinkplot.Config(xlabel='z', ylabel='weights (kg)')
thinkplot.SubPlot(2)
MakeNormalPlot(log_weights)
thinkplot.Config(xlabel='z', ylabel='weights (log10 kg)')
thinkplot.Save(root='brfss_weight_normal')
def main(script, nrows=1000):
"""Tests the functions in this module.
script: string script name
"""
thinkstats2.RandomSeed(17)
nrows = int(nrows)
df = ReadBrfss(nrows=nrows)
MakeFigures(df)
Summarize(df, 'htm3', 'Height (cm):')
Summarize(df, 'wtkg2', 'Weight (kg):')
Summarize(df, 'wtyrago', 'Weight year ago (kg):')
if nrows == 1000:
assert(df.age.value_counts()[40] == 28)
assert(df.sex.value_counts()[2] == 668)
assert(df.wtkg2.value_counts()[90.91] == 49)
assert(df.wtyrago.value_counts()[160/2.2] == 49)
assert(df.htm3.value_counts()[163] == 103)
assert(df.finalwt.value_counts()[185.870345] == 13)
print('%s: All tests passed.' % script)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
qpxu007/luigi | examples/pyspark_wc.py | 56 | 3361 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.s3 import S3Target
from luigi.contrib.spark import SparkSubmitTask, PySparkTask
class InlinePySparkWordCount(PySparkTask):
"""
This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
# py-packages: numpy, pandas
"""
driver_memory = '2g'
executor_memory = '3g'
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
def main(self, sc, *args):
sc.textFile(self.input().path) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(self.output().path)
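# Illustrative invocation (a hedged sketch, not part of the original example): with the
# [spark] section configured as in the docstring above and this file importable as
# pyspark_wc (an assumed module name), the inline task can be launched with luigi's
# standard command-line entry point, e.g.
#
#     luigi --module pyspark_wc InlinePySparkWordCount --local-scheduler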
class PySparkWordCount(SparkSubmitTask):
"""
This task is the same as :py:class:`InlinePySparkWordCount` above but uses
an external python driver file specified in :py:meth:`app`
It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
deploy-mode: client
"""
driver_memory = '2g'
executor_memory = '3g'
total_executor_cores = luigi.IntParameter(default=100)
name = "PySpark Word Count"
app = 'wordcount.py'
def app_options(self):
# These are passed to the Spark main args in the defined order.
return [self.input().path, self.output().path]
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
'''
// Corresponding example Spark Job, running Word count with Spark's Python API
// This file would have to be saved into wordcount.py
import sys
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext()
sc.textFile(sys.argv[1]) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(sys.argv[2])
'''
| apache-2.0 |
alvarofierroclavero/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
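# Illustrative note (added comment, not in the original tests): two cuts count as "the
# same labelling" here iff they induce identical co-clustering matrices, so label
# permutations are ignored, e.g.
#     assess_same_labelling(np.array([0, 0, 1]), np.array([1, 1, 0]))  # passes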
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
jjx02230808/project0223 | sklearn/decomposition/pca.py | 9 | 23163 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
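# Illustrative sketch (added comment; these are private helpers, so the snippet below is
# an assumption about typical usage rather than documented API): for centered data X of
# shape (n_samples, n_features),
#
#     spectrum = np.linalg.svd(X, compute_uv=False) ** 2 / X.shape[0]
#     n_components = _infer_dimension_(spectrum, X.shape[0], X.shape[1])
#
# selects the rank that maximizes Minka's approximate log-evidence computed by
# _assess_dimension_ above.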
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
            # number of components for which the cumulative explained variance
            # percentage exceeds the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
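    # --- Illustrative sketch (added for exposition; not part of the original
    # module). For a fitted estimator with 0 < n_components_ < n_features,
    # get_precision() should match the direct inverse of get_covariance()
    # up to floating point error, since it only replaces the inversion by
    # the Woodbury (matrix inversion) identity:
    #
    #     >>> import numpy as np
    #     >>> from scipy import linalg
    #     >>> rng = np.random.RandomState(0)
    #     >>> pca = PCA(n_components=2).fit(rng.randn(50, 4))
    #     >>> np.allclose(pca.get_precision(),
    #     ...             linalg.inv(pca.get_covariance()))
    #     True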
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
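# --- Illustrative sketch (added for exposition; not part of the original
# module). Because ``PCA.score`` is an average log-likelihood under the
# probabilistic PCA model, it can be used to pick ``n_components`` on
# held-out data, as a cross-validation style alternative to
# ``n_components='mle'``:
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> X_train, X_test = rng.randn(100, 10), rng.randn(40, 10)
#     >>> ll = [PCA(n_components=k).fit(X_train).score(X_test)
#     ...       for k in range(1, 10)]
#     >>> best_k = 1 + int(np.argmax(ll))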
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 2 by default.
.. versionchanged:: 0.18
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=2, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
        (arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=2,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
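# --- Illustrative sketch (added for exposition; not part of the original
# module). On data with a clear low-rank structure the randomized solver
# typically recovers almost the same subspace as the exact PCA while being
# cheaper for a small number of components; the sign of each component is
# arbitrary, hence the absolute value below:
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> X = np.dot(rng.randn(200, 3), rng.randn(3, 30))
#     >>> exact = PCA(n_components=3).fit(X)
#     >>> approx = RandomizedPCA(n_components=3, random_state=0).fit(X)
#     >>> cos = np.abs(np.diag(np.dot(exact.components_,
#     ...                             approx.components_.T)))
#
# For a well separated spectrum the entries of ``cos`` are expected to be
# very close to 1.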
| bsd-3-clause |
iismd17/scikit-learn | sklearn/utils/__init__.py | 79 | 14202 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
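# --- Illustrative sketch (added for exposition; not part of the original
# module). The decorator also works on classes: ``_decorate_class`` wraps
# ``__init__`` so that instantiating the class emits a DeprecationWarning
# whose message ends with the ``extra`` text, e.g.:
#
#     >>> @deprecated("use NewThing instead")
#     ... class OldThing(object):
#     ...     def __init__(self):
#     ...         pass
#     >>> import warnings
#     >>> with warnings.catch_warnings(record=True) as w:
#     ...     warnings.simplefilter("always")
#     ...     _ = OldThing()
#     >>> str(w[0].message)
#     'Class OldThing is deprecated; use NewThing instead'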
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
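# --- Illustrative sketch (added for exposition; not part of the original
# module). With a boolean mask and a sparse matrix, safe_mask converts the
# mask to integer indices, which sparse row selection accepts:
#
#     >>> import numpy as np
#     >>> from scipy import sparse
#     >>> X = sparse.csr_matrix(np.eye(3))
#     >>> mask = np.array([True, False, True])
#     >>> safe_mask(X, mask)
#     array([0, 2])
#     >>> X[safe_mask(X, mask)].shape
#     (2, 3)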
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
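# --- Illustrative sketch (added for exposition; not part of the original
# module). The same call handles ndarrays (via take / fancy indexing),
# pandas objects (via iloc) and plain Python lists:
#
#     >>> import numpy as np
#     >>> idx = np.array([2, 0])
#     >>> safe_indexing(np.array([10, 11, 12]), idx)
#     array([12, 10])
#     >>> safe_indexing(['a', 'b', 'c'], idx)
#     ['c', 'a']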
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
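# --- Illustrative sketch (added for exposition; not part of the original
# module):
#
#     >>> tosequence(x for x in (1, 2, 3))   # generators are copied to a list
#     [1, 2, 3]
#     >>> t = (1, 2, 3)
#     >>> tosequence(t) is t                 # tuples are already Sequences
#     True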
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
iamkingmaker/zipline | tests/test_batchtransform.py | 18 | 9891 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
import pytz
import numpy as np
import pandas as pd
from datetime import datetime
from unittest import TestCase
from zipline.utils.test_utils import setup_logger, teardown_logger
from zipline.sources.data_source import DataSource
import zipline.utils.factory as factory
from zipline.transforms import batch_transform
from zipline.test_algorithms import (BatchTransformAlgorithm,
BatchTransformAlgorithmMinute,
ReturnPriceBatchTransform)
from zipline.algorithm import TradingAlgorithm
from zipline.utils.tradingcalendar import trading_days
from copy import deepcopy
@batch_transform
def return_price(data):
return data.price
class BatchTransformAlgorithmSetSid(TradingAlgorithm):
def initialize(self, sids=None):
self.history = []
self.batch_transform = return_price(
refresh_period=1,
window_length=10,
clean_nans=False,
sids=sids,
compute_only_full=False
)
def handle_data(self, data):
self.history.append(
deepcopy(self.batch_transform.handle_data(data)))
class DifferentSidSource(DataSource):
def __init__(self):
self.dates = pd.date_range('1990-01-01', periods=180, tz='utc')
self.start = self.dates[0]
self.end = self.dates[-1]
self._raw_data = None
self.sids = range(90)
self.sid = 0
self.trading_days = []
@property
def instance_hash(self):
return '1234'
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
def raw_data_gen(self):
        # Create a different sid for each event
for date in self.dates:
if date not in trading_days:
continue
event = {'dt': date,
'sid': self.sid,
'price': self.sid,
'volume': self.sid}
self.sid += 1
self.trading_days.append(date)
yield event
class TestChangeOfSids(TestCase):
def setUp(self):
self.sids = range(90)
self.sim_params = factory.create_simulation_parameters(
start=datetime(1990, 1, 1, tzinfo=pytz.utc),
end=datetime(1990, 1, 8, tzinfo=pytz.utc)
)
def test_all_sids_passed(self):
algo = BatchTransformAlgorithmSetSid(
sim_params=self.sim_params,
identifiers=[i for i in range(0, 90)]
)
source = DifferentSidSource()
algo.run(source)
for i, (df, date) in enumerate(zip(algo.history, source.trading_days)):
self.assertEqual(df.index[-1], date, "Newest event doesn't \
match.")
for sid in self.sids[:i]:
self.assertIn(sid, df.columns)
self.assertEqual(df.iloc[-1].iloc[-1], i)
class TestBatchTransformMinutely(TestCase):
def setUp(self):
setup_logger(self)
start = pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
self.sim_params = factory.create_simulation_parameters(
start=start,
end=end,
)
self.sim_params.emission_rate = 'daily'
self.sim_params.data_frequency = 'minute'
self.source, self.df = \
factory.create_test_df_source(bars='minute')
def tearDown(self):
teardown_logger(self)
def test_core(self):
algo = BatchTransformAlgorithmMinute(sim_params=self.sim_params)
algo.run(self.source)
wl = int(algo.window_length * 6.5 * 60)
for bt in algo.history[wl:]:
self.assertEqual(len(bt), wl)
def test_window_length(self):
algo = BatchTransformAlgorithmMinute(sim_params=self.sim_params,
window_length=1, refresh_period=0)
algo.run(self.source)
wl = int(algo.window_length * 6.5 * 60)
np.testing.assert_array_equal(algo.history[:(wl - 1)],
[None] * (wl - 1))
for bt in algo.history[wl:]:
self.assertEqual(len(bt), wl)
class TestBatchTransform(TestCase):
def setUp(self):
setup_logger(self)
self.sim_params = factory.create_simulation_parameters(
start=datetime(1990, 1, 1, tzinfo=pytz.utc),
end=datetime(1990, 1, 8, tzinfo=pytz.utc)
)
self.source, self.df = \
factory.create_test_df_source(self.sim_params)
def tearDown(self):
teardown_logger(self)
def test_core_functionality(self):
algo = BatchTransformAlgorithm(sim_params=self.sim_params)
algo.run(self.source)
wl = algo.window_length
        # The following assertions depend on a window length of 3
self.assertEqual(wl, 3)
# If window_length is 3, there should be 2 None events, as the
# window fills up on the 3rd day.
n_none_events = 2
self.assertEqual(algo.history_return_price_class[:n_none_events],
[None] * n_none_events,
"First two iterations should return None." + "\n" +
"i.e. no returned values until window is full'" +
"%s" % (algo.history_return_price_class,))
self.assertEqual(algo.history_return_price_decorator[:n_none_events],
[None] * n_none_events,
"First two iterations should return None." + "\n" +
"i.e. no returned values until window is full'" +
"%s" % (algo.history_return_price_decorator,))
# After three Nones, the next value should be a data frame
self.assertTrue(isinstance(
algo.history_return_price_class[wl],
pd.DataFrame)
)
# Test whether arbitrary fields can be added to datapanel
field = algo.history_return_arbitrary_fields[-1]
self.assertTrue(
'arbitrary' in field.items,
'datapanel should contain column arbitrary'
)
self.assertTrue(all(
field['arbitrary'].values.flatten() ==
[123] * algo.window_length),
            'arbitrary dataframe should contain only 123'
)
for data in algo.history_return_sid_filter[wl:]:
self.assertIn(0, data.columns)
self.assertNotIn(1, data.columns)
for data in algo.history_return_field_filter[wl:]:
self.assertIn('price', data.items)
self.assertNotIn('ignore', data.items)
for data in algo.history_return_field_no_filter[wl:]:
self.assertIn('price', data.items)
self.assertIn('ignore', data.items)
for data in algo.history_return_ticks[wl:]:
self.assertTrue(isinstance(data, deque))
for data in algo.history_return_not_full:
self.assertIsNot(data, None)
# test overloaded class
for test_history in [algo.history_return_price_class,
algo.history_return_price_decorator]:
            # starting at the window length, each window should contain
            # consecutive numbers (window length of them) up to the end.
for i in range(algo.window_length, len(test_history)):
np.testing.assert_array_equal(
range(i - algo.window_length + 2, i + 2),
test_history[i].values.flatten()
)
def test_passing_of_args(self):
algo = BatchTransformAlgorithm(1, kwarg='str',
sim_params=self.sim_params)
algo.run(self.source)
self.assertEqual(algo.args, (1,))
self.assertEqual(algo.kwargs, {'kwarg': 'str'})
expected_item = ((1, ), {'kwarg': 'str'})
self.assertEqual(
algo.history_return_args,
[
# 1990-01-01 - market holiday, no event
# 1990-01-02 - window not full
None,
# 1990-01-03 - window not full
None,
# 1990-01-04 - window now full, 3rd event
expected_item,
# 1990-01-05 - window now full
expected_item,
# 1990-01-08 - window now full
expected_item
])
def run_batchtransform(window_length=10):
sim_params = factory.create_simulation_parameters(
start=datetime(1990, 1, 1, tzinfo=pytz.utc),
end=datetime(1995, 1, 8, tzinfo=pytz.utc)
)
source, df = factory.create_test_df_source(sim_params)
return_price_class = ReturnPriceBatchTransform(
refresh_period=1,
window_length=window_length,
clean_nans=False
)
for raw_event in source:
raw_event['datetime'] = raw_event.dt
event = {0: raw_event}
return_price_class.handle_data(event)
| apache-2.0 |
schets/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
Winand/pandas | pandas/tests/io/parser/quoting.py | 18 | 5813 | # -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import PY3, StringIO, u
class QuotingTests(object):
def test_bad_quote_char(self):
data = '1,2,3'
# Python 2.x: "...must be an 1-character..."
# Python 3.x: "...must be a 1-character..."
msg = '"quotechar" must be a(n)? 1-character string'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar='foo')
msg = 'quotechar must be set if quoting enabled'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
msg = '"quotechar" must be string, not int'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=2)
def test_bad_quoting(self):
data = '1,2,3'
msg = '"quoting" must be an integer'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quoting='foo')
# quoting must in the range [0, 3]
msg = 'bad "quoting" value'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quoting=5)
def test_quote_char_basic(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
def test_quote_char_various(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
quote_chars = ['~', '*', '%', '$', '@', 'P']
for quote_char in quote_chars:
new_data = data.replace('"', quote_char)
result = self.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
def test_null_quote_char(self):
data = 'a,b,c\n1,2,3'
# sanity checks
msg = 'quotechar must be set if quoting enabled'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar='',
quoting=csv.QUOTE_MINIMAL)
# no errors should be raised if quoting is None
expected = DataFrame([[1, 2, 3]],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
def test_quoting_various(self):
data = '1,2,"foo"'
cols = ['a', 'b', 'c']
# QUOTE_MINIMAL and QUOTE_ALL apply only to
# the CSV writer, so they should have no
# special effect for the CSV reader
expected = DataFrame([[1, 2, 'foo']], columns=cols)
# test default (afterwards, arguments are all explicit)
result = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_MINIMAL, names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_ALL, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONE tells the reader to do no special handling
# of quote characters and leave them alone
expected = DataFrame([[1, 2, '"foo"']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONE, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONNUMERIC tells the reader to cast
# all non-quoted fields to float
expected = DataFrame([[1.0, 2.0, 'foo']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONNUMERIC,
names=cols)
tm.assert_frame_equal(result, expected)
def test_double_quote(self):
data = 'a,b\n3,"4 "" 5"'
expected = DataFrame([[3, '4 " 5']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[3, '4 " 5"']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=False)
tm.assert_frame_equal(result, expected)
def test_quotechar_unicode(self):
# See gh-14477
data = 'a\n1'
expected = DataFrame({'a': [1]})
result = self.read_csv(StringIO(data), quotechar=u('"'))
tm.assert_frame_equal(result, expected)
# Compared to Python 3.x, Python 2.x does not handle unicode well.
if PY3:
result = self.read_csv(StringIO(data), quotechar=u('\u0001'))
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
sgenoud/scikit-learn | examples/cluster/plot_affinity_propagation.py | 3 | 2494 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print __doc__
import numpy as np
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5)
##############################################################################
# Compute similarities
X_norms = np.sum(X ** 2, axis=1)
S = - X_norms[:, np.newaxis] - X_norms[np.newaxis, :] + 2 * np.dot(X, X.T)
p = 10 * np.median(S)
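# Note (added for exposition): S above is just the negative squared Euclidean
# distance written without forming pairwise differences, since
# -||x_i||^2 - ||x_j||^2 + 2 <x_i, x_j> = -||x_i - x_j||^2, e.g.
#
#     >>> S_check = -((X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2).sum(-1)
#     >>> np.allclose(S, S_check)
#     True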
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation().fit(S, p)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print 'Estimated number of clusters: %d' % n_clusters_
print "Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels)
print "Completeness: %0.3f" % metrics.completeness_score(labels_true, labels)
print "V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels)
print "Adjusted Rand Index: %0.3f" % \
metrics.adjusted_rand_score(labels_true, labels)
print "Adjusted Mutual Information: %0.3f" % \
metrics.adjusted_mutual_info_score(labels_true, labels)
D = (S / np.min(S))
print ("Silhouette Coefficient: %0.3f" %
metrics.silhouette_score(D, labels, metric='precomputed'))
##############################################################################
# Plot result
import pylab as pl
from itertools import cycle
pl.close('all')
pl.figure(1)
pl.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
pl.plot(X[class_members, 0], X[class_members, 1], col + '.')
pl.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
pl.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
| bsd-3-clause |
aM3z/titanic | model/test.py | 1 | 3411 | import features
from tree import DecisionTreeClassifier as our_tree
from sklearn import tree as py_tree
from sklearn.ensemble import BaggingClassifier
import argparse
RESULTS_DIR = './results/testing/'
def tree(max_depth, start_min_sample_size, end_min_sample_size):
X,y = features.get_data()
# use first 0.7 for training
X_train, y_train = X[:624], y[:624]
X_test, y_test = X[624:], y[624:]
results_filename = RESULTS_DIR + "tree.csv"
with open(results_filename, "a") as f:
for s in range(start_min_sample_size, end_min_sample_size + 1):
clf = our_tree(max_depth,s)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
result = "%d, %d, %f\n" % (max_depth, s, score)
print(result)
f.write(result)
def bagging(n_estimators, random_state, start_max_depth, end_max_depth, start_min_sample_size, end_min_sample_size):
X,y = features.get_data()
# use first 0.7 for training
X_train, y_train = X[:624], y[:624]
X_test, y_test = X[624:], y[624:]
results_filename = RESULTS_DIR + "bagging.csv"
with open(results_filename, "a") as f:
for d in range(start_max_depth, end_max_depth + 1):
for s in range(start_min_sample_size, end_min_sample_size + 1):
their_tree = py_tree.DecisionTreeClassifier(criterion='entropy', max_depth=d,random_state=random_state, min_samples_split=s)
clf = BaggingClassifier(base_estimator=their_tree, n_estimators=n_estimators, random_state=random_state)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
result = "%d, %d, %d, %f\n" % (n_estimators, d, s, score)
print(result)
f.write(result)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train Decision Tree and Bagging Classifiers")
parser.add_argument('-m', metavar='model', dest='model', type=int, nargs='?', help='1 for bagging, 2 for tree, 3 for bagging and tree')
parser.add_argument('-n', metavar='estimators', dest='n', type=int, nargs='?', help='number of estimators used for bagging')
parser.add_argument('-r', metavar='random_state', dest='rand', type=int, nargs='?', help='random state')
parser.add_argument('-d', metavar='start_max_depth', dest='d', type=int, nargs='?', help='start max tree depth')
parser.add_argument('-e', metavar='end_max_depth', dest='e', type=int, nargs='?', help='end max tree depth')
parser.add_argument('-s', metavar='start_min_sample_size', dest='s', type=int, nargs='?', help='start min sample size')
parser.add_argument('-t', metavar='end_min_sample_size', dest='t', type=int, nargs='?', help='end min sample size')
args = parser.parse_args()
if args.model == 1:
bagging(n_estimators=args.n, random_state=args.rand, start_max_depth=args.d, end_max_depth=args.e, start_min_sample_size=args.s, end_min_sample_size=args.t)
elif args.model == 2:
tree(max_depth=args.d, start_min_sample_size=args.s, end_min_sample_size=args.t)
elif args.model == 3:
bagging(n_estimators=args.n, random_state=args.rand, start_max_depth=args.d, end_max_depth=args.e, start_min_sample_size=args.s, end_min_sample_size=args.t)
tree(max_depth=args.d, start_min_sample_size=args.s, end_min_sample_size=args.t)
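# Example invocations (illustrative only, not from the original repository);
# the flags follow the argparse definitions above, and results are appended
# under ./results/testing/:
#
#   python test.py -m 2 -d 5 -s 2 -t 10
#   python test.py -m 1 -n 50 -r 0 -d 3 -e 8 -s 2 -t 6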
| mit |
ishanic/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
ddemidov/amgcl | docs/conf.py | 1 | 2173 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import sys
import os
sys.path.append('..')
from git_version import git_version
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- Project information -----------------------------------------------------
project = 'AMGCL'
copyright = '2012-2021, Denis Demidov <[email protected]>'
author = 'Denis Demidov'
version = git_version()
release = version
master_doc = 'index'
numfig = True
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.mathjax', 'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default' if on_rtd else 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
'fncychap': '\\usepackage[Sonny]{fncychap}',
'extraclassoptions': 'openany,oneside',
}
latex_documents = [
(master_doc, 'AMGCL.tex', 'AMGCL Documentation',
'Denis Demidov', 'manual'),
]
| mit |
adelomana/30sols | F3.RPG.regulation/panel.a/panel.a.py | 1 | 5581 | ###
### this script plots RNA-seq versus Ribo-seq expression of ribosomal proteins with a linear fit
###
import sys,numpy,copy
import matplotlib,matplotlib.pyplot
import scipy,scipy.stats
import statsmodels,statsmodels.nonparametric,statsmodels.nonparametric.smoothers_lowess
matplotlib.rcParams.update({'font.size':18,'font.family':'Arial','xtick.labelsize':14,'ytick.labelsize':14})
matplotlib.rcParams['pdf.fonttype']=42
def expressionReader():
'''
this function creates a dictionary for expression values as
expression[trna/rbf][ribo-pt gene name][timepoint][replicate]=value
'''
expression={}
geneNames=[]
timepoints=[]
replicates=[]
for sampleType in sampleTypes:
expressionDataFile=expressionDataDir+'normalizedCounts.{}.csv'.format(sampleType)
with open(expressionDataFile,'r') as f:
firstLine=f.readline()
header=firstLine.split(',')
sampleNames=header[1:]
sampleNames[-1]=sampleNames[-1].replace('\n','')
for line in f:
vector=line.split(',')
# geneName
geneName=vector[0]
if geneName in riboPtNames:
# gene Names
if geneName not in geneNames:
geneNames.append(geneName)
for i in range(len(vector)-1):
# timepoint
timepoint='tp.{}'.format(int(sampleNames[i].split('.')[-1]))
if timepoint not in timepoints:
timepoints.append(timepoint)
# replicate
replicate='rep.{}'.format(int(sampleNames[i].split('rep.')[1][0]))
if replicate not in replicates:
replicates.append(replicate)
# value
value=float(vector[i+1])
# make sure keys exist
if sampleType not in expression.keys():
expression[sampleType]={}
if geneName not in expression[sampleType].keys():
expression[sampleType][geneName]={}
if timepoint not in expression[sampleType][geneName].keys():
expression[sampleType][geneName][timepoint]={}
expression[sampleType][geneName][timepoint][replicate]=value
# sort variables
sampleTypes.sort()
geneNames.sort()
timepoints.sort()
replicates.sort()
return expression,sampleTypes,timepoints,replicates
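# note on the structure returned above: values can be looked up as
# expression[sampleType][geneName][timepoint][replicate]; for example
# (the gene/timepoint/replicate keys below are illustrative only, the real
# gene names come from ribosomalGeneNames.txt):
#   expression['trna']['VNG1132G']['tp.1']['rep.1']   # -> normalized count (float)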
def riboPtNamesReader():
'''
this function reads the ribosomal protein names
'''
riboPtNames=[]
with open(ribosomalProteinsFile,'r') as f:
next(f)
for line in f:
vector=line.split('\t')
riboPtNames.append(vector[0])
return riboPtNames
###
### MAIN
###
# 0. user defined variables
expressionDataDir='/Volumes/omics4tb/alomana/projects/TLR/data/DESeq2/'
ribosomalProteinsFile='/Volumes/omics4tb/alomana/projects/TLR/data/ribosomalGeneNames.txt'
scratchDir='/Volumes/omics4tb/alomana/scratch/'
theColors=['red','orange','green','blue']
sampleTypes=['trna','rbf']
# 1. read data
riboPtNames=riboPtNamesReader()
expression,sampleTypes,timepoints,replicates=expressionReader()
# 2. process data
# 2.1. empty figure calling to maintain sizes
matplotlib.pyplot.plot([0,0],[1,1],'ok')
matplotlib.pyplot.savefig('{}temp.pdf'.format(scratchDir))
matplotlib.pyplot.clf()
# 2.2. plotting figures
allx=[]; ally=[]
for timepoint in timepoints:
x=[]; y=[]
for name in riboPtNames:
valuesRNA=[]
valuesRibo=[]
for replicate in replicates:
value=expression['trna'][name][timepoint][replicate]
valuesRNA.append(value)
value=expression['rbf'][name][timepoint][replicate]
valuesRibo.append(value)
averageRNA=numpy.mean(valuesRNA)
averageRibo=numpy.mean(valuesRibo)
x.append(averageRNA)
y.append(averageRibo)
# add to all time points variable
for element in x:
allx.append(element)
for element in y:
ally.append(element)
print(len(allx),len(ally))
# 2.3. plotting dots
theColor=theColors[int(timepoint[-1])-1]
matplotlib.pyplot.plot(x,y,'o',alpha=0.5,mew=0,ms=8,color=theColor,label='TP {}'.format(timepoint[-1]))
# 2.4. compute linear regression
slope,intercept,r_value,p_value,std_err=scipy.stats.linregress(allx,ally)
print('linear regression')
print('slope',slope)
print('intercept',intercept)
print('r_value',r_value)
print('pvalue',p_value)
print('std_err',std_err)
print()
# 2.5. compute and plot linear regression line
resolution=0.1
newx=numpy.arange(min(allx),max(allx),resolution)
newy=slope*newx+intercept
idx=numpy.where(newy>0)
matplotlib.pyplot.plot(newx[idx],newy[idx],lw=4,color='black')
description='R$^2$={:.2f}\np={:.2e}\na={:.2f}'.format(r_value**2,p_value,slope)
matplotlib.pyplot.text(7,9,description)
matplotlib.pyplot.xlabel('RNA-seq, log$_2$(normalized counts)')
matplotlib.pyplot.ylabel('Ribo-seq, log$_2$(normalized counts)')
matplotlib.pyplot.yticks([4,6,8,10])
#matplotlib.pyplot.xlim([1,18])
#matplotlib.pyplot.ylim([1,18])
matplotlib.pyplot.legend(markerscale=1.5)
figureName='figure.single.pdf'
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.axes().set_aspect('equal')
matplotlib.pyplot.savefig(figureName)
matplotlib.pyplot.clf()
| gpl-3.0 |
wrobstory/seaborn | seaborn/miscplot.py | 34 | 1498 | from __future__ import division
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
def palplot(pal, size=1):
"""Plot the values in a color palette as a horizontal array.
Parameters
----------
pal : sequence of matplotlib colors
colors, i.e. as returned by seaborn.color_palette()
    size : float
        scaling factor for the size of the plot; the resulting figure is
        ``n * size`` inches wide and ``size`` inches tall for ``n`` colors
"""
n = len(pal)
f, ax = plt.subplots(1, 1, figsize=(n * size, size))
ax.imshow(np.arange(n).reshape(1, n),
cmap=mpl.colors.ListedColormap(list(pal)),
interpolation="nearest", aspect="auto")
ax.set_xticks(np.arange(n) - .5)
ax.set_yticks([-.5, .5])
ax.set_xticklabels([])
ax.set_yticklabels([])
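# Minimal usage sketch for palplot (not part of the original module; assumes
# seaborn and matplotlib are importable in the calling session):
#
#   import seaborn as sns
#   sns.palplot(sns.color_palette("husl", 8), size=1)
#   plt.show()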
def puppyplot(grown_up=False):
"""Plot today's daily puppy. Only works in the IPython notebook."""
from .external.six.moves.urllib.request import urlopen
from IPython.display import HTML
try:
from bs4 import BeautifulSoup
url = "http://www.dailypuppy.com"
if grown_up:
url += "/dogs"
html_doc = urlopen(url)
soup = BeautifulSoup(html_doc)
puppy = soup.find("div", {"class": "daily_puppy"})
return HTML(str(puppy.img))
except ImportError:
html = ('<img src="http://cdn-www.dailypuppy.com/dog-images/'
'decker-the-nova-scotia-duck-tolling-retriever_'
'72926_2013-11-04_w450.jpg" style="width:450px;"/>')
return HTML(html)
| bsd-3-clause |
glennq/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
tasoc/photometry | tests/test_run_ffimovie.py | 1 | 1189 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tests of FFI Movies.
.. codeauthor:: Rasmus Handberg <[email protected]>
"""
import pytest
import os.path
from conftest import capture_cli
import photometry.plots # noqa: F401
from matplotlib import animation
NO_FFMPEG = ('ffmpeg' not in animation.writers)
#--------------------------------------------------------------------------------------------------
@pytest.mark.ffmpeg
@pytest.mark.skipif(NO_FFMPEG, reason="FFMpeg not available")
def test_run_ffimovie(SHARED_INPUT_DIR):
out, err, exitcode = capture_cli('run_ffimovie.py', params=[
'--sector=1',
SHARED_INPUT_DIR
])
assert exitcode == 0
assert '- ERROR -' not in err
assert '- ERROR -' not in out
for fname in (
'sector001_camera3_ccd2.mp4',
'sector001_combined_backgrounds.mp4',
'sector001_combined_flags.mp4',
'sector001_combined_images.mp4',
'sector001_combined_originals.mp4'):
mp4file = os.path.join(SHARED_INPUT_DIR, fname)
assert os.path.isfile(mp4file), "MP4 was not created: " + fname
#--------------------------------------------------------------------------------------------------
if __name__ == '__main__':
pytest.main([__file__])
| gpl-3.0 |
jreeder/avoplot | examples/adv_sine_wave.py | 3 | 8650 | import numpy
import matplotlib.pyplot as plt
import math
from avoplot import plugins, series, controls, subplots
from avoplot.gui import widgets
import wx
plugin_is_GPL_compatible = True
class TrigFuncSubplot(subplots.AvoPlotXYSubplot):
def my_init(self):
"""
When defining your own subplot classes, you should not need to override
the __init__ method of the base class. Instead you should define a
my_init() method which takes no args. This will be called automatically
when the subplot is created. Use this to customise the subplot to suit
your specific needs - settings titles, axis formatters etc.
"""
#call the parent class's my_init() method. This is not required, unless
#you want to make use of any customisation done by the parent class.
#Note that this includes any control panels defined by the parent class!
super(TrigFuncSubplot, self).my_init()
#set up some axis titles
ax = self.get_mpl_axes()
ax.set_xlabel(r'$\theta$ (radians)')
ax.set_ylabel('y')
#add the units control panel to this subplot to allow the user to change
#the x-axis units.
self.add_control_panel(TrigSubplotUnitsCtrl(self))
#set the initial name of the subplot
self.set_name("Trig. Function Subplot")
class SineWaveSeries(series.XYDataSeries):
"""
Define our own data series type for Sine data. Unlike for subplots, when
defining custom data series, we do override the __init__ method.
"""
def __init__(self, *args, **kwargs):
super(SineWaveSeries, self).__init__(*args, **kwargs)
#add a control for this data series to allow the user to change the
#frequency of the wave using a slider.
self.add_control_panel(SineWaveFreqCtrl(self))
@staticmethod
def get_supported_subplot_type():
"""
This is how we restrict which data series can be plotted into which
types of subplots. Specialised subplots may provide controls for dealing
with very specific types of data - for example, our TrigFuncSubplot
        allows the x-axis to be switched between degrees and radians; it would
therefore make no sense to allow time series data to be plotted into it.
However, it might make sense to allow a SineWaveSeries to be plotted
        into a general AvoPlotXYSubplot, and therefore this is permitted by
AvoPlot. The rule is as follows:
A data series may be plotted into a subplot if the subplot is an
instance of the class returned by its get_supported_subplot_type()
method or any of its base classes.
"""
return TrigFuncSubplot
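# For this plugin the rule above works out as follows (illustrative sketch
# only -- the actual check is performed by the AvoPlot core, not by this code):
#
#   supported = SineWaveSeries.get_supported_subplot_type()  # TrigFuncSubplot
#   # a TrigFuncSubplot is an instance of `supported`        -> allowed
#   # an AvoPlotXYSubplot is a base class of `supported`     -> also allowed
#   # an unrelated subplot type matches neither              -> rejected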
class AdvExamplePlugin(plugins.AvoPlotPluginSimple):
"""
This class is the same as that used for the Sine wave example, except
that we use the SineWaveSeries data series class that we defined above
rather than the generic XYDataSeries class used before.
"""
def __init__(self):
super(AdvExamplePlugin, self).__init__("Example Plugin with Controls",
SineWaveSeries)
self.set_menu_entry(['Examples', 'Adv. Sine Wave'],
"Plot a sine wave with variable frequency")
def plot_into_subplot(self, subplot):
x_data = numpy.linspace(0, 7, 500)
y_data = numpy.sin(x_data)
data_series = SineWaveSeries("adv sine wave", xdata=x_data,
ydata=y_data)
subplot.add_data_series(data_series)
return True
def rad2deg(theta, pos):
"""
Function for converting radians to degrees for use with matplotlib's
FuncFormatter object.
"""
return '%0.2f'%math.degrees(theta)
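# (rad2deg is hooked up further down via
#  ax.xaxis.set_major_formatter(plt.FuncFormatter(rad2deg))
#  when the user selects "Degrees" in the units control panel.)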
class TrigSubplotUnitsCtrl(controls.AvoPlotControlPanelBase):
"""
Control panel for trig function subplots allowing their x axis units
to be changed from radians to degrees.
"""
def __init__(self, subplot):
#call the parent class's __init__ method, passing it the name that we
#want to appear on the control panels tab.
super(TrigSubplotUnitsCtrl, self).__init__("Units")
#store the subplot object that this control panel is associated with,
#so that we can access it later
self.subplot = subplot
def setup(self, parent):
"""
This is where all the controls get added to the control panel. You
*must* call the setup method of the parent class before doing any of
your own setup.
"""
#call parent class's setup method - do this before anything else
super(TrigSubplotUnitsCtrl, self).setup(parent)
#create a choice box for the different units for the x axis
#we use a avoplot.gui.widgets.ChoiceSetting object which is a
#thin wrapper around a wx.ChoiceBox, but provides a label and
#automatically registers the event handler.
units_choice = widgets.ChoiceSetting(self, "x-axis units:", "Radians",
["Radians", "Degrees"],
self.on_units_change)
#add the choice widget to the control panel sizer
self.Add(units_choice, 0,wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border=10)
def on_units_change(self, evnt):
"""
Event handler for change of x axis units events.
"""
#get the matplotlib axes object from the subplot
ax = self.subplot.get_mpl_axes()
#change the axis labels and label formatting based on the choice of
#units
if evnt.GetString() == 'Degrees':
ax.set_xlabel(r'$\theta$ (degrees)')
ax.xaxis.set_major_formatter(plt.FuncFormatter(rad2deg))
else:
ax.set_xlabel(r'$\theta$ (radians)')
ax.xaxis.set_major_formatter(plt.ScalarFormatter())
#draw our changes in the display
self.subplot.update()
class SineWaveFreqCtrl(controls.AvoPlotControlPanelBase):
"""
Control panel for sine wave data series allowing their frequency to
be changed using a slider.
"""
def __init__(self, series):
#call the parent class's __init__ method, passing it the name that we
#want to appear on the control panels tab.
super(SineWaveFreqCtrl, self).__init__("Freq.")
#store the data series object that this control panel is associated with,
#so that we can access it later
self.series = series
def setup(self, parent):
"""
This is where all the controls get added to the control panel. You
*must* call the setup method of the parent class before doing any of
your own setup.
"""
#call parent class's setup method - do this before anything else
super(SineWaveFreqCtrl, self).setup(parent)
#create a label for the slider
label = wx.StaticText(self, wx.ID_ANY, 'Frequency')
self.Add(label, 0,
wx.LEFT | wx.RIGHT | wx.TOP | wx.ALIGN_CENTER_HORIZONTAL,
border=10)
#create a frequency slider
self.slider = wx.Slider(self, wx.ID_ANY, value=1, minValue=1,
maxValue=30, style=wx.SL_LABELS)
#add the slider to the control panel's sizer
self.Add(self.slider, 0,
wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, border=10)
#register an event handler for slider change events
wx.EVT_COMMAND_SCROLL(self, self.slider.GetId(), self.on_slider_change)
def on_slider_change(self, evnt):
"""
Event handler for frequency slider change events.
"""
#change the frequency of the sine wave data accordingly
f = self.slider.GetValue()
x_data = numpy.linspace(0, 7, 2000)
y_data = numpy.sin(x_data * f)
#change the data in the series object
self.series.set_xy_data(xdata=x_data, ydata=y_data)
#draw our changes on the display
self.series.update()
#register the plugin with AvoPlot
plugins.register(AdvExamplePlugin())
| gpl-3.0 |
takuya1981/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hprModelFrame.py | 22 | 2847 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import harmonicModel as HM
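# Outline of this plotting script (comments added for readability):
#  1. window one frame of flute-A4.wav and take its DFT (DFT.dftAnal)
#  2. detect/interpolate spectral peaks and estimate f0 by two-way mismatch (UF.f0Twm)
#  3. select harmonics (HM.harmonicDetection) and synthesise their spectrum Yh
#  4. subtract Yh from the Blackman-Harris windowed spectrum X2 to get the
#     residual Xr, then plot magnitudes, phases and the inverse transforms of both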
(fs, x) = UF.wavread('../../../sounds/flute-A4.wav')
pos = int(.8*fs)
M = 601
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
w = np.hamming(M)
N = 1024
t = -100
nH = 40
minf0 = 420
maxf0 = 460
f0et = 5
maxnpeaksTwm = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
x1 = x[pos-hM1:pos+hM2]
x2 = x[pos-Ns/2-1:pos+Ns/2-1]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs*iploc/N
f0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0)
hfreqp = []
hfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, hfreqp, fs, harmDevSlope)
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)
mYh = 20 * np.log10(abs(Yh[:Ns/2]))
pYh = np.unwrap(np.angle(Yh[:Ns/2]))
bh=blackmanharris(Ns)
X2 = fft(fftshift(x2*bh/sum(bh)))
Xr = X2-Yh
mXr = 20 * np.log10(abs(Xr[:Ns/2]))
pXr = np.unwrap(np.angle(Xr[:Ns/2]))
xrw = np.real(fftshift(ifft(Xr))) * H * 2
yhw = np.real(fftshift(ifft(Yh))) * H * 2
maxplotfreq = 8000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(3,2,1)
plt.plot(np.arange(M), x[pos-hM1:pos+hM2]*w, lw=1.5)
plt.axis([0, M, min(x[pos-hM1:pos+hM2]*w), max(x[pos-hM1:pos+hM2]*w)])
plt.title('x (flute-A4.wav)')
plt.subplot(3,2,3)
binFreq = (fs/2.0)*np.arange(mX.size)/(mX.size)
plt.plot(binFreq,mX,'r', lw=1.5)
plt.axis([0,maxplotfreq,-90,max(mX)+2])
plt.plot(hfreq, hmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + harmonics')
plt.subplot(3,2,5)
plt.plot(binFreq,pX,'c', lw=1.5)
plt.axis([0,maxplotfreq,0,16])
plt.plot(hfreq, hphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + harmonics')
plt.subplot(3,2,4)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,mYh,'r', lw=.8, label='mYh')
plt.plot(binFreq,mXr,'r', lw=1.5, label='mXr')
plt.axis([0,maxplotfreq,-90,max(mYh)+2])
plt.legend(prop={'size':10})
plt.title('mYh + mXr')
plt.subplot(3,2,6)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,pYh,'c', lw=.8, label='pYh')
plt.plot(binFreq,pXr,'c', lw=1.5, label ='pXr')
plt.axis([0,maxplotfreq,-5,25])
plt.legend(prop={'size':10})
plt.title('pYh + pXr')
plt.subplot(3,2,2)
plt.plot(np.arange(Ns), yhw, 'b', lw=.8, label='yh')
plt.plot(np.arange(Ns), xrw, 'b', lw=1.5, label='xr')
plt.axis([0, Ns, min(yhw), max(yhw)])
plt.legend(prop={'size':10})
plt.title('yh + xr')
plt.tight_layout()
plt.savefig('hprModelFrame.png')
plt.show()
| agpl-3.0 |
yaukwankiu/armor | tests/scikitlearn_dbscan.py | 1 | 2490 | #http://scikit-learn.org/0.11/auto_examples/cluster/plot_dbscan.html
print __doc__
import numpy as np
from scipy.spatial import distance
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4)
##############################################################################
# Compute similarities
D = distance.squareform(distance.pdist(X))
S = 1 - (D / np.max(D))
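# Note: pdist/squareform give the dense pairwise distance matrix D; rescaling by
# its maximum and subtracting from 1 converts it into a similarity matrix S with
# values in [0, 1] (1.0 on the diagonal), which is what this 0.11-era example
# feeds to DBSCAN below.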
##############################################################################
# Compute DBSCAN
db = DBSCAN().fit(S, eps=0.95, min_samples=10)
core_samples = db.core_sample_indices_
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print 'Estimated number of clusters: %d' % n_clusters_
print "Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels)
print "Completeness: %0.3f" % metrics.completeness_score(labels_true, labels)
print "V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels)
print "Adjusted Rand Index: %0.3f" % \
metrics.adjusted_rand_score(labels_true, labels)
print "Adjusted Mutual Information: %0.3f" % \
metrics.adjusted_mutual_info_score(labels_true, labels)
print ("Silhouette Coefficient: %0.3f" %
metrics.silhouette_score(D, labels, metric='precomputed'))
##############################################################################
# Plot result
import pylab as pl
from itertools import cycle
pl.close('all')
pl.figure(1)
pl.clf()
# Black removed and is used for noise instead.
colors = cycle('bgrcmybgrcmybgrcmybgrcmy')
for k, col in zip(set(labels), colors):
if k == -1:
# Black used for noise.
col = 'k'
markersize = 6
class_members = [index[0] for index in np.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples
if labels[index] == k]
for index in class_members:
x = X[index]
if index in core_samples and k != -1:
markersize = 14
else:
markersize = 6
pl.plot(x[0], x[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=markersize)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show() | cc0-1.0 |
jerkern/nxt_slam | python/nxt_slam.py | 1 | 23467 | import robot
from ultrasound_fast import UltrasoundRobot
import PF
import PS
import math
import numpy
#from matplotlib.pyplot import *
import pygame
import sys
#import copy
#import matplotlib.pyplot
class MyGUI(object):
""" Draw a simple GUI for the SLAM visualisation """
def __init__(self, ubot, use_gui):
# Array dimensions
self.dim = numpy.shape(ubot.map.get_nav_map()[0])
print self.dim
self.dim = self.dim[::-1]
self.num_angles = ubot.num_angles
# Configure graphics
if (use_gui):
""" Create gui object """
pygame.display.init()
#self.screen = pygame.display.set_mode( numpy.asarray((9, 4))*self.dim, 0, 32 )
self.screen = pygame.display.set_mode(numpy.asarray((8, 4))*self.dim, 0, 32 )
else:
self.screen = pygame.Surface(numpy.asarray((8, 4))*self.dim, 0, 32)
self.palette = tuple([(i, i, i) for i in range(256)])
self.best_scale = numpy.array([4, 4])
self.image_path = None
self.use_gui = use_gui
self.index = 1
def set_image_path(self, path):
self.image_path = path
def save_image(self):
filename = self.image_path + "%06d.jpg" % (self.index,)
pygame.image.save(self.screen, filename)
self.index += 1
def draw_smoothing(self, straj_rev, best, particles, top_ind, dead_reckoning, m_slam):
try:
self.draw_best(best)
self.plot_particles(best, particles, dead_reckoning, m_slam, top_ind)
self.draw_top(particles[top_ind])
scolor = (128, 128, 128, 0)
pcolor = (255, 255, 0, 0)
bounds = best.map.bounds
for p in straj_rev:
self.draw_part(p.eta[:2], bounds, pcolor, 1)
if (self.use_gui):
pygame.display.flip()
if (self.image_path != None):
self.save_image()
except IndexError:
print "GUI error!"
def draw(self, best, particles, top_ind, dead_reckoning, m_slam):
""" Draw everything """
# tmp = 1000.0*map.get_map()
#
# matplotlib.pyplot.pcolor(numpy.arange(map.bounds[0], map.bounds[1],
# map.resolution),
# numpy.arange(map.bounds[2], map.bounds[3],
# map.resolution),
# tmp, shading='faceted')
# matplotlib.pyplot.colorbar()
# matplotlib.pyplot.show()
#matplotlib.pyplot.show()
try:
self.draw_best(best)
self.plot_particles(best, particles, dead_reckoning, m_slam, top_ind)
self.draw_top(particles[top_ind])
if (self.use_gui):
pygame.display.flip()
if (self.image_path != None):
self.save_image()
except IndexError:
print "GUI error!"
def draw_best(self, best):
""" Draw the "best particle """
self.draw_map(best.map, (0,0), self.best_scale)
def draw_top(self, top):
""" Draw the k-th best particle maps """
if (len(top) >= 1):
self.draw_map(top[0].map, self.dim*numpy.array((4, 0)), self.best_scale)
# if (self.use_gui):
# for k in range(1,min(len(top),5)):
# print k
# self.draw_map(top[k].map, self.dim*numpy.array((8, (k-1))), (1, 1))
#
return
def draw_map(self, map, pos, scale):
""" Draw map """
(prob, var) = map.get_nav_map()
prob = numpy.copy(prob)
var = numpy.copy(var)
# lpmin = -20
# lpmax = 20
# lprob[lprob < lpmin] = lpmin
# lprob[lprob > lpmax] = lpmax
# lprob -= lpmin
# lprob *= (255.0/(lpmax-lpmin))
comb = numpy.empty((prob.shape[1], prob.shape[0], 3))
# over-/underflow problems
var[var < 0] = 0
var[var > unknown] = unknown
# arbitrary scaling to make it looks nice in the gui
prob[var >= unknown] = 0.0
#prob[prob > 0.1] = 0.1
#prob = 10.0*prob
tmp = numpy.exp(-var*2.0)
#tmp = var.T
#tmp[tmp > 255.0] = 255.0
tmp2 = 0.0*prob
tmp2[var >= 50] = 255.0
comb[:,:,0] = 255.0*prob
comb[:,:,1] = 255.0*tmp
comb[:,:,2] = tmp2
#surf = pygame.surfarray.make_surface(lprob.T)
surf = pygame.surfarray.make_surface(comb)
surf = pygame.transform.scale(surf, numpy.asarray(scale)*numpy.asarray(surf.get_size()))
#surf.set_palette(self.palette)
surf = pygame.transform.flip(surf, False, True)
self.screen.blit( surf, pos )
pos = (pos[0], pos[1] + surf.get_size()[1])
def draw_part(self,state, bounds, color, csize, offset=(0, 0)):
""" Plot single particle """
width = bounds[1] - bounds[0]
height = bounds[3] - bounds[2]
size = numpy.asarray((height, width))
state = numpy.copy(state)
state[1] = bounds[3] - state[1]
state[0] -= bounds[0]
image_size = self.best_scale * self.dim[::-1]
pos = state / size * image_size + self.best_scale / 2
pos = numpy.array(pos, dtype=int)
pygame.draw.circle(self.screen, color, (pos[0]+offset[0], pos[1]+offset[1]), csize, 0)
def plot_particles(self, best, particles, dead_reckoning, m_slam, top_ind=None):
""" Plot all particles along with best guess """
best_color = (255, 255, 255, 0)
nb1_color = (0, 0, 255, 0)
black = (0, 0 ,0 ,0)
white = (255, 255 ,255 ,0)
grey = (128, 128 ,128 ,0)
other_color = (0, 255, 255, 0)
bad_color = (255, 20, 147, 0)
bounds = best.map.bounds
for p in particles:
self.draw_part(p.robot.get_state()[:2], bounds, other_color, 1)
if (top_ind != None):
p = particles[top_ind[0]]
self.draw_part(p.robot.get_state()[:2], bounds, nb1_color, 2)
self.draw_part(p.robot.get_state()[:2], bounds, best_color, 2, self.dim*numpy.array((4, 0)))
self.draw_part(m_slam[:2], bounds, grey, 4)
self.draw_part(m_slam[:2], bounds, nb1_color, 3)
self.draw_part(dead_reckoning.get_state()[:2], bounds, grey, 4)
self.draw_part(dead_reckoning.get_state()[:2], bounds, bad_color, 3)
#self.draw_part(best.robot.state[:2], bounds, grey, 3)
self.draw_part(best.robot.get_state()[:2], bounds, best_color, 2)
def calc_est(slam):
n_slam = len(slam.part)
m_slam = numpy.zeros((3,))
v_slam = numpy.zeros((3,))
w = numpy.exp(slam.w)
for i in range(n_slam):
m_slam[:2] = m_slam[:2] + w[i]*slam.part[i].robot.robot.state[:2]
m_slam[2] = m_slam[2] + w[i]*(numpy.mod(math.pi + slam.part[i].robot.robot.state[2],
2*math.pi) - math.pi)
m_slam = m_slam/sum(w)
for i in range(n_slam):
v_slam[:2] = v_slam[:2] + w[i]*(slam.part[i].robot.robot.state[:2]-m_slam[:2])**2
v_slam[2] = v_slam[2] + w[i]*(numpy.mod(math.pi + slam.part[i].robot.robot.state[2]-m_slam[2],
2*math.pi) - math.pi)**2
v_slam = v_slam/sum(w)
return (m_slam, v_slam)
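# Note on calc_est(): heading angles are wrapped into [-pi, pi) with
#   wrapped = mod(pi + theta, 2*pi) - pi
# before the weighted mean and variance are accumulated, so particle
# orientations near +/-pi do not average out to a misleading value near zero.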
def output_stats(out_file, ref, slam, dumb):
# Calculate and output statistics for the different estimations
(m_slam, v_slam) = calc_est(slam)
# bias_slam = m_slam - ref.robot.state
# bias_slam[2] = numpy.mod(math.pi + bias_slam[2], 2*math.pi) - math.pi
# bias_dumb = m_dumb - ref.robot.state
# bias_dumb[2] = numpy.mod(math.pi + bias_dumb[2], 2*math.pi) - math.pi
#
best_ind = numpy.argsort(slam.w)[0]
# best_bias = numpy.zeros((3,))
# best_bias[:2] = slam.part[best_ind].robot.state[:2] - ref.robot.state[:2]
# best_bias[2] = numpy.mod(math.pi + slam.part[best_ind].robot.state[2] - ref.robot.state[2],
# 2*math.pi) - math.pi
# Ref (x,y,theta) dead-reckoning (x,y,theta) slam_avg (x,y,theta) slam_var (x,y,theta), slam_best (x,y,theta)
line = "%f, %f, %f, " % tuple(ref.robot.robot.state)
line += "%f, %f, %f, " % tuple(dumb.state)
line += "%f, %f, %f, " % tuple(m_slam)
line += "%f, %f, %f, " % tuple(v_slam)
line += "%f, %f, %f\n" % tuple(slam.part[best_ind].robot.robot.state)
out_file.write(line)
old_angle = math.pi/2
#numpy.seterr(all='raise')
config_phase = True
# Place-holder, will be assigned after config is determined
rob = None
ubot = None
pf = None
pf_dumb = None
gui = None
# Default values match Lego Mindstorms robot
wheel_base = 140.0/1000.0
wheel_size = 80.0/1000.0
wheel_ticks = -360.0
beam_width = math.pi/5
num_part = 50
angle_ticks = -360*5 # gear ratio 1:5, positive clockwise
dist_scale = 1.0/100.0 # cm to m
enc_noise = 0.1
enc_noise_lin = 0.1
theta_noise = 0.1
theta_noise_lin = 0.1
#noise = numpy.asarray([2, 2, 0.05])
#state_noise_off = numpy.asarray([0.05, 0.05, 0.01])
#state_noise_1st = numpy.asarray([0.0, 0.0, 0.0])
bounds = (-2.0, 2.0, -2.0, 2.0)
offset = (0.09, 0.0, 0.0)
resolution = 0.05
image_path = None
stats_file = None
trunc_readings = None
lp_coeff = None
a_coeff = 5.0
r_coeff = 1.0
cov_offset = 1.0
#inv_offset = 1.0
decay = 0.0001
unknown = 50
num_angles = 1
detect_range = numpy.Inf
prior = 0.1
use_gui = True
# Smoothing config
num_back_traj = 10
filter_steps = 20
overlap_steps = 10
smooth_threshold = 2.0/3.0
min_smooth_len = 2
nth_smoothing = -1
resample_count = 0
pf_resample = 2.0/3.0
filter_only = False
no_more_data = False
j = 1
while not no_more_data:
line = sys.stdin.readline()
sys.stdout.write(line)
if line == '':
no_more_data = True
if line == '\n':
continue
words = line.split()
if (config_phase):
if (words[0].lower() == "wheel_base:".lower()):
wheel_base = float(words[1])
if (words[0].lower() == "wheel_size:".lower()):
wheel_size = float(words[1])
if (words[0].lower() == "wheel_ticks:".lower()):
wheel_ticks = float(words[1])
if (words[0].lower() == "beam_width:".lower()):
beam_width = float(words[1])
if (words[0].lower() == "num_part:".lower()):
num_part = int(float(words[1]))
if (words[0].lower() == "num_angles:".lower()):
num_angles = int(float(words[1]))
if (words[0].lower() == "lp_coeff:".lower()):
lp_coeff = float(words[1])
if (words[0].lower() == "a_coeff:".lower()):
a_coeff = float(words[1])
if (words[0].lower() == "r_coeff:".lower()):
r_coeff = float(words[1])
if (words[0].lower() == "cov_offset:".lower()):
cov_offset = float(words[1])
# if (words[0].lower() == "inv_offset:".lower()):
# inv_offset = float(words[1])
if (words[0].lower() == "angle_ticks:".lower()):
angle_ticks = float(words[1])
if (words[0].lower() == "unknown:".lower()):
unknown = float(words[1])
if (words[0].lower() == "decay:".lower()):
decay = float(words[1])
if (words[0].lower() == "prior:".lower()):
prior = float(words[1])
if (words[0].lower() == "dist_scale:".lower()):
dist_scale = float(words[1])
if (words[0].lower() == "enc_noise:".lower()):
enc_noise = numpy.asarray([ float(words[1]),])
if (words[0].lower() == "enc_noise_lin:".lower()):
enc_noise_lin = numpy.asarray([ float(words[1]),])
if (words[0].lower() == "theta_noise:".lower()):
theta_noise = numpy.asarray([ float(words[1]),])
if (words[0].lower() == "theta_noise_lin:".lower()):
theta_noise_lin = numpy.asarray([ float(words[1]),])
# if (words[0].lower() == "noise:".lower()):
# noise = numpy.asarray([ float(words[1]), float(words[2]), float(words[3])])
# if (words[0].lower() == "state_noise_off:".lower()):
# state_noise_off = numpy.asarray([ float(words[1]),
# float(words[2]),
# float(words[3])])
# if (words[0].lower() == "state_noise_1st:".lower()):
# state_noise_1st = numpy.asarray([ float(words[1]),
# float(words[2]),
# float(words[3])])
if (words[0].lower() == "offset:".lower()):
offset = [ float(words[1]), float(words[2]), float(words[3])]
if (words[0].lower() == "bounds:".lower()):
bounds = ( float(words[1]), float(words[2]),
float(words[3]), float(words[4]))
if (words[0].lower() == "resolution:".lower()):
resolution = float(words[1])
if (words[0].lower() == "trunc_readings:".lower()):
trunc_readings = float(words[1])
if (words[0].lower() == "detect_range:".lower()):
detect_range = float(words[1])
if (words[0].lower() == "save_images:".lower()):
image_path = words[1]
if (words[0].lower() == "stats_file:".lower()):
stats_file = open(words[1], 'w', 1024*1024)
if (words[0].lower() == "disable_gui".lower()):
use_gui = False
if (words[0].lower() == "num_back_traj:".lower()):
num_back_traj = int(words[1])
if (words[0].lower() == "filter_steps:".lower()):
filter_steps = int(words[1])
if (words[0].lower() == "overlap_steps:".lower()):
overlap_steps = int(words[1])
if (words[0].lower() == "smooth_threshold:".lower()):
smooth_threshold = float(words[1])
if (words[0].lower() == "min_smooth_len:".lower()):
min_smooth_len = int(words[1])
if (words[0].lower() == "nth_smoothing:".lower()):
nth_smoothing = int(words[1])
if (words[0].lower() == "pf_resample:".lower()):
pf_resample = float(words[1])
if (words[0].lower() == "filter_only:".lower()):
tmp = words[1].lower()
if (tmp in ['true', '1', 'on', 'yes']):
filter_only = True
else:
filter_only = False
if (words[0].lower() == "measure:".lower() or
words[0].lower() == "update:".lower() or
words[0].lower() == "refpos:".lower()):
config_phase = False
print "Using config: "
print "\twheel_base: %f" % wheel_base
print "\twheel_size: %f" % wheel_size
print "\twheel_ticks: %f" % wheel_ticks
print "\tbeam_width: %f" % beam_width
print "\tnum_part: %f" % num_part
print "\tangle_ticks: %f" % angle_ticks
print "\tdist_scale: %f" % dist_scale
# print "\tnoise: %f %f %f" % tuple(noise)
print "\tenc_noise: %f" % enc_noise
print "\tenc_noise_lin: %f" % enc_noise_lin
print "\ttheta_noise: %f" % theta_noise
print "\ttheat_noise_lin: %f" % theta_noise_lin
# print "\tstate_noise_off: %f %f %f" % tuple(state_noise_off)
# print "\tstate_noise_1st: %f %f %f" % tuple(state_noise_1st)
print "\toffset: %f %f %f" % tuple(offset)
print "\tbounds: %f %f %f %f" % bounds
print "\tresolution: %f" % resolution
if (image_path):
print "\timage path: %s" % image_path
if (trunc_readings):
print "\ttrunc_readings: %f" % trunc_readings
if (use_gui):
print "\tGUI enabled"
else:
print "\tGUI disabled"
print "\tdetect_range: %f" % detect_range
print "\tlp_coeff: %s" % lp_coeff
print "\ta_coeff: %f" % a_coeff
print "\tr_coeff: %f" % r_coeff
print "\tcov_offset: %f" % cov_offset
# print "\tinv_offset: %f" % inv_offset
print "\tunknown: %f" % unknown
print "\tdecay: %f" % decay
print "\tprior: %f" % prior
print "\tnum_angles: %f" % num_angles
print "\tstats_file: %s" % str(stats_file)
print "\tnum_back_traj: %f" % num_back_traj
print "\tfilter_steps: %f" % filter_steps
print "\toverlap_steps: %f" % overlap_steps
print "\tpf_resample: %s" % str(pf_resample)
print "\tfilter_only: %s" % str(filter_only)
print "\tsmooth_threshold: %f" % smooth_threshold
print "\tmin_smooth_len: %d" % min_smooth_len
print "\tnth_smoothing: %d" % nth_smoothing
# Configure all variables/objects
dead_reckoning = robot.ExactDifferentialRobot(
l=wheel_base,d=wheel_size,
ticks=wheel_ticks,
state=(0.0, 0.0, old_angle))
rob = robot.DifferentialRobot(l=wheel_base,d=wheel_size,
ticks=wheel_ticks,
enc_noise=enc_noise,
enc_noise_lin=enc_noise_lin,
theta_noise=theta_noise,
theta_noise_lin=theta_noise_lin,
state=(0.0, 0.0, old_angle))
ubot = UltrasoundRobot(offset=offset,
prec=beam_width,
rob=rob,
bounds=bounds,
resolution=resolution,
cov_offset=cov_offset,
a_coeff=a_coeff,
r_coeff=r_coeff,
unknown=unknown,
decay=decay,
prior=prior,
num_angles=num_angles,
detect_range=detect_range)
pa = PF.ParticleApproximation(num=num_part, seed=ubot)
pt = PF.ParticleTrajectory(pa, resample=pf_resample, lp_hack=lp_coeff)
gui = MyGUI(ubot, use_gui)
if (image_path):
gui.set_image_path(image_path)
if (not config_phase):
if ((not no_more_data) and words[0].lower() == "refpos:".lower()):
# Update correct position of robot in global referensframe
x = float(words[1])
y = float(words[2])
theta = float(words[3])
ubot.robot.set_pos(x,y,theta)
# Ref position comes before measurements update, calculate rel. err after
# receiving measurement.
else:
if ((not no_more_data) and words[0].lower() == "measure:".lower()):
# DifferentialRobot will handle conversion of these measurements
wa = float(words[1])
wb = float(words[2])
# Convert to angle
a = float(words[3])/angle_ticks*2*math.pi
# Convert measurment scales
d = float(words[4])*dist_scale
ubot.update([0.0, 0.0])
dead_reckoning.kinematic([wa, wb])
pt.update([wa, wb])
if ((not trunc_readings) or (d < trunc_readings)):
ubot.measure([a, d])
resampled = pt.measure([a, d])
if (resampled):
resample_count += 1
if (stats_file != None):
output_stats(stats_file, ubot, pt[-1].pa, dead_reckoning)
if ((not no_more_data) and words[0].lower() == "update:".lower()):
wa = float(words[1])
wb = float(words[2])
ubot.update([0.0, 0.0])
dead_reckoning.kinematic([wa, wb])
pt.update([wa, wb])
if (stats_file != None):
output_stats(stats_file, ubot, pt[-1].pa, dead_reckoning)
best_ind = pt[-1].pa.find_best_particles(n=min(5, num_part))
#gui.draw(pf.particles[best_ind[0]], pf.particles, best_ind[1:])
if (use_gui or image_path):
(m_slam, v_slam) = calc_est(pt[-1].pa)
gui.draw(ubot, pt[-1].pa.part, best_ind, dead_reckoning, m_slam)
# Do smoothing when particle efficency is too low or after fixed nbr of steps
if ((not filter_only) and len(pt) > min_smooth_len and
(pt.efficiency() < smooth_threshold or
len(pt) == filter_steps or no_more_data or
(nth_smoothing > 0 and resample_count >= nth_smoothing))):
resample_count = 0
smooth_ind = max(0, len(pt) - overlap_steps, int(math.ceil(len(pt)/2.0)))
signals = pt.extract_signals()
print "smoothing: len(pt)=%d, smooth_ind=%d, eff=%f" % (len(pt), smooth_ind,pt.efficiency())
# Used in CB below
(m_slam, v_slam) = calc_est(pt[-1].pa)
best_ind = pt[-1].pa.find_best_particles(min(5, num_part))
def draw_smoothing_cb(st_rev):
gui.draw_smoothing(st_rev, ubot, pt[-1].pa.part, best_ind,
dead_reckoning, m_slam)
#st = PS.do_smoothing(pt,num_back_traj, callback=draw_smoothing_cb)
st = PS.do_smoothing(pt,num_back_traj)
t0 = st[0].t[smooth_ind]
pt = None # Release references to no longer needed memory
st = PS.do_rb_smoothing(st)
pa = PS.extract_smooth_approx(st, smooth_ind)
st = None # Release references to no longer needed memory
# Resample to keep number of filtering particles constant
pa.resample(num_part)
# Create new ParticleTrajectory using smoothed approximation as initialisation
pt = PF.ParticleTrajectory(pa, resample=pf_resample, t0=t0, lp_hack=lp_coeff)
def draw_replay_cb(pt):
(m_slam, v_slam) = calc_est(pt[-1].pa)
best_ind = pt[-1].pa.find_best_particles(min(5, num_part))
gui.draw(ubot, pt[-1].pa.part, best_ind, dead_reckoning, m_slam)
# Propagate particle using old input and measurements
#PS.replay(pt, signals, smooth_ind, callback=draw_replay_cb)
PS.replay(pt, signals, smooth_ind, callback=None)
# Reset dumb trajectory to conserv memory
if (filter_only):
pt = pt.spawn()
j += 1
print "Simulation done"
| gpl-3.0 |
akpetty/ibtopo2016 | calc_6distributionsBULK.py | 1 | 7011 | ##############################################################
# Date: 20/01/16
# Name: calc_6distributionsBULK.py
# Author: Alek Petty
# Description: Script to calculate distributions of bulk topography stats (e.g. mean feature volume) across the MY/FY and CA/BC regions.
# Input requirements: Topography stats across all years
# Output: Distributions of bulk statistics (e.g. mean feature volume)
import matplotlib
matplotlib.use("AGG")
# basemap import
from mpl_toolkits.basemap import Basemap, shiftgrid
# Numpy import
import numpy as np
import mpl_toolkits.basemap.pyproj as pyproj
from pylab import *
import IB_functions as ro
import numpy.ma as ma
from scipy.interpolate import griddata
import os
mplot = Basemap(projection='npstere',boundinglat=68,lon_0=0, resolution='l' )
#rcParams['font.family'] = 'serif'
#rcParams['font.serif'] = ['Computer Modern Roman']
#rcParams['text.usetex'] = True
def get_hist_year(region, type, year):
hist=[]
bins=[]
if (region==0):
region_lonlat = [-150, 10, 81, 90]
region_str='CA'
if (region==1):
region_lonlat = [-170, -120, 69, 79]
region_str='BC'
#areavollonlat_big=1 or areavollonlat=1 to include/not include small features
xptsT, yptsT, lonT, latT, sail_area_fracT, ridge_heightT, ice_volumeT= ro.get_bulk_ridge_stats(mib, mplot, year, datapath, areavollonlat=1)
if (bulk_type==0):
varT=np.copy(sail_area_fracT)
elif (bulk_type==1):
varT=np.copy(ice_volumeT)
elif (bulk_type==2):
varT=np.copy(ridge_heightT)
region_mask, xptsM, yptsM = ro.get_region_mask(rawdatapath, mplot)
region_maskR = griddata((xptsM.flatten(), yptsM.flatten()),region_mask.flatten(), (xptsT, yptsT), method='nearest')
#ice_type, xptsA, yptsA = ro.get_ice_type_year(mplot, year-2009, res=1)
ice_type, xptsA, yptsA = ro.get_mean_ice_type(mplot, rawdatapath, year, res=1)
ice_typeR = griddata((xptsA.flatten(), yptsA.flatten()),ice_type.flatten(), (xptsT, yptsT), method='nearest')
if (type==0):
mask = where((ice_typeR<1.1) & (ice_typeR>0.4)&(lonT>region_lonlat[0]) & (lonT<region_lonlat[1]) & (latT>region_lonlat[2]) & (latT<region_lonlat[3])& (region_maskR==8))
if (type==1):
mask = where((ice_typeR<0.6) & (ice_typeR>0.4)&(lonT>region_lonlat[0]) & (lonT<region_lonlat[1]) & (latT>region_lonlat[2]) & (latT<region_lonlat[3])& (region_maskR==8))
if (type==2):
mask = where((ice_typeR<1.1) & (ice_typeR>0.9)&(lonT>region_lonlat[0]) & (lonT<region_lonlat[1]) & (latT>region_lonlat[2]) & (latT<region_lonlat[3])& (region_maskR==8))
varT=varT[mask]
histT, binsT = np.histogram(varT, bins=bin_vals)
meanH = mean(varT)
medianH = median(varT)
stdH = std(varT)
modeH = binsT[argmax(histT)] + bin_width/2.
#hist.append(histT)
#bins.append(binsT)
return binsT, histT, meanH, medianH, modeH, stdH
def get_hist_allyears(region, type):
hist=[]
bins=[]
if (region==0):
region_lonlat = [-150, 10, 81, 90]
region_str='CA'
if (region==1):
region_lonlat = [-170, -120, 69, 79]
region_str='BC'
varALL=[]
for year in xrange(start_year, end_year+1):
print year
xptsT, yptsT, lonT, latT, sail_area_fracT, ridge_heightT, ice_volumeT= ro.get_bulk_ridge_stats(mib, mplot, year, datapath, areavollonlat=1)
if (bulk_type==0):
varT=np.copy(sail_area_fracT)
elif (bulk_type==1):
varT=np.copy(ice_volumeT)
elif (bulk_type==2):
varT=np.copy(ridge_heightT)
region_mask, xptsM, yptsM = ro.get_region_mask(rawdatapath, mplot)
region_maskR = griddata((xptsM.flatten(), yptsM.flatten()),region_mask.flatten(), (xptsT, yptsT), method='nearest')
ice_type, xptsA, yptsA = ro.get_mean_ice_type(mplot, rawdatapath,year, res=1)
ice_typeR = griddata((xptsA.flatten(), yptsA.flatten()),ice_type.flatten(), (xptsT, yptsT), method='nearest')
if (type==0):
mask = where((ice_typeR<1.1) & (ice_typeR>0.4)&(lonT>region_lonlat[0]) & (lonT<region_lonlat[1]) & (latT>region_lonlat[2]) & (latT<region_lonlat[3])& (region_maskR==8))
if (type==1):
mask = where((ice_typeR<0.6) & (ice_typeR>0.4)&(lonT>region_lonlat[0]) & (lonT<region_lonlat[1]) & (latT>region_lonlat[2]) & (latT<region_lonlat[3])& (region_maskR==8))
if (type==2):
mask = where((ice_typeR<1.1) & (ice_typeR>0.9)&(lonT>region_lonlat[0]) & (lonT<region_lonlat[1]) & (latT>region_lonlat[2]) & (latT<region_lonlat[3])& (region_maskR==8))
varT=varT[mask]
varALL.extend(varT)
histT, binsT = np.histogram(varALL, bins=bin_vals)
meanH = mean(varALL)
medianH = median(varALL)
stdH = std(varALL)
modeH = binsT[argmax(histT)] + bin_width/2.
#hist.append(histT)
#bins.append(binsT)
return binsT, histT, meanH, medianH, modeH, stdH
#--------------------------------------------------
#-------------- GET DMS Projection ------------------
mib=pyproj.Proj("+init=EPSG:3413")
bulk_type=1
if (bulk_type==0):
bulk_str='area'
elif (bulk_type==1):
bulk_str='vol'
elif (bulk_type==2):
bulk_str='height'
thresh=20
fadd=''
ftype='1km_xyres2m_'+str(thresh)+'cm'+fadd
datapath='./Data_output/'+ftype+'/'
figpath = './Figures/'
outpath= datapath+'DISTS/'
rawdatapath='../../DATA/'
if not os.path.exists(outpath):
os.makedirs(outpath)
bin_width = 0.01
start_h=0.
print start_h
end_h=0.5
bin_vals=np.arange(start_h,end_h, bin_width)
start_year=2009
end_year=2014
num_years = end_year - start_year + 1
histALL=[]
binsALL=[]
statsT = np.zeros((num_years+1, 4))
years = np.arange(start_year, end_year+1)
years.astype('str')
statsALL = np.zeros(((num_years+1)*3, 9))
for t in xrange(3):
for r in xrange(2):
for year in xrange(start_year, end_year+1):
print t, r, year
binsT, histT, meanHT, medianHT, modeHT, stdHT = get_hist_year(r, t, year)
binsT.dump(outpath+'binsT_r'+str(r)+'_t'+str(t)+'_'+ftype+str(year)+bulk_str+'.txt')
histT.dump(outpath+'histT_r'+str(r)+'_t'+str(t)+'_'+ftype+str(year)+bulk_str+'.txt')
statsT[year - start_year, 0] = meanHT
statsT[year - start_year, 1] = stdHT
#statsT[year - start_year, 1] = medianHT
statsT[year - start_year, 2] = modeHT
statsT[year - start_year, 3] = sum(histT)/1e4
binsT, histT, meanHT, medianHT, modeHT, stdHT = get_hist_allyears(r, t)
binsT.dump(outpath+'binsT_r'+str(r)+'_t'+str(t)+'_'+ftype+bulk_str+'ALLYEARS.txt')
histT.dump(outpath+'histT_r'+str(r)+'_t'+str(t)+'_'+ftype+bulk_str+'ALLYEARS.txt')
statsT[year - start_year+1,0] = meanHT
statsT[year - start_year+1,1] = stdHT
statsT[year - start_year+1,2] = modeHT
statsT[year - start_year+1,3] = sum(histT)/1e4
savetxt(outpath+'statsALL_r'+str(r)+'_t'+str(t)+'_'+ftype+bulk_str+'.txt', statsT, fmt='%.3f', header='Mean, SD, Mode, Number', delimiter='&')
statsALL[t*(num_years+1):(t*(num_years+1))+num_years+1, 0]=np.arange(2009, 2016)
statsALL[t*(num_years+1):(t*(num_years+1))+num_years+1,(r*4)+1:(r*4)+4+1 ]=statsT
#binsALL.append(binsT)
#histALL.append(histT)
savetxt(outpath+'statsALL_CABCMYFY_'+ftype+bulk_str+'.txt', statsALL, fmt='& %.0f & %.2f (%.2f) & %.2f & %.2f & %.2f (%.2f) & %.2f & %.2f \\', header='Year, Mean (SD), Mode, Number, Mean (SD), Mode, Number')
| gpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/tests/backend_driver_sgskip.py | 1 | 14893 | """
==============
Backend Driver
==============
This is used to drive many of the examples across the backends, for
regression testing, and comparing backend efficiency.
You can specify the backends to be tested either via the --backends
switch, which takes a comma-separated list, or as separate arguments,
e.g.
python backend_driver.py agg ps
would test the agg and ps backends. If no arguments are given, a
default list of backends will be tested.
Interspersed with the backend arguments can be switches for the Python
interpreter executing the tests. If entering such arguments causes an
option parsing error with the driver script, separate them from driver
switches with a --.
"""
from __future__ import print_function, division
import os
import time
import sys
import glob
from optparse import OptionParser
import matplotlib.rcsetup as rcsetup
from matplotlib.cbook import Bunch, dedent
all_backends = list(rcsetup.all_backends) # to leave the original list alone
# actual physical directory for each dir
dirs = dict(files=os.path.join('..', 'lines_bars_and_markers'),
shapes=os.path.join('..', 'shapes_and_collections'),
images=os.path.join('..', 'images_contours_and_fields'),
pie=os.path.join('..', 'pie_and_polar_charts'),
text=os.path.join('..', 'text_labels_and_annotations'),
ticks=os.path.join('..', 'ticks_and_spines'),
subplots=os.path.join('..', 'subplots_axes_and_figures'),
specialty=os.path.join('..', 'specialty_plots'),
showcase=os.path.join('..', 'showcase'),
pylab=os.path.join('..', 'pylab_examples'),
api=os.path.join('..', 'api'),
units=os.path.join('..', 'units'),
mplot3d=os.path.join('..', 'mplot3d'),
colors=os.path.join('..', 'color'))
# files in each dir
files = dict()
files['lines'] = [
'barh.py',
'cohere.py',
'fill.py',
'fill_demo_features.py',
'line_demo_dash_control.py',
'line_styles_reference.py',
'scatter_with_legend.py'
]
files['shapes'] = [
'path_patch_demo.py',
'scatter_demo.py',
]
files['colors'] = [
'color_cycle_default.py',
'color_cycle_demo.py',
]
files['images'] = [
'image_demo.py',
'contourf_log.py',
]
files['statistics'] = [
'errorbar.py',
'errorbar_features.py',
'histogram_cumulative.py',
'histogram_features.py',
'histogram_histtypes.py',
'histogram_multihist.py',
]
files['pie'] = [
'pie_demo.py',
'polar_bar.py',
'polar_scatter.py',
]
files['text_labels_and_annotations'] = [
'accented_text.py',
'text_demo_fontdict.py',
'text_rotation.py',
'unicode_demo.py',
]
files['ticks_and_spines'] = [
'spines_demo_bounds.py',
'ticklabels_demo_rotation.py',
]
files['subplots_axes_and_figures'] = [
'subplot_demo.py',
]
files['showcase'] = [
'integral_demo.py',
]
files['pylab'] = [
'alignment_test.py',
'annotation_demo.py',
'annotation_demo.py',
'annotation_demo2.py',
'annotation_demo2.py',
'anscombe.py',
'arctest.py',
'arrow_demo.py',
'axes_demo.py',
'axes_props.py',
'axhspan_demo.py',
'axis_equal_demo.py',
'bar_stacked.py',
'barb_demo.py',
'barchart_demo.py',
'barcode_demo.py',
'boxplot_demo.py',
'broken_barh.py',
'color_by_yvalue.py',
'color_demo.py',
'colorbar_tick_labelling_demo.py',
'contour_demo.py',
'contour_image.py',
'contour_label_demo.py',
'contourf_demo.py',
'coords_demo.py',
'coords_report.py',
'csd_demo.py',
'cursor_demo.py',
'custom_cmap.py',
'custom_figure_class.py',
'custom_ticker1.py',
'customize_rc.py',
'dashpointlabel.py',
'date_demo_convert.py',
'date_demo_rrule.py',
'date_index_formatter.py',
'dolphin.py',
'ellipse_collection.py',
'ellipse_demo.py',
'ellipse_rotated.py',
'errorbar_limits.py',
'fancyarrow_demo.py',
'fancybox_demo.py',
'fancybox_demo2.py',
'fancytextbox_demo.py',
'figimage_demo.py',
'figlegend_demo.py',
'figure_title.py',
'fill_between_demo.py',
'fill_spiral.py',
'findobj_demo.py',
'fonts_demo.py',
'fonts_demo_kw.py',
'ganged_plots.py',
'geo_demo.py',
'gradient_bar.py',
'griddata_demo.py',
'hatch_demo.py',
'hexbin_demo.py',
'hexbin_demo2.py',
'vline_hline_demo.py',
'image_clip_path.py',
'image_demo.py',
'image_demo2.py',
'image_interp.py',
'image_masked.py',
'image_nonuniform.py',
'image_origin.py',
'image_slices_viewer.py',
'interp_demo.py',
'invert_axes.py',
'layer_images.py',
'legend_demo2.py',
'legend_demo3.py',
'line_collection.py',
'line_collection2.py',
'log_bar.py',
'log_demo.py',
'log_test.py',
'major_minor_demo1.py',
'major_minor_demo2.py',
'masked_demo.py',
'mathtext_demo.py',
'mathtext_examples.py',
'matshow.py',
'mri_demo.py',
'mri_with_eeg.py',
'multi_image.py',
'multiline.py',
'multiple_figs_demo.py',
'nan_test.py',
'scalarformatter.py',
'pcolor_demo.py',
'pcolor_log.py',
'pcolor_small.py',
'pie_demo2.py',
'plotfile_demo.py',
'polar_demo.py',
'polar_legend.py',
'psd_demo.py',
'psd_demo2.py',
'psd_demo3.py',
'quadmesh_demo.py',
'quiver_demo.py',
'scatter_custom_symbol.py',
'scatter_demo2.py',
'scatter_masked.py',
'scatter_profile.py',
'scatter_star_poly.py',
#'set_and_get.py',
'shared_axis_across_figures.py',
'shared_axis_demo.py',
'simple_plot.py',
'specgram_demo.py',
'spine_placement_demo.py',
'spy_demos.py',
'stem_plot.py',
'step_demo.py',
'stix_fonts_demo.py',
'subplots_adjust.py',
'symlog_demo.py',
'table_demo.py',
'text_rotation_relative_to_line.py',
'transoffset.py',
'xcorr_demo.py',
'zorder_demo.py',
]
files['api'] = [
'agg_oo.py',
'barchart_demo.py',
'bbox_intersect.py',
'collections_demo.py',
'colorbar_only.py',
'custom_projection_example.py',
'custom_scale_example.py',
'date_demo.py',
'date_index_formatter.py',
'donut_demo.py',
'font_family_rc.py',
'image_zcoord.py',
'joinstyle.py',
'legend_demo.py',
'line_with_text.py',
'logo2.py',
'mathtext_asarray.py',
'patch_collection.py',
'quad_bezier.py',
'scatter_piecharts.py',
'span_regions.py',
'two_scales.py',
'unicode_minus.py',
'watermark_image.py',
'watermark_text.py',
]
files['units'] = [
'annotate_with_units.py',
#'artist_tests.py', # broken, fixme
'bar_demo2.py',
#'bar_unit_demo.py', # broken, fixme
#'ellipse_with_units.py', # broken, fixme
'radian_demo.py',
'units_sample.py',
#'units_scatter.py', # broken, fixme
]
files['mplot3d'] = [
'2dcollections3d_demo.py',
'bars3d_demo.py',
'contour3d_demo.py',
'contour3d_demo2.py',
'contourf3d_demo.py',
'lines3d_demo.py',
'polys3d_demo.py',
'scatter3d_demo.py',
'surface3d_demo.py',
'surface3d_demo2.py',
'text3d_demo.py',
'wire3d_demo.py',
]
# dict from dir to files we know we don't want to test (e.g., examples
# not using pyplot, examples requiring user input, animation examples,
# examples that may only work in certain environs (usetex examples?),
# examples that generate multiple figures
excluded = {
'units': ['__init__.py', 'date_support.py', ],
}
def report_missing(dir, flist):
'report the py files in dir that are not in flist'
globstr = os.path.join(dir, '*.py')
fnames = glob.glob(globstr)
pyfiles = {os.path.split(fullpath)[-1] for fullpath in set(fnames)}
exclude = set(excluded.get(dir, []))
flist = set(flist)
missing = list(pyfiles - flist - exclude)
if missing:
print('%s files not tested: %s' % (dir, ', '.join(sorted(missing))))
def report_all_missing(directories):
for f in directories:
report_missing(dirs[f], files[f])
# tests known to fail on a given backend
failbackend = dict(
svg=('tex_demo.py', ),
agg=('hyperlinks.py', ),
pdf=('hyperlinks.py', ),
ps=('hyperlinks.py', ),
)
from matplotlib.compat import subprocess
def run(arglist):
try:
ret = subprocess.call(arglist)
except KeyboardInterrupt:
sys.exit()
else:
return ret
def drive(backend, directories, python=['python'], switches=[]):
exclude = failbackend.get(backend, [])
# Clear the destination directory for the examples
path = backend
if os.path.exists(path):
import glob
for fname in os.listdir(path):
os.unlink(os.path.join(path, fname))
else:
os.mkdir(backend)
failures = []
testcases = [os.path.join(dirs[d], fname)
for d in directories
for fname in files[d]]
for fullpath in testcases:
print('\tdriving %-40s' % (fullpath))
sys.stdout.flush()
fpath, fname = os.path.split(fullpath)
if fname in exclude:
            print('\tSkipping %s, known to fail on backend: %s' % (fname, backend))
continue
basename, ext = os.path.splitext(fname)
outfile = os.path.join(path, basename)
tmpfile_name = '_tmp_%s.py' % basename
tmpfile = open(tmpfile_name, 'w')
future_imports = 'from __future__ import division, print_function'
for line in open(fullpath):
line_lstrip = line.lstrip()
if line_lstrip.startswith("#"):
tmpfile.write(line)
elif 'unicode_literals' in line:
future_imports = future_imports + ', unicode_literals'
tmpfile.writelines((
future_imports + '\n',
'import sys\n',
'sys.path.append("%s")\n' % fpath.replace('\\', '\\\\'),
'import matplotlib\n',
'matplotlib.use("%s")\n' % backend,
'from pylab import savefig\n',
'import numpy\n',
'numpy.seterr(invalid="ignore")\n',
))
for line in open(fullpath):
line_lstrip = line.lstrip()
if (line_lstrip.startswith('from __future__ import') or
line_lstrip.startswith('matplotlib.use') or
line_lstrip.startswith('savefig') or
line_lstrip.startswith('show')):
continue
tmpfile.write(line)
if backend in rcsetup.interactive_bk:
tmpfile.write('show()')
else:
tmpfile.write('\nsavefig(r"%s", dpi=150)' % outfile)
tmpfile.close()
start_time = time.time()
program = [x % {'name': basename} for x in python]
ret = run(program + [tmpfile_name] + switches)
end_time = time.time()
print("%s %s" % ((end_time - start_time), ret))
# subprocess.call([python, tmpfile_name] + switches)
os.remove(tmpfile_name)
if ret:
failures.append(fullpath)
return failures
def parse_options():
doc = (__doc__ and __doc__.split('\n\n')) or " "
op = OptionParser(description=doc[0].strip(),
usage='%prog [options] [--] [backends and switches]',
#epilog='\n'.join(doc[1:]) # epilog not supported on my python2.4 machine: JDH
)
op.disable_interspersed_args()
op.set_defaults(dirs='pylab,api,units,mplot3d',
clean=False, coverage=False, valgrind=False)
op.add_option('-d', '--dirs', '--directories', type='string',
dest='dirs', help=dedent('''
Run only the tests in these directories; comma-separated list of
one or more of: pylab (or pylab_examples), api, units, mplot3d'''))
op.add_option('-b', '--backends', type='string', dest='backends',
help=dedent('''
Run tests only for these backends; comma-separated list of
one or more of: agg, ps, svg, pdf, template, cairo,
Default is everything except cairo.'''))
op.add_option('--clean', action='store_true', dest='clean',
help='Remove result directories, run no tests')
op.add_option('-c', '--coverage', action='store_true', dest='coverage',
help='Run in coverage.py')
op.add_option('-v', '--valgrind', action='store_true', dest='valgrind',
help='Run in valgrind')
options, args = op.parse_args()
switches = [x for x in args if x.startswith('--')]
backends = [x.lower() for x in args if not x.startswith('--')]
if options.backends:
backends += [be.lower() for be in options.backends.split(',')]
result = Bunch(
dirs=options.dirs.split(','),
backends=backends or ['agg', 'ps', 'svg', 'pdf', 'template'],
clean=options.clean,
coverage=options.coverage,
valgrind=options.valgrind,
switches=switches)
if 'pylab_examples' in result.dirs:
result.dirs[result.dirs.index('pylab_examples')] = 'pylab'
#print(result)
return (result)
if __name__ == '__main__':
times = {}
failures = {}
options = parse_options()
if options.clean:
localdirs = [d for d in glob.glob('*') if os.path.isdir(d)]
all_backends_set = set(all_backends)
for d in localdirs:
if d.lower() not in all_backends_set:
continue
print('removing %s' % d)
for fname in glob.glob(os.path.join(d, '*')):
os.remove(fname)
os.rmdir(d)
for fname in glob.glob('_tmp*.py'):
os.remove(fname)
print('all clean...')
raise SystemExit
if options.coverage:
python = ['coverage.py', '-x']
elif options.valgrind:
python = ['valgrind', '--tool=memcheck', '--leak-check=yes',
'--log-file=%(name)s', sys.executable]
elif sys.platform == 'win32':
python = [sys.executable]
else:
python = [sys.executable]
report_all_missing(options.dirs)
for backend in options.backends:
print('testing %s %s' % (backend, ' '.join(options.switches)))
t0 = time.time()
failures[backend] = \
drive(backend, options.dirs, python, options.switches)
t1 = time.time()
times[backend] = (t1 - t0)/60.0
#print(times)
for backend, elapsed in times.items():
print('Backend %s took %1.2f minutes to complete' % (backend, elapsed))
failed = failures[backend]
if failed:
print(' Failures: %s' % failed)
if 'template' in times:
print('\ttemplate ratio %1.3f, template residual %1.3f' % (
elapsed/times['template'], elapsed - times['template']))
| mit |
NeuroanatomyAndConnectivity/pipelines | src/clustering/coact_clustering_pipeline.py | 2 | 4019 | from nipype import config
config.enable_debug_mode()
import matplotlib
matplotlib.use('Agg')
import os
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.afni as afni
from clustering.cluster import Cluster
from clustering.similarity import Similarity
from clustering.mask_surface import MaskSurface
from clustering.mask_volume import MaskVolume
from clustering.concat import Concat
from clustering.cluster_map import ClusterMap
from coact_clustering_variables import fsaverage, workingdir, clusterdir, clustering_dg_template, clustering_dg_args, hemispheres, cluster_types, n_clusters, epsilon
def get_wf():
wf = pe.Workflow(name="main_workflow")
wf.base_dir = os.path.join(workingdir,"clustering_pipeline")
wf.config['execution']['crashdump_dir'] = wf.base_dir + "/crash_files"
##Infosource##
fs_infosource = pe.Node(util.IdentityInterface(fields=['fs']), name="fs_infosource")
fs_infosource.iterables = ('fs', fsaverage)
hemi_infosource = pe.Node(util.IdentityInterface(fields=['hemi']), name="hemi_infosource")
hemi_infosource.iterables = ('hemi', hemispheres)
cluster_infosource = pe.Node(util.IdentityInterface(fields=['cluster']), name="cluster_infosource")
cluster_infosource.iterables = ('cluster', cluster_types)
n_clusters_infosource = pe.Node(util.IdentityInterface(fields=['n_clusters']), name="n_clusters_infosource")
n_clusters_infosource.iterables = ('n_clusters', n_clusters)
##Datagrabber##
datagrabber = pe.Node(nio.DataGrabber(infields=['fs','hemi'], outfields=['simmatrix','maskindex','targetmask']), name="datagrabber")
datagrabber.inputs.base_directory = '/'
datagrabber.inputs.template = '*'
datagrabber.inputs.field_template = clustering_dg_template
datagrabber.inputs.template_args = clustering_dg_args
datagrabber.inputs.sort_filelist = True
wf.connect(fs_infosource, 'fs', datagrabber, 'fs')
wf.connect(hemi_infosource, 'hemi', datagrabber, 'hemi')
##clustering##
clustering = pe.Node(Cluster(), name = 'clustering')
clustering.inputs.epsilon = epsilon
wf.connect(hemi_infosource, 'hemi', clustering, 'hemi')
wf.connect(cluster_infosource, 'cluster', clustering, 'cluster_type')
wf.connect(n_clusters_infosource, 'n_clusters', clustering, 'n_clusters')
wf.connect(datagrabber, 'simmatrix', clustering, 'in_File')
##reinflate to surface indices##
clustermap = pe.Node(ClusterMap(), name = 'clustermap')
wf.connect(clustering, 'out_File', clustermap, 'clusteredfile')
wf.connect(datagrabber, 'maskindex', clustermap, 'indicesfile')
wf.connect(datagrabber, 'targetmask', clustermap, 'maskfile')
##Datasink##
ds = pe.Node(nio.DataSink(), name="datasink")
ds.inputs.base_directory = clusterdir
wf.connect(clustermap, 'clustermapfile', ds, 'clustered')
wf.connect(clustermap, 'clustermaptext', ds, 'clustered.@1')
wf.write_graph()
return wf
if __name__ == '__main__':
cfg = dict(logging=dict(workflow_level = 'INFO'), execution={'remove_unnecessary_outputs': False, 'job_finished_timeout': 120, 'stop_on_first_rerun': False, 'stop_on_first_crash': True, 'display_variable':":1"} )
config.update_config(cfg)
wf = get_wf()
#wf.run(plugin="CondorDAGMan", plugin_args={'initial_specs':'requirements= Name == "namibia.cbs.mpg.de" \nuniverse = vanilla\nnotification = Error\ngetenv = true\nrequest_memory=4000'})
wf.run(plugin="CondorDAGMan", plugin_args={"initial_specs":"universe = vanilla\nnotification = Error\ngetenv = true\nrequest_memory=4000"})
#wf.run(plugin="MultiProc", plugin_args={"n_procs":8})
#wf.run(plugin='Linear')
#wf.run(plugin='Condor')
#wf.run(plugin="Condor", plugin_args={'initial_specs':'universe = vanilla\nnotification = Error\ngetenv = true\nrequest_memory=4000'})
| mit |
chrsrds/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
neuroidss/nupic.research | htmresearch/support/lateral_pooler/datasets.py | 4 | 5188 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
from scalar_encoder import ScalarEncoder
from scipy.io import loadmat
from sklearn.datasets import fetch_mldata
import itertools
def encode_streams(data_streams, bits_per_axis, weight_per_axis):
Y = data_streams
num_streams = Y.shape[0]
num_points = Y.shape[1]
enc = []
for i in range(num_streams):
enc.append(ScalarEncoder(0.,1.,bits_per_axis[i], weight_per_axis[i]))
X = np.zeros((np.sum(bits_per_axis), num_points))
for t in range(num_points):
X[:,t] = np.concatenate([enc[i](Y[i,t]) for i in range(num_streams)])
return X
def xy_biased(bits_per_axis=[50,150], weight_per_axis=[5,15], num_samples=100000):
Y = np.random.sample((2, num_samples))
X = encode_streams(Y, bits_per_axis, weight_per_axis)
Y_test = np.array([[0.1 + x*0.2, 0.1 + y*0.2] for y,x in itertools.product(range(5), repeat=2)]).T
X_test = encode_streams(Y_test, bits_per_axis, weight_per_axis)
return (X, Y, X_test, Y_test)
def mnist(threshold=0.3):
mnist = fetch_mldata('MNIST original')
X = mnist.data.T
Y = mnist.target.reshape((1,-1))
perm = np.random.permutation(X.shape[1])
X = X[:,perm]
Y = Y[:,perm]
X = X/255.0
X = (X > threshold).astype(float)
return (X[:,:60000], Y[:,:60000], X[:,-10000:], Y[:,-10000:])
def mnist_two_channel(threshold=0.2):
mnist = fetch_mldata('MNIST original')
X = mnist.data.T
Y = mnist.target.reshape((1,-1))
perm = np.random.permutation(X.shape[1])
X = X[:,perm]
Y = Y[:,perm]
X = X/255.0
X = (X > threshold).astype(float)
X2 = np.zeros((784,2,70000))
X2[:,1,:] = X
X2[:,0,:] = 1.0 - X
X2 = X2.reshape((784*2,70000))
return (X2[:,:60000], Y[:,:60000], X2[:,-10000:], Y[:,-10000:])
def uniform_2d(bits_per_axis=100, weight_per_axis=16, num_samples=60000):
R = np.random.randint(bits_per_axis - weight_per_axis, size=(2,num_samples))
Y = R/float(bits_per_axis - weight_per_axis)
X = np.zeros((bits_per_axis, bits_per_axis, num_samples))
C = np.zeros((2, num_samples))
X_test = np.zeros((bits_per_axis, bits_per_axis, 400))
C_test = np.zeros((2, 400))
R_test = np.random.randint(30, 60 - weight_per_axis, size=(2,400))
for t in range(num_samples):
C[0,t] = R[0,t] + weight_per_axis//2
C[1,t] = R[1,t] + weight_per_axis//2
for i in range(R[0,t], R[0,t] + weight_per_axis):
X[i, range(R[1,t], R[1,t] + weight_per_axis), t] = 1.0
for t in range(400):
C_test[0,t] = R_test[0,t] + weight_per_axis//2
C_test[1,t] = R_test[1,t] + weight_per_axis//2
for i in range(R_test[0,t], R_test[0,t] + weight_per_axis):
X_test[i, range(R_test[1,t], R_test[1,t] + weight_per_axis), t] = 1.0
X = X.reshape((bits_per_axis**2,-1))
X_test = X_test.reshape((bits_per_axis**2,-1))
return X[:,:80000], C[:,:80000], X_test[:,:], C_test[:,:]
def random_walk_2d(bits_per_axis=100, weight_per_axis=5, num_samples=500000):
radius = 30
steps = 100
w = weight_per_axis
bpa = bits_per_axis
X = np.zeros((bpa, bpa, num_samples))
for t in range(num_samples//steps):
cx = np.random.randint(0,bits_per_axis - radius)
cy = np.random.randint(0,bits_per_axis - radius)
for s in range(steps):
sx = np.random.randint(0, radius - w)
sy = np.random.randint(0, radius - w)
x = cx + sx
y = cy + sy
for i in range(x, x + w):
X[i, range(y, y + w), t*steps + s] = 1.0
X = X.reshape((bits_per_axis**2,-1))
return X[:,:]
def load_data(label):
if label == "mnist":
return mnist()
elif label == "mnist_two_channel":
return mnist_two_channel()
elif label == "xy_biased":
return xy_biased()
elif label == "xy_biased_big":
return xy_biased(bits_per_axis=[200,600], weight_per_axis=[20,60], num_samples=100000)
elif label == "uniform_2d":
return uniform_2d()
elif label == "random_walk_2d":
return random_walk_2d()
else:
raise "No data set with that label...."
| agpl-3.0 |
DonBeo/scikit-learn | sklearn/manifold/locally_linear.py | 21 | 24928 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
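Examples
--------
A minimal illustrative sketch on random data (added example, not part of the
original docstring); it only checks that the weights sum to one per row.
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> X = rng.rand(5, 3)
>>> Z = X[rng.randint(0, 5, (5, 2))]
>>> W = barycenter_weights(X, Z)
>>> bool(np.allclose(W.sum(axis=1), 1.0))
True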
"""
X = np.asarray(X)
Z = np.asarray(Z)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
if X.dtype.kind == 'i':
X = X.astype(np.float)
if Z.dtype.kind == 'i':
Z = Z.astype(np.float)
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
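Examples
--------
A small illustrative call on random data (added example, not part of the
original docstring):
>>> import numpy as np
>>> X = np.random.RandomState(0).rand(6, 3)
>>> A = barycenter_kneighbors_graph(X, n_neighbors=2)
>>> A.shape
(6, 6)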
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
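Examples
--------
Illustrative usage on random data (added example, not part of the original
docstring); the embedding has one row per sample and ``n_components`` columns.
>>> import numpy as np
>>> X = np.random.RandomState(42).rand(100, 5)
>>> Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
>>> Y.shape
(100, 2)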
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
M.flat[::M.shape[0] + 1] += 1 # M = W'W - W' - W + I = (I - W)'(I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_ : array-like, shape [n_samples, n_components]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
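Examples
--------
Illustrative usage on random data (added example, not part of the original
docstring):
>>> import numpy as np
>>> from sklearn.manifold import LocallyLinearEmbedding
>>> X = np.random.RandomState(42).rand(100, 5)
>>> embedding = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
>>> X_transformed = embedding.fit_transform(X)
>>> X_transformed.shape
(100, 2)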
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of the scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs).
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
| bsd-3-clause |
treycausey/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 8 | 6264 | """
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to :math:`\\frac{n_samples - n_features-1}{2}` outliers) estimator of
covariance. The idea is to find :math:`\\frac{n_samples+n_features+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided n_samples > 5 * n_features
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import pylab as pl
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
pl.subplot(2, 1, 1)
pl.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
pl.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
pl.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
pl.title("Influence of outliers on the location estimation")
pl.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
pl.legend(loc="upper left", prop=font_prop)
pl.subplot(2, 1, 2)
x_size = range_n_outliers.size
pl.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
pl.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
pl.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
pl.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
pl.title("Influence of outliers on the covariance estimation")
pl.xlabel("Amount of contamination (%)")
pl.ylabel("RMSE")
pl.legend(loc="upper center", prop=font_prop)
pl.show()
| bsd-3-clause |
equialgo/scikit-learn | examples/datasets/plot_iris_dataset.py | 36 | 1929 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
rrohan/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
corresponds to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
soett55/shell_script | docker_collection/pytorch-jup/jupyter_notebook_config.py | 2 | 22611 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Whether to allow the user to run the notebook as root.
c.NotebookApp.allow_root = True
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
c.NotebookApp.ip = '0.0.0.0'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = '/workspace'
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = ''
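## For example, a hash produced by the command above has the form shown below
# (illustrative placeholder only -- generate your own value, do not reuse this):
#
# c.NotebookApp.password = 'sha1:67c9e60bb8b6:9ffede0825894254b2e042ea597d771089e11aed'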
## Forces users to use a password for the Notebook server. This is useful in a
# multi-user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
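## For example (illustrative file paths only; adjust to your own certificate and
# key locations):
#
# c.NotebookApp.ssl_options = {
#     'certfile': '/path/to/mycert.pem',
#     'keyfile': '/path/to/mykey.key',
# }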
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
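## Illustrative example (an assumption, not a generated default): the Parameters
# section above mentions msgpack as an alternative serializer; enabling it would
# look like the following, provided the msgpack package is installed.
#c.Session.packer = 'msgpack.packb'
#c.Session.unpacker = 'msgpack.unpackb'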
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
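## Illustrative sketch (an assumption, not a generated default): a pre_save_hook
# that strips code-cell outputs before saving, as suggested above. The function
# name is arbitrary and the cell layout assumes nbformat 4 notebooks.
#def scrub_output_pre_save(model, **kwargs):
#    if model['type'] != 'notebook':
#        return
#    for cell in model['content'].get('cells', []):
#        if cell.get('cell_type') == 'code':
#            cell['outputs'] = []
#            cell['execution_count'] = None
#c.ContentsManager.pre_save_hook = scrub_output_pre_save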
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note
# ----
# Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
#         A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk to a temporary file and then, if
# successfully written, the temporary file replaces the old one. This procedure,
# namely 'atomic_writing', causes some bugs on file systems without operation
# order enforcement (like some networked file systems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or exceeded quota).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
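## Illustrative sketch (an assumption, not a generated default): a post_save_hook
# that converts a just-saved notebook to a .py script with nbconvert, as
# suggested above. The function name and the nbconvert invocation are examples.
#import os
#from subprocess import check_call
#def convert_to_script_post_save(model, os_path, contents_manager, **kwargs):
#    if model['type'] != 'notebook':
#        return
#    d, fname = os.path.split(os_path)
#    check_call(['jupyter', 'nbconvert', '--to', 'script', fname], cwd=d)
#c.FileContentsManager.post_save_hook = convert_to_script_post_save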
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| gpl-3.0 |
hsiaoyi0504/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
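    # Time lars_path and orthogonal_mp, each with and without a precomputed Gram
    # matrix, over a grid of (n_samples, n_features) sizes, and return the ratios
    # of LARS to OMP running times.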
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
        ax = fig.add_subplot(1, 2, i + 1)  # subplot indices are 1-based
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
hilaskis/UAV_MissionPlanner | Lib/site-packages/numpy/fft/fftpack.py | 59 | 39653 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
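    # Shared helper for the 1-D transforms: fetch (or build and cache) the
    # FFTPACK work array for length n, crop or zero-pad `a` along `axis` to
    # length n, move that axis to the last position, run the low-level work
    # function, and swap the axis back before returning.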
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
wsave = fft_cache[n]
except(KeyError):
wsave = init_function(n)
fft_cache[n] = wsave
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
return r
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n/2+1``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermite-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n/2+1``.
When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which
must be purely real due to the Hermite symmetry.
If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and
``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]``
contains the term for frequency ``A[(n-1)/2]``, and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermite-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n/2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input (along the axis specified by `axis`).
axis : int, optional
Axis over which to compute the inverse FFT.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where `m` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermite-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
The input array.
n : int, optional
The length of the FFT.
axis : int, optional
The axis over which to compute the FFT, assuming Hermitian symmetry
of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
axis : int, optional
Axis over which to compute the inverse FFT, assuming Hermitian
symmetry of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
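    # Normalize the (s, axes) arguments shared by the n-dimensional transforms:
    # default `s` to the input shape along `axes`, default `axes` to the last
    # len(s) axes, and, for inverse real transforms (invreal=1) with no explicit
    # shape, recover the last-axis length as 2*(m-1).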
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
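    # Apply the given 1-D transform function along each requested axis,
    # iterating over the axes in reverse order.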
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = range(len(axes))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a,s,axes,fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 5.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 10.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 15.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 20.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a,s,axes,fft)
def ifft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input (along the
axes specified by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where `m` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes)
# Deprecated names
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
| gpl-2.0 |
anderspitman/scikit-bio | skbio/io/format/tests/test_blast7.py | 4 | 9957 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import numpy as np
from skbio.util import get_data_path, assert_data_frame_almost_equal
from skbio.io import BLAST7FormatError
from skbio.io.format.blast7 import _blast7_to_data_frame, _blast7_sniffer
class TestBLAST7Sniffer(unittest.TestCase):
def setUp(self):
self.positives = [get_data_path(e) for e in [
'blast7_default_single_line',
'blast7_default_multi_line',
'blast7_custom_minimal',
'blast7_custom_single_line',
'blast7_custom_multi_line',
'blast7_custom_mixed_nans',
'blast7_invalid_differing_fields',
'blast7_invalid_no_data',
'blast7_invalid_too_many_columns',
'legacy9_and_blast7_default',
'legacy9_invalid_too_many_columns',
'legacy9_mixed_nans',
'legacy9_multi_line',
'legacy9_single_line']]
self.negatives = [get_data_path(e) for e in [
'blast7_invalid_gibberish',
'blast7_invalid_for_sniffer',
'blast7_invalid_for_sniffer_2',
'empty']]
def test_positives(self):
for fp in self.positives:
self.assertEqual(_blast7_sniffer(fp), (True, {}))
def test_negatives(self):
for fp in self.negatives:
self.assertEqual(_blast7_sniffer(fp), (False, {}))
class TestBlast7Reader(unittest.TestCase):
def test_default_valid_single_line(self):
fp = get_data_path('blast7_default_single_line')
df = _blast7_to_data_frame(fp)
exp = pd.DataFrame([['query1', 'subject2', 100.00, 8.0, 0.0, 0.0, 1.0,
8.0, 3.0, 10.0, 9e-05, 16.9]],
columns=['qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'gapopen', 'qstart', 'qend',
'sstart', 'send', 'evalue', 'bitscore'])
assert_data_frame_almost_equal(df, exp)
fp = get_data_path('legacy9_single_line')
df = _blast7_to_data_frame(fp)
exp = pd.DataFrame([['query1', 'subject1', 90.00, 7.0, 1.0, 0.0, 0.0,
8.0, 4.0, 10.0, 1e-05, 15.5]],
columns=['qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'gapopen', 'qstart', 'qend',
'sstart', 'send', 'evalue', 'bitscore'])
assert_data_frame_almost_equal(df, exp)
def test_default_valid_multi_line(self):
fp = get_data_path('blast7_default_multi_line')
df = _blast7_to_data_frame(fp)
exp = pd.DataFrame([['query1', 'subject2', 70.00, 5.0, 0.0, 0.0, 7.0,
60.0, 3.0, 100.0, 9e-05, 10.5],
['query1', 'subject2', 30.00, 8.0, 0.0, 0.0, 6.0,
15.0, 1.0, 100.0, 0.053, 12.0],
['query1', 'subject2', 90.00, 2.0, 0.0, 0.0, 9.0,
35.0, 2.0, 100.0, 0.002, 8.3]],
columns=['qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'gapopen', 'qstart', 'qend',
'sstart', 'send', 'evalue', 'bitscore'])
assert_data_frame_almost_equal(df, exp)
fp = get_data_path('legacy9_multi_line')
df = _blast7_to_data_frame(fp)
exp = pd.DataFrame([['query1', 'subject1', 90.00, 7.0, 1.0, 0.0, 0.0,
8.0, 4.0, 10.0, 1e-05, 15.5],
['query1', 'subject1', 70.00, 8.0, 0.0, 1.0, 0.0,
9.0, 5.0, 7.0, 0.231, 7.8],
['query1', 'subject1', 90.00, 5.0, 1.0, 1.0, 0.0,
0.0, 2.0, 10.0, 0.022, 13.0]],
columns=['qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'gapopen', 'qstart', 'qend',
'sstart', 'send', 'evalue', 'bitscore'])
assert_data_frame_almost_equal(df, exp)
def test_default_valid_mixed_output(self):
fp = get_data_path('legacy9_and_blast7_default')
df = _blast7_to_data_frame(fp)
exp = pd.DataFrame([['query2', 'subject2', 100.00, 8.0, 0.0, 1.0, 0.0,
9.0, 3.0, 10.0, 2e-05, 9.8],
['query2', 'subject1', 70.00, 9.0, 1.0, 0.0, 1.0,
8.0, 4.0, 9.0, 0.025, 11.7]],
columns=['qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'gapopen', 'qstart', 'qend',
'sstart', 'send', 'evalue', 'bitscore'])
assert_data_frame_almost_equal(df, exp)
def test_custom_valid_minimal(self):
fp = get_data_path("blast7_custom_minimal")
df = _blast7_to_data_frame(fp)
exp = pd.DataFrame([['query1']], columns=['qseqid'])
assert_data_frame_almost_equal(df, exp)
def test_custom_valid_single_line(self):
fp = get_data_path("blast7_custom_single_line")
df = _blast7_to_data_frame(fp)
exp = pd.DataFrame([['query1', 100.00, 100.00, 8.0, 0.0, 16.9, 8.0,
'PAAWWWWW']],
columns=['qseqid', 'ppos', 'pident', 'length',
'sgi', 'bitscore', 'qend', 'qseq'])
assert_data_frame_almost_equal(df, exp)
def test_custom_valid_multi_line(self):
fp = get_data_path("blast7_custom_multi_line")
df = _blast7_to_data_frame(fp)
exp = pd.DataFrame([[1.0, 8.0, 3.0, 10.0, 8.0, 0.0, 1.0, 'query1',
'subject2'],
[2.0, 5.0, 2.0, 15.0, 8.0, 0.0, 2.0, 'query1',
'subject2'],
[1.0, 6.0, 2.0, 12.0, 8.0, 0.0, 1.0, 'query1',
'subject2']],
columns=['qstart', 'qend', 'sstart', 'send',
'nident', 'mismatch', 'sframe',
'qaccver', 'saccver'])
assert_data_frame_almost_equal(df, exp)
def test_custom_valid_mixed_nans(self):
fp = get_data_path("blast7_custom_mixed_nans")
df = _blast7_to_data_frame(fp)
exp = pd.DataFrame([[0.0, np.nan, 8.0, 13.0, 1.0, 1.0, np.nan,
'subject2'],
[np.nan, 0.0, 8.0, np.nan, 1.0, 1.0, 'query1',
np.nan]],
columns=['qgi', 'sgi', 'qlen', 'slen', 'qframe',
'sframe', 'qseqid', 'sseqid'])
assert_data_frame_almost_equal(df, exp)
def test_legacy9_valid_mixed_nans(self):
fp = get_data_path("legacy9_mixed_nans")
df = _blast7_to_data_frame(fp)
exp = pd.DataFrame([[np.nan, 'subject1', np.nan, 7.0, 1.0, 0.0, np.nan,
8.0, 4.0, 10.0, np.nan, 15.5],
['query2', 'subject1', 90.00, 8.0, np.nan, 0.0,
0.0, 8.0, np.nan, 9.0, 1e-05, np.nan]],
columns=['qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'gapopen', 'qstart', 'qend',
'sstart', 'send', 'evalue', 'bitscore'])
assert_data_frame_almost_equal(df, exp)
def test_differing_fields_error(self):
fp = get_data_path("blast7_invalid_differing_fields")
with self.assertRaisesRegex(
BLAST7FormatError,
"Fields \[.*'qseqid', .*'sseqid', .*'qstart'\]"
" do.*\[.*'qseqid', .*'sseqid', .*'score'\]"):
_blast7_to_data_frame(fp)
fp = get_data_path("legacy9_invalid_differing_fields")
with self.assertRaisesRegex(
BLAST7FormatError,
"Fields \[.*'qseqid', .*'sseqid', .*'qstart'\]"
" do.*\[.*'qseqid', .*'sseqid', "
".*'sallseqid'\]"):
_blast7_to_data_frame(fp)
def test_no_data_error(self):
fp = get_data_path("blast7_invalid_gibberish")
with self.assertRaisesRegex(BLAST7FormatError,
"File contains no"):
_blast7_to_data_frame(fp)
fp = get_data_path("blast7_invalid_no_data")
with self.assertRaisesRegex(BLAST7FormatError,
"File contains no"):
_blast7_to_data_frame(fp)
fp = get_data_path("empty")
with self.assertRaisesRegex(BLAST7FormatError,
"File contains no"):
_blast7_to_data_frame(fp)
def test_wrong_amount_of_columns_error(self):
fp = get_data_path("blast7_invalid_too_many_columns")
with self.assertRaisesRegex(BLAST7FormatError,
"Number of fields.*\(2\)"):
_blast7_to_data_frame(fp)
fp = get_data_path("legacy9_invalid_too_many_columns")
with self.assertRaisesRegex(BLAST7FormatError,
"Number of fields.*\(12\)"):
_blast7_to_data_frame(fp)
def test_unrecognized_field_error(self):
fp = get_data_path("blast7_invalid_unrecognized_field")
with self.assertRaisesRegex(BLAST7FormatError,
"Unrecognized field \(.*'sallid'\)"):
_blast7_to_data_frame(fp)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
pedrofeijao/RINGO | src/ringo/ml_estimate.py | 1 | 7248 | #!/usr/bin/env python2
import argparse
import collections
import os
import pyximport;
import sys
pyximport.install()
from model import BPGraph, CType
import file_ops
import numpy as np
from operator import mul
import itertools
from decimal import Decimal
import random
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.
import matplotlib.pyplot as plt
def expected_dcj_distance(genome1, genome2, n=0):
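    # Estimate the true number of DCJ steps between genome1 and genome2 from the
    # observed breakpoint count BP; BP is capped at n - 1 so the logarithm stays
    # finite.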
a = len(genome1.adjacency_set())
a2 = len(genome2.adjacency_set())
BP = a - len(genome1.common_adjacencies(genome2))
g = len(genome1.gene_set())
# n = np.math.sqrt(g)
n = g
if BP == n:
BP = n-1
# import ipdb;ipdb.set_trace()
return np.math.log(1.0 - (BP * (2.0*n - 1)) / (a * (2.0*n - 2))) / np.math.log(1 - (1.0 / (n - 1)) - 1.0 / n)
def probability(n, cycle_dist, st):
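    # Number of length-`st` sorting scenarios for this cycle distribution divided
    # by the (n*(n-1))**st possible step sequences of that length.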
return Decimal(n_scenarios(cycle_dist, st)) / (n * (n - 1)) ** (st)
def cycle_splits(size):
# total: s(s-1)/2
# s of (1,s-1)
# s of (2,s-2)
# s of (3,s-3)
# ...
# and s/2 of (s/2,s/2) if s even
    for i in range(1, (size - 1) / 2 + 1):  # Python 2 integer division rounds down (floor)
yield size, (i, size - i)
if size % 2 == 0:
yield size / 2, (size / 2, size / 2)
def memoize(f):
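    # Minimal memoization helper: cache f's results keyed by its (hashable)
    # positional arguments.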
cache = {}
return lambda *args: cache[args] if args in cache else cache.update({args: f(*args)}) or cache[args]
@memoize
def n_scenarios(cycle_dist, steps):
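    # Count the operation sequences of length `steps` for a breakpoint graph
    # whose cycles have the lengths in `cycle_dist` (n genes, c cycles, distance
    # d = n - c): closed-form expressions handle steps == d and steps == d + 1;
    # longer scenarios recurse over all possible cycle splits, freezes and merges.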
n = sum(cycle_dist)
c = len(cycle_dist)
dist = n - c
if steps < dist:
return 0
# d+1 I know:
elif steps == dist + 1:
l = [(cycle - 1) for cycle in cycle_dist if cycle > 1]
m = reduce(mul, [(l_i + 1) ** (l_i - 1) for l_i in l], 1)
s = 0
for l_p in l:
f = np.math.factorial(l_p)
s1 = sum([f*((l_p+1)**i)/np.math.factorial(i) for i in range(l_p)])
s1 *= m
s1 /= (l_p+1)**(l_p-1)
s += s1
p1 = np.math.factorial(dist + 1)*s/2
p1 /= reduce(mul, [np.math.factorial(l_i) for l_i in l], 1)
return p1
# d is simple:
elif steps == dist:
l = [(cycle - 1) for cycle in cycle_dist if cycle > 1]
p1 = np.math.factorial(dist) / reduce(mul, [np.math.factorial(l_i) for l_i in l], 1)
p2 = reduce(mul, [(l_i + 1) ** (l_i - 1) for l_i in l], 1)
return p1 * p2
else: # more steps than distance; recursive:
# generate all possible cycle distributions from the current one:
cycle_dist_count = collections.defaultdict(lambda: 0)
cycle_dist_l = list(cycle_dist)
# find all cycle splits:
for idx, size_i in enumerate(cycle_dist_l):
for qty, (size_1, size_2) in cycle_splits(size_i):
new_dist = tuple(sorted(cycle_dist_l[:idx] + [size_1, size_2] + cycle_dist_l[(idx + 1):]))
cycle_dist_count[new_dist] += qty
# cycle freezes:
# freezes: C(s_i,2) for each cycle;
n_freezes = sum([l_i * (l_i - 1) / 2 for l_i in cycle_dist])
cycle_dist_count[cycle_dist] += n_freezes
# cycle merges:
# s_i x s_j of (s_i+s_j) for each pair
for i, j in itertools.combinations(range(len(cycle_dist)), 2):
l_i, l_j = cycle_dist[i], cycle_dist[j]
new_dist = tuple(sorted(cycle_dist_l[:i] + cycle_dist_l[(i + 1):j] + [l_i + l_j] + cycle_dist_l[(j + 1):]))
cycle_dist_count[new_dist] += 2 * l_i * l_j
# print cycle_dist_count
return sum(
[count_i * n_scenarios(cycle_dist_i, steps - 1) for cycle_dist_i, count_i in cycle_dist_count.iteritems()])
def random_walk(g1, g2, steps, n_walks=100000):
adj_2 = sorted(g2.adjacency_set())
hit = 0
for i in range(n_walks):
adj_1 = [[a, b] for a, b in g1.adjacency_set()]
for j in range(steps):
p, q = random.sample(range(len(adj_1)), 2)
if p < q:
adj_1[p][0], adj_1[q][0] = adj_1[q][0], adj_1[p][0]
else:
adj_1[p][0], adj_1[q][1] = adj_1[q][1], adj_1[p][0]
adj_1 = sorted([tuple(sorted((a, b))) for a, b in adj_1])
if adj_1 == adj_2:
hit += 1
print "hits: %e" % (float(hit) / n_walks)
# def sort_cycle_with_one_freeze(n):
# return sum([reduce(mul, [x for x in range(k+1, n+1)]) * (n ** k) for k in range(n - 1)])/2
def sort_cycle_with_one_freeze(n):
f = np.math.factorial(n)
return sum([f * n ** k / np.math.factorial(k) for k in range(n - 1)])/2
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Finds the ML estimate for the DCJ distance between 2 genomes.")
parser.add_argument("-g", type=str, nargs='+', help="Genomes file(s)")
parser.add_argument("-i", type=int, nargs=2, default=[0, 1], help="Idx of the genomes")
param = parser.parse_args()
# n = 100
# print n_scenarios((n,), n)
# print sort_cycle_with_one_freeze(n)
# print sort_cycle_with_one_freeze2(n)
# print ",".join(map(str, [(i,n_scenarios((i,), i)) for i in range(30, 31)]))
# print ",".join(map(str, [ (n ,sort_cycle_with_one_freeze(n)) for n in range(30, 31)]))
# sys.exit()
n1, n2 = param.i
for filename in param.g:
genomes = file_ops.open_genome_file(filename, as_list=True)
g1 = genomes[int(n1)]
g2 = genomes[int(n2)]
bp = BPGraph(g1, g2)
n = len(bp.common_AB)
c = len(bp.type_dict[CType.CYCLE])
cycle_distribution = tuple(sorted([len(x) / 2 for x in bp.type_dict[CType.CYCLE]]))
# cycle_distribution = tuple([len(x) / 2 for x in bp.type_dict[CType.CYCLE]])
d = n - c
x = []
y = []
last_step = 0
down = 0
max_p = 0
max_k = 0
# DCJ estimate:
est_DCJ = expected_dcj_distance(g1,g2)
print "Distance:%d" % d,
print " Estimate: %.1f" % est_DCJ
        # if there is no common adjacency the estimate diverges to infinity, as it does in the DCJ estimate;
if all([c > 1 for c in cycle_distribution]):
            # work around this: randomly pick a cycle and split a 1-cycle (a common adjacency) off it;
cycle_distribution = list(cycle_distribution)
random.shuffle(cycle_distribution)
cycle = cycle_distribution.pop()
cycle_distribution = tuple(sorted([1, cycle - 1] + cycle_distribution))
for i in range(3*n):
prob = probability(n, cycle_distribution, d + i)
print >> sys.stderr, "Steps:%d P:%e" % (d + i, prob)
x.append(d + i)
y.append(prob)
if prob < last_step:
down += 1
if down == 2:
break
else:
down = 0
if max_p < prob:
max_p = prob
max_k = i + d
last_step = prob
plt.plot(x, y, 'o-')
plt.savefig(os.path.join(os.path.dirname(filename), 'ml.pdf'), bbox_inches='tight')
print "Max:", max_k
# save results:
with open(filename+".ml", "w") as f:
print >> f, "DCJ\tML\tEDCJ"
print >> f, "%d\t%d\t%.1f" % (d, max_k, est_DCJ)
| mit |
beeva-nievesabalos/ml-classifier-ships | ships_classifier.py | 1 | 4117 | # -*- coding: utf-8 -*-
import numpy as np
from scipy import sparse as sp
from sklearn import multiclass
from sklearn.cross_validation import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import os
from sklearn.externals import joblib
from sklearn.metrics import f1_score
pathDataC = '../Output/Posiciones Coasters/'
pathDataH = '../Output/Posiciones Handies/'
pathDataP = '../Output/Posiciones Panamax/'
# CLASSIFIER by ship CAPACITY
# coasters c 0
# handies h 1
# panamax/capes p 2
# PART 1: data input
# Matrix X
files=[]
tag_array=[]
for messagefile in os.listdir(pathDataC):
#print("\nFichero DATA coaster: " + pathDataC + messagefile)
    # Open the file I am going to read
f = open(pathDataC + messagefile, 'r')
files.append(f.read())
tag_array.append(0)
for messagefile in os.listdir(pathDataH):
#print("\nFichero DATA handies: " + pathDataH + messagefile)
    # Open the file I am going to read
f = open(pathDataH + messagefile, 'r')
files.append(f.read())
tag_array.append(1)
for messagefile in os.listdir(pathDataP):
#print("\nFichero DATA panamax-capes: " + pathDataP + messagefile)
    # Open the file I am going to read
f = open(pathDataP + messagefile, 'r')
files.append(f.read())
tag_array.append(2)
# Vector Y (see legend above)
Y=np.array(tag_array)
#joblib.dump(Y,'models/ships/vector_y.txt')
print("\n=======================================")
print("\nNúmero de correos TOTAL:")
print len(files)
filess=[f.split() for f in files]
# PART 2: data processing, word counting
words={}
indcount=0
for f in filess:
for w in f:
if w not in words:
words[w]=indcount
indcount+=1
print("\nNúmero de palabras (total):")
print len(words)
joblib.dump(words,'models/ships_words.txt')
indices=[]
indptr=[0]
c=0
for iemail,f in enumerate(filess):
d=[words[w] for w in f]
c+=len(d)
indptr.append(c)
indices.extend(d)
print("\nNúmero de indices:")
print len(indices)
# Vector X: matrix with the word counts
X=sp.csr_matrix((np.ones(len(indices)),indices,indptr),shape=(len(files),len(words)))
#joblib.dump(X,'models/ships/vector_x.txt')
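# Note: X is assembled directly in CSR form: `indices` lists the column (word id) of
# every word occurrence, `indptr` marks where each document's occurrences begin and
# end, and the data is all ones, so repeated words in a document effectively sum.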
# PART 3: training
skf = StratifiedKFold(Y, 5) # was 5 before
# Compute Y prediction
y_true=[]
y_predT=[]
for indtrain, indtest in skf:
xTrain=X[indtrain,:]
yTrain=Y[indtrain]
xTest=X[indtest,:]
yTest=Y[indtest]
y_true.extend(yTest)
# y_pred = multiclass.OneVsRestClassifier(LinearSVC(random_state=0)).fit(xTrain,yTrain).predict(xTest)
y_pred = multiclass.OneVsRestClassifier(LogisticRegression()).fit(xTrain,yTrain).predict(xTest)
y_predT.extend(y_pred)
# PART 3B: save the model
model = multiclass.OneVsRestClassifier(LogisticRegression()).fit(X,Y)
joblib.dump(model,'models/ships_model.txt')
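# Illustrative sketch (not part of the original pipeline; the helper name is
# hypothetical): a new message has to be vectorised with the same `words` vocabulary
# used for training, dropping out-of-vocabulary words, before calling model.predict.
def vectorize_message(text, vocab=words):
    idx = [vocab[w] for w in text.split() if w in vocab]
    return sp.csr_matrix((np.ones(len(idx)), idx, [0, len(idx)]),
                         shape=(1, len(vocab)))
# e.g. predicted class (0=coaster, 1=handy, 2=panamax/capes) for the first training file:
print(model.predict(vectorize_message(files[0])))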
# PART 4: evaluation: y (actual) vs ŷ (estimated)
print("\n============ EVALUATION ================")
print("\nAccuracy Score:")
print accuracy_score(y_true, y_predT, normalize=True)
#print("\nY estimada:")
#print y_predT
print("\n=======================================")
#f1= f1_score(y_true, y_predT, average='macro')
#print(f1)
#print("\n=======================================")
#f2= f1_score(y_true, y_predT, average='micro')
#print(f2)
print("\n=======================================")
f3= f1_score(y_true, y_predT, average='weighted')
print(f3)
print("\n=======================================")
#f4= f1_score(y_true, y_predT, average=None)
#print(f4)
#print("\n=======================================")
# Compute confusion matrix
cm = confusion_matrix(y_true, y_predT)
print("\nConfusion matrix:")
print(cm)
# Show confusion matrix in a separate window
#pl.matshow(cm)
#pl.title('Confusion matrix')
#pl.colorbar()
#pl.ylabel('True label')
#pl.xlabel('Predicted label')
#pl.show()
| mit |
nchaparr/Sam_Output_Anls | plot_theta_profs.py | 1 | 4367 | import numpy as np
from scipy.interpolate import interp1d
import matplotlib
import matplotlib.pyplot as plt
from Make_Timelist import *
import sys
sys.path.insert(0, '/tera/phil/nchaparr/python')
import nchap_fun as nc
from matplotlib import rcParams
rcParams.update({'font.size': 10})
"""
For plotting the (scaled) temperature gradient and flux profiles.
"""
Fig1 = plt.figure(1)
Fig1.clf()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
Ax = Fig1.add_subplot(111)
#Ax.set_title( r'$\theta$', fontsize=20)
#Ax1.set_title( r'$\frac{\partial \theta}{\partial z}$', fontsize=20)
#Ax1.set_xlabel(r"$\frac{\frac{\partial \theta}{\partial z}}{\gamma}$", fontsize=20)
#Ax.set_xlabel(r"$\frac{ \partial \overline{\theta} }{\partial z} / \gamma$", fontsize=25)
Ax.set_xlabel(r"$\hat{\frac{\partial \overline{\theta}}{\partial z}}$", fontsize=35)
#Ax.set_xlabel(r"$\overline{w^{,}\theta^{,}}$", fontsize=20)
#Ax1.set_ylabel(r"$\frac{z}{h}$", fontsize=20)
Ax.set_ylabel(r"$\frac{z}{z_{g}}$", fontsize=40)
plt.xlim(-.1, 2)
#plt.xlim(-.0002, .019)
#plt.ylim(50, 950)
plt.ylim(0.2, 1.4)
dump_time_list, Times = Make_Timelists(1, 600, 28800)
marker_list=['ko', 'kv', 'yo', 'y*', 'ro', 'yv', 'rv']
legend_list=["100/5", "100/10", "60/5", "60/2.5", "150/5", "60/10", "150/10"]
run_name_list = ["Nov302013", "Dec142013", "Dec202013", "Dec252013", "Jan152014_1", "Mar12014", "Mar52014"]
flux_list = [100, 100, 60, 60, 150, 60, 150]
gamma_list=[.005, .01, .005, .0025, .005, .01, .01]
#choose a dumptime
#dump_time, Time = dump_time_list[time_index], Times[time_index]
##print Time
dump_time = "0000010800"
dump_time_index=29
dump_time_index0=19
theta_file_list = ["/tera/users/nchaparr/"+run_name+"/data/theta_bar"+ dump_time for run_name in run_name_list]
press_file_list = ["/tera/users/nchaparr/"+run_name+"/data/press"+ dump_time for run_name in run_name_list]
flux_file_list = ["/tera/users/nchaparr/"+run_name+"/data/wvelthetapert"+ dump_time for run_name in run_name_list]
height_file_list = ["/tera/users/nchaparr/"+run_name+"/data/heights0000000600" for run_name in run_name_list]
AvProfVars_list = ["/tera/users/nchaparr/"+run_name+"/data/AvProfLims" for run_name in run_name_list]
#loop over text files files
for i in range(len(theta_file_list)):
run_name = run_name_list[i]
theta = np.genfromtxt(theta_file_list[i])
height = np.genfromtxt(height_file_list[i])
gamma = gamma_list[i]
press = np.genfromtxt(press_file_list[i])
rhow = nc.calc_rhow(press, height, theta[0])
wvelthetapert = np.genfromtxt(flux_file_list[i])
wvelthetapert[0] = np.nan
AvProfVars = np.genfromtxt(AvProfVars_list[i])
#Now for the gradients
dheight = np.diff(height)
dtheta = np.diff(theta)
dthetadz = np.divide(dtheta, dheight)
element0 = np.array([0])
dthetadz=np.hstack((element0, 1.0*dthetadz))*1.0/gamma
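    # dthetadz is the vertical gradient of potential temperature, padded with a
    # leading zero to match the height levels and normalised by the lapse rate gamma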
    #only need heights up to ~1670 m
top_index = np.where(abs(1670 - height) < 40.)[0][0]
#where gradient is max, and flux is min
##print AvProfVars[:,1].shape, height.shape
if run_name == "Nov302013":
h1 = AvProfVars[dump_time_index0, 1]
else:
h1 = AvProfVars[dump_time_index, 1]
h_index=np.where(dthetadz - np.amax(dthetadz[:top_index])==0)[0][0]
h=height[h_index]
scaled_height = [1.0*ht/h for ht in height]
#print h1, h_index, height[h_index]
fluxes = np.multiply(wvelthetapert, rhow)*1004.0/flux_list[i]
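    # convert the kinematic flux w'theta' to W m^-2 with rho*cp (cp ~ 1004 J kg^-1 K^-1)
    # and normalise by the prescribed surface flux for this run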
Ax.text(1.8, 1.29, r'(b)', fontsize=30)
#Ax.text(.017, 1.29, r'(a)', fontsize=30)
Ax.plot(dthetadz, scaled_height, marker_list[i], label = legend_list[i], markersize=10) #,
zeros = np.zeros_like(height)
Ax.plot(zeros+.03, scaled_height, 'k-')
Ax.plot(zeros+1, scaled_height, 'k-')
#Ax.plot(zeros+.005, scaled_height, 'k-')
#Ax.plot(zeros+.0025, scaled_height, 'k-')
#Ax.plot(zeros+.01, scaled_height, 'k-')
#Ax.legend(numpoints=1, loc = 'lower right', prop={'size':14})
#Ax.set_xticks([.0025, .005, .01])
#Ax.set_xticklabels([".0025", ".005", ".01"])
Ax.set_xticks([0.03, 1])
Ax.set_xticklabels([0.03, 1])
Ax.tick_params(axis="both", labelsize=25)
plt.tight_layout()
plt.show()
#Fig1.savefig('/tera/phil/nchaparr/python/Plotting/Dec252013/pngs/theta_profs2hrs.png')
#Fig1.savefig('/tera/phil/nchaparr/python/Plotting/Dec252013/pngs/flux_profs2hrs.png')
| mit |
junpenglao/GLMM-in-Python | Playground.py | 1 | 19853 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 12 16:55:33 2016
@author: laoj
Experiment with different package
"""
#%% simulation data
import numpy as np
import matplotlib.pylab as plt
import seaborn as sns
M1 = 6 # number of columns in X - fixed effect
N1 = 10 # number of columns in L - random effect
nobs = 20
# generate design matrix using patsy
import statsmodels.formula.api as smf
from patsy import dmatrices
import pandas as pd
predictors = []
for s1 in range(N1):
for c1 in range(2):
for c2 in range(3):
for i in range(nobs):
predictors.append(np.asarray([c1+1,c2+1,s1+1]))
tbltest = pd.DataFrame(predictors, columns=['Condi1', 'Condi2', 'subj'])
tbltest['Condi1'] = tbltest['Condi1'].astype('category')
tbltest['Condi2'] = tbltest['Condi2'].astype('category')
tbltest['subj'] = tbltest['subj'].astype('category')
tbltest['tempresp'] = np.random.normal(size=(nobs*M1*N1,1))*10
Y, X = dmatrices("tempresp ~ Condi1*Condi2", data=tbltest, return_type='matrix')
Terms = X.design_info.column_names
_, Z = dmatrices('tempresp ~ -1+subj', data=tbltest, return_type='matrix')
X = np.asarray(X) # fixed effect
Z = np.asarray(Z) # mixed effect
Y = np.asarray(Y)
N,nfixed = np.shape(X)
_,nrandm = np.shape(Z)
# generate data
w0 = [5.0, 1.0, 2.0, 8.0, 1.0, 1.0] + np.random.randn(6)
#w0 -= np.mean(w0)
#w0 = np.random.normal(size=(M,))
z0 = np.random.normal(size=(N1,))*10
Pheno = np.dot(X,w0) + np.dot(Z,z0) + Y.flatten()
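# i.e. the simulated response follows a linear mixed model:
# Pheno = X.dot(w0) (fixed effects) + Z.dot(z0) (per-subject random intercepts) + noise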
beta0 = np.linalg.lstsq(X,Pheno)
fixedpred = np.argmax(X,axis=1)
randmpred = np.argmax(Z,axis=1)
tbltest['Pheno'] = Pheno
md = smf.mixedlm("Pheno ~ Condi1*Condi2", tbltest, groups=tbltest["subj"])
mdf = md.fit()
Y = np.expand_dims(Pheno,axis=1)
fitted=mdf.fittedvalues
fe_params = pd.DataFrame(mdf.fe_params,columns=['LMM'])
fe_params.index=Terms
random_effects = pd.DataFrame(mdf.random_effects)
random_effects = random_effects.transpose()
random_effects = random_effects.rename(index=str, columns={'groups': 'LMM'})
#%% Real data
Tbl_beh = pd.read_csv('./behavioral_data.txt', delimiter='\t')
Tbl_beh["subj"] = Tbl_beh["subj"].astype('category')
tbltest = Tbl_beh
formula = "rt ~ group*orientation*identity"
#formula = "rt ~ -1 + cbcond"
md = smf.mixedlm(formula, tbltest, groups=tbltest["subj"])
mdf = md.fit()
Y, X = dmatrices(formula, data=tbltest, return_type='matrix')
Terms = X.design_info.column_names
_, Z = dmatrices('rt ~ -1+subj', data=tbltest, return_type='matrix')
X = np.asarray(X) # fixed effect
Z = np.asarray(Z) # mixed effect
Y = np.asarray(Y)
N,nfixed = np.shape(X)
_,nrandm = np.shape(Z)
fe_params = pd.DataFrame(mdf.fe_params,columns=['LMM'])
random_effects = pd.DataFrame(mdf.random_effects)
random_effects = random_effects.transpose()
random_effects = random_effects.rename(index=str, columns={'groups': 'LMM'})
fitted=mdf.fittedvalues
#%% ploting function
def plotfitted(fe_params=fe_params,random_effects=random_effects,X=X,Z=Z,Y=Y):
plt.figure(figsize=(18,9))
ax1 = plt.subplot2grid((2,2), (0, 0))
ax2 = plt.subplot2grid((2,2), (0, 1))
ax3 = plt.subplot2grid((2,2), (1, 0), colspan=2)
fe_params.plot(ax=ax1)
random_effects.plot(ax=ax2)
ax3.plot(Y.flatten(),'o',color='k',label = 'Observed', alpha=.25)
for iname in fe_params.columns.get_values():
fitted = np.dot(X,fe_params[iname])+np.dot(Z,random_effects[iname]).flatten()
print("The MSE of "+iname+ " is " + str(np.mean(np.square(Y.flatten()-fitted))))
ax3.plot(fitted,lw=1,label = iname, alpha=.5)
ax3.legend(loc=0)
#plt.ylim([0,5])
plt.show()
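# plotfitted overlays the observed responses with each package's fitted values and
# prints the per-package mean squared error, so the estimators can be compared directly.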
plotfitted(fe_params=fe_params,random_effects=random_effects,X=X,Z=Z,Y=Y)
#%% Bambi (tested on version 0.1.0)
from bambi import Model
from scipy.stats import norm
# Assume we already have our data loaded
model = Model(tbltest)
model.add(formula)
model.add(random=['1|subj'],
categorical=['group','orientation','identity','subj'])
model.build(backend='pymc')
# Plot prior
#p = len(model.terms)
#fig, axes = plt.subplots(int(np.ceil(p/2)), 2, figsize=(12,np.ceil(p/2)*2))
#
#for i, t in enumerate(model.terms.values()):
# m = t.prior.args['mu']
# sd = t.prior.args['sd']
# x = np.linspace(m - 3*sd, m + 3*sd, 100)
# y = norm.pdf(x, loc=m, scale=sd)
# axes[divmod(i,2)[0], divmod(i,2)[1]].plot(x,y)
# axes[divmod(i,2)[0], divmod(i,2)[1]].set_title(t.name)
#plt.subplots_adjust(wspace=.25, hspace=.5)
model.plot_priors(varnames=['Intercept','group','orientation',
'identity','group:orientation','group:identity',
'orientation:identity','group:orientation:identity'])
plt.show()
results = model.fit(formula, random=['1|subj'],
categorical=['group','orientation','identity','subj'],
samples=2000, chains=2)
_ = results.plot(varnames=['Intercept','group','orientation',
'identity','group:orientation','group:identity',
'orientation:identity','group:orientation:identity'])
_ = results.plot(varnames=['1|subj'])
burn_in=1000
summary = results[burn_in:].summary(ranefs=True)
print(summary)
# tracedf = results[burn_in:].to_df(ranefs=True)
fe_params['Bambi'] = summary[summary.index.isin(fe_params.index)]['mean']
random_effects['Bambi'] = summary.loc[['1|subj['+ind_re+']'
for ind_re in random_effects.index]]['mean'].values
#%% Tensorflow
import tensorflow as tf
tf.reset_default_graph()
def tfmixedmodel(X, beta, Z, b):
with tf.name_scope("fixedEffect"):
fe = tf.matmul(X, beta)
with tf.name_scope("randomEffect"):
re = tf.matmul(Z, b)
#randcoef = tf.matmul(Z, b)
#Xnew = tf.transpose(X) * tf.transpose(randcoef)
#y_pred = tf.matmul(tf.transpose(Xnew), beta)
return tf.add(fe,re) # notice we use the same model as linear regression,
    # (i.e. an ordinary linear predictor); the cost is defined separately below
    # (mean squared error here, ELBO in the variational section)
Xtf = tf.placeholder("float32", [None, nfixed]) # create symbolic variables
Ztf = tf.placeholder("float32", [None, nrandm])
y = tf.placeholder("float32", [None, 1])
beta_tf = tf.Variable(tf.random_normal([nfixed, 1], stddev=1, name="fixed_beta"))
b_tf = tf.Variable(tf.random_normal([nrandm, 1], stddev=1, name="random_b"))
b_tf = b_tf - tf.reduce_mean(b_tf)
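# centre the random effects at zero so the fixed-effect intercept absorbs their mean,
# mimicking the usual mixed-model identifiability constraint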
eps_tf = tf.Variable(tf.random_normal([0], stddev=1, name="eps"))
y_ = tfmixedmodel(Xtf, beta_tf, Ztf, b_tf)
#y_ = tf.nn.softmax(tf.matmul(Xtf, beta) + tf.matmul(Ztf, b) + eps)
# Add histogram summaries for weights
tf.summary.histogram("fixed", beta_tf)
tf.summary.histogram("random", b_tf)
nb_epoch = 5000
batch_size = 100
with tf.name_scope("cost"):
#cost = tf.reduce_sum(tf.pow(y - y_, 2))
#train_step = tf.train.RMSPropOptimizer(0.01, epsilon=1.0).minimize(cost)
cost = tf.reduce_mean(tf.square(y - y_)) # use square error for cost function
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
# Add scalar summary for cost
tf.summary.scalar("cost", cost)
with tf.name_scope("SSE"):
sse = tf.reduce_mean(tf.cast(cost, tf.float32))
# Add scalar summary for SSE
tf.summary.scalar("SSE", sse)
with tf.Session() as sess:
# create a log writer. run 'tensorboard --logdir=/tmp/GLMMtest'
writer = tf.summary.FileWriter("/tmp/GLMMtest", sess.graph) # for 0.8
merged = tf.summary.merge_all()
# you need to initialize all variables
tf.global_variables_initializer().run()
for i in range(nb_epoch):
shuffleidx = np.random.permutation(N)
for start, end in zip(range(0, N, batch_size), range(batch_size, N, batch_size)):
batch_xs, batch_zs, batch_ys = X[shuffleidx[start:end]],Z[shuffleidx[start:end]],Y[shuffleidx[start:end]]
sess.run(train_step, feed_dict={Xtf: batch_xs, Ztf: batch_zs, y: batch_ys})
summary, acc = sess.run([merged, sse],
feed_dict={Xtf: X, Ztf: Z, y: Y})
writer.add_summary(summary, i) # Write summary
if (i % 100 == 0):
print(i, acc)
betatf = sess.run(beta_tf)
btf = sess.run(b_tf)
fe_params['TF'] = pd.Series(betatf.flatten(), index=fe_params.index)
random_effects['TF'] = pd.Series(btf.flatten(), index=random_effects.index)
#%% variational inference (using tensorflow)
"""
Using mean field variational inference on ELBO as explained in
https://github.com/blei-lab/edward/blob/master/edward/inferences/klqp.py
WARNING: SLOW
"""
tf.reset_default_graph()
Xtf = tf.placeholder("float32", [None, nfixed]) # create symbolic variables
Ztf = tf.placeholder("float32", [None, nrandm])
y = tf.placeholder("float32", [None, 1])
priorstd = 1
from tensorflow.contrib.distributions import Normal
#fixed effect
eps_fe = tf.random_normal([nfixed, 1], name='eps_fe')
beta_mu = tf.Variable(tf.random_normal([nfixed, 1], stddev=priorstd), name="fixed_mu")
##diag cov
beta_logvar = tf.Variable(tf.random_normal([nfixed, 1], stddev=priorstd), name="fixed_logvar")
std_encoder1 = tf.exp(0.5 * beta_logvar)
beta_tf = Normal(loc=beta_mu, scale=std_encoder1)
#random effect
eps_rd = tf.random_normal([nrandm, 1], name='eps_rd')
b_mu = tf.Variable(tf.random_normal([nrandm, 1], stddev=priorstd), name="randm_mu")
b_mu = b_mu - tf.reduce_mean(b_mu)
b_logvar = tf.Variable(tf.random_normal([nrandm, 1], stddev=priorstd), name="randm_logvar")
std_encoder2 = tf.exp(0.5 * b_logvar)
b_tf = Normal(loc=b_mu, scale=std_encoder2)
# MixedModel
y_mu = tfmixedmodel(Xtf, beta_mu, Ztf, b_mu)
# Add histogram summaries for weights
tf.summary.histogram("fixed", beta_mu)
tf.summary.histogram("random", b_mu)
nb_epoch = 1000
batch_size = 100
priormu, priorsigma, priorliksigma= 0.0, 100.0, 10.0
n_samples = 5 #5-10 might be enough
with tf.name_scope("cost"):
#mean_squared_error
RSEcost = tf.reduce_mean(tf.square(y - y_mu)) # use square error for cost function
# #negative log-likelihood (same as maximum-likelihood)
# y_sigma = tf.sqrt(tfmixedmodel(Xtf, tf.square(std_encoder1), Ztf, tf.square(std_encoder2)))
# NLLcost = - tf.reduce_sum(-0.5 * tf.log(2. * np.pi) - tf.log(y_sigma)
# -0.5 * tf.square((y - y_mu)/y_sigma))
#Mean-field Variational inference using ELBO
p_log_prob = [0.0] * n_samples
q_log_prob = [0.0] * n_samples
for s in range(n_samples):
beta_tf_copy = Normal(loc=beta_mu, scale=std_encoder1)
beta_sample = beta_tf_copy.sample()
q_log_prob[s] += tf.reduce_sum(beta_tf.log_prob(beta_sample))
b_tf_copy = Normal(loc=b_mu, scale=std_encoder2)
b_sample = b_tf_copy.sample()
q_log_prob[s] += tf.reduce_sum(b_tf.log_prob(b_sample))
priormodel = Normal(loc=priormu, scale=priorsigma)
y_sample = tf.matmul(Xtf, beta_sample) + tf.matmul(Ztf, b_sample)
p_log_prob[s] += tf.reduce_sum(priormodel.log_prob(beta_sample))
p_log_prob[s] += tf.reduce_sum(priormodel.log_prob(b_sample))
modelcopy = Normal(loc=y_sample, scale=priorliksigma)
p_log_prob[s] += tf.reduce_sum(modelcopy.log_prob(y))
p_log_prob = tf.stack(p_log_prob)
q_log_prob = tf.stack(q_log_prob)
ELBO = -tf.reduce_mean(p_log_prob - q_log_prob)
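    # ELBO here is actually the *negative* evidence lower bound, estimated by Monte
    # Carlo: the mean over n_samples draws of log p(y, beta, b) - log q(beta, b),
    # negated, so minimising this cost maximises the ELBO.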
#train_step = tf.train.AdamOptimizer(0.01).minimize(NLLcost)
#train_step = tf.train.AdagradOptimizer(0.1).minimize(ELBO)
#train_step = tf.train.RMSPropOptimizer(0.01, epsilon=0.1).minimize(NLLcost)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(ELBO)
# Add scalar summary for cost
tf.summary.scalar("cost", ELBO)
with tf.name_scope("MSE"):
sse = tf.reduce_mean(tf.cast(RSEcost, tf.float32))
# Add scalar summary for SSE
tf.summary.scalar("MSE", sse)
with tf.Session() as sess:
# create a log writer. run 'tensorboard --logdir=/tmp/GLMMtest'
writer = tf.summary.FileWriter("/tmp/GLMMtest", sess.graph) # for 0.8
merged = tf.summary.merge_all()
# you need to initialize all variables
tf.global_variables_initializer().run()
for i in range(nb_epoch):
shuffleidx = np.random.permutation(N)
for start, end in zip(range(0, N, batch_size), range(batch_size, N, batch_size)):
batch_xs, batch_zs, batch_ys = X[shuffleidx[start:end]],Z[shuffleidx[start:end]],Y[shuffleidx[start:end]]
sess.run(train_step, feed_dict={Xtf: batch_xs, Ztf: batch_zs, y: batch_ys})
summary, acc = sess.run([merged, sse],
feed_dict={Xtf: X, Ztf: Z, y: Y})
writer.add_summary(summary, i) # Write summary
if (i % 1000 == 0):
print(i, acc)
betatf = sess.run(beta_mu)
btf = sess.run(b_mu)
betatf_std = sess.run(std_encoder1)
btf_std = sess.run(std_encoder2)
fe_params['TF_VA'] = pd.Series(betatf.flatten(), index=fe_params.index)
random_effects['TF_VA'] = pd.Series(btf.flatten(), index=random_effects.index)
sess.close()
#%% variational inference (using Edward)
"""
https://github.com/blei-lab/edward/blob/master/notebooks/linear_mixed_effects_models.ipynb
"""
import edward as ed
import tensorflow as tf
from edward.models import Normal
#DATA
X1 = X[:,1:]
#X_mean = np.mean(X1,axis=0) # column means of X before centering
#X_cent = X1 - X_mean
#x_train, y_train = X_cent , Y
x_train,z_train,y_train = X1.astype('float32'), Z.astype('float32'), Y.flatten()
N, D = x_train.shape # num features
Db = z_train.shape[1]
# Set up placeholders for the data inputs.
Xnew = tf.placeholder(tf.float32, shape=(None, D))
Znew = tf.placeholder(tf.float32, shape=(None, Db))
# MODEL
Wf = Normal(loc=tf.zeros([D]), scale=tf.ones([D]))
Wb = Normal(loc=tf.zeros([Db]), scale=tf.ones([Db]))
Ib = Normal(loc=tf.zeros([1]), scale=tf.ones(1))
# INFERENCE
qi_mu = tf.Variable(tf.random_normal([1]))
qi_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([1])))
qi = Normal(loc=qi_mu, scale=qi_sigma)
#qw_mu = tf.expand_dims(tf.convert_to_tensor(beta0[0].astype(np.float32)),1)
qw_mu = tf.Variable(tf.random_normal([D]))
qw_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([D])))
qw = Normal(loc=qw_mu, scale=qw_sigma)
#qb_mu = tf.Variable(tf.random_normal([Db,1]))
qb_mu = tf.Variable(tf.random_normal([Db])) #force the random coefficients to be centred at zero
#qb_mu = qb_mu - tf.reduce_mean(qb_mu)
qb_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([Db])))
qb = Normal(loc=qb_mu, scale=qb_sigma)
yhat = ed.dot(Xnew, Wf)+ed.dot(Znew, Wb)+Ib
y = Normal(loc=yhat, scale=tf.ones(N))
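# Model: y ~ Normal(X*Wf + Z*Wb + Ib, 1); ed.KLqp below fits the Normal variational
# factors qw, qb and qi to approximate the posterior over the coefficients.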
sess = ed.get_session()
inference = ed.KLqp({Wf: qw, Wb: qb, Ib: qi},
data={y: y_train, Xnew: x_train, Znew:z_train})
#optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
optimizer = tf.train.RMSPropOptimizer(1., epsilon=1.0)
#optimizer = tf.train.GradientDescentOptimizer(0.01)
inference.run(optimizer=optimizer, n_samples=20, n_iter=10000)
#inference.run(n_samples=20, n_iter=20000)
i_mean, i_std, w_mean, w_std, b_mean, b_std = sess.run([qi.loc, qi.scale, qw.loc,
qw.scale,qb.loc, qb.scale])
#fixed_ed = np.hstack([i_mean+b_mean.mean(),w_mean.flatten()])
#randm_ed = b_mean-b_mean.mean()
fixed_ed = np.hstack([i_mean,w_mean.flatten()])
randm_ed = b_mean
fixed_ed_std = np.hstack([i_std, w_std.flatten()])
randm_ed_std = b_std
fitted_ed = np.dot(X,fixed_ed)+np.dot(Z,randm_ed).flatten()
fe_params['edward'] = pd.Series(fixed_ed, index=fe_params.index)
random_effects['edward'] = pd.Series(randm_ed.flatten(), index=random_effects.index)
#%% variational inference (using Edward, with sigma also modelled)
import edward as ed
import tensorflow as tf
from edward.models import Normal, Empirical, InverseGamma
#DATA
X1 = X[:,1:]
#X_mean = np.mean(X1,axis=0) # column means of X before centering
#X_cent = X1 - X_mean
#x_train, y_train = X_cent , Y
x_train,z_train,y_train = X1.astype('float32'), Z.astype('float32'), Y.flatten()
D = x_train.shape[1] # num features
Db = z_train.shape[1]
# MODEL
Wf = Normal(loc=tf.zeros([D]), scale=tf.ones([D]))
Wb = Normal(loc=tf.zeros([Db]), scale=tf.ones([Db]))
Ib = Normal(loc=tf.zeros(1), scale=tf.ones(1))
Xnew = tf.placeholder(tf.float32, shape=(None, D))
Znew = tf.placeholder(tf.float32, shape=(None, Db))
ynew = tf.placeholder(tf.float32, shape=(None, ))
sigma2 = InverseGamma(concentration=tf.ones(1)*.1, rate=tf.ones(1)*.1)
#sigma2 = Normal(loc=tf.zeros([1]), scale=tf.ones([1])*100)
y = Normal(loc=ed.dot(x_train, Wf)+ed.dot(z_train, Wb)+Ib, scale=tf.log(sigma2))
# INFERENCE
sess = ed.get_session()
T = 10000
qi = Empirical(params=tf.Variable(tf.zeros([T, 1])))
qw = Empirical(params=tf.Variable(tf.zeros([T, D])))
qb = Empirical(params=tf.Variable(tf.zeros([T, Db])))
qsigma2 = Empirical(params=tf.Variable(tf.ones([T,1])))
inference = ed.SGHMC({Wf: qw, Wb: qb, Ib: qi, sigma2: qsigma2}, data={y: y_train})
inference.run(step_size=.0005)
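# qi, qw, qb and qsigma2 are Empirical approximations holding T posterior samples each;
# the trace plots and post-burn-in means below summarise them.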
f, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex=True)
ax1.plot(qi.get_variables()[0].eval())
ax2.plot(qw.get_variables()[0].eval())
ax3.plot(qb.get_variables()[0].eval())
ax4.plot(qsigma2.get_variables()[0].eval())
burnin = int(T/2)
qi_post = qi.get_variables()[0].eval()[burnin:].mean(axis=0)
qw_post = qw.get_variables()[0].eval()[burnin:].mean(axis=0)
qb_post =qb.get_variables()[0].eval()[burnin:].mean(axis=0)
#fixed_ed = np.hstack([i_mean+b_mean.mean(),w_mean.flatten()])
#randm_ed = b_mean-b_mean.mean()
fixed_ed = np.hstack([qi_post,qw_post])
randm_ed = qb_post
fixed_ed_std = np.hstack([i_std, w_std.flatten()])
randm_ed_std = b_std
fitted_ed = np.dot(X,fixed_ed)+np.dot(Z,randm_ed).flatten()
fe_params['edward2'] = pd.Series(fixed_ed, index=fe_params.index)
random_effects['edward2'] = pd.Series(randm_ed.flatten(), index=random_effects.index)
#%% PyTorch
import torch
from torch.autograd import Variable
import torch.optim as optim
dtype = torch.FloatTensor
x_train,z_train,y_train = X.astype('float32'), Z.astype('float32'), Y.astype('float32')
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
Xt = Variable(torch.from_numpy(x_train), requires_grad=False)
Zt = Variable(torch.from_numpy(z_train), requires_grad=False)
y = Variable(torch.from_numpy(y_train), requires_grad=False)
# Create random Tensors for weights, and wrap them in Variables.
w1 = Variable(torch.randn(nfixed,1).type(dtype), requires_grad=True)
w2 = Variable(torch.randn(nrandm,1).type(dtype), requires_grad=True)
learning_rate = 1e-2
params = [w1,w2]
solver = optim.SGD(params, lr=learning_rate)
for t in range(10000):
# Forward pass: compute predicted y using operations on Variables; we compute
# ReLU using our custom autograd operation.
y_pred = Xt.mm(w1) + Zt.mm(w2)
# Compute and print loss
loss = (y_pred - y).pow(2).mean()
if (t % 1000 == 0):
print(t, loss.data[0])
# # Manually zero the gradients before running the backward pass
# w1.grad.data.zero_()
# w2.grad.data.zero_()
#
# # Use autograd to compute the backward pass.
# loss.backward()
#
# # Update weights using gradient descent
# w1.data -= learning_rate * w1.grad.data
# w2.data -= learning_rate * w2.grad.data
# Backward
loss.backward()
# Update
solver.step()
# Housekeeping
solver.zero_grad()
# for p in params:
# p.grad.data.zero_()
fe_params['PyTorch'] = pd.Series(w1.data.numpy().flatten(), index=fe_params.index)
random_effects['PyTorch'] = pd.Series(w2.data.numpy().flatten(), index=random_effects.index)
#%% ploting
plotfitted(fe_params=fe_params,random_effects=random_effects,X=X,Z=Z,Y=Y)
| gpl-3.0 |
timmie/cartopy | lib/cartopy/mpl/geoaxes.py | 2 | 68738 | # (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
This module defines the :class:`GeoAxes` class, for use with matplotlib.
When a matplotlib figure contains a GeoAxes the plotting commands can transform
plot results from source coordinates to the GeoAxes' target projection.
"""
from __future__ import (absolute_import, division, print_function)
import collections
import contextlib
import warnings
import weakref
import matplotlib as mpl
import matplotlib.artist
import matplotlib.axes
from matplotlib.image import imread
import matplotlib.transforms as mtransforms
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.ticker as mticker
import numpy as np
import numpy.ma as ma
import shapely.geometry as sgeom
from cartopy import config
import cartopy.crs as ccrs
import cartopy.feature
import cartopy.img_transform
from cartopy.mpl.clip_path import clip_path
import cartopy.mpl.feature_artist as feature_artist
import cartopy.mpl.patch as cpatch
from cartopy.mpl.slippy_image_artist import SlippyImageArtist
from cartopy.vector_transform import vector_scalar_to_grid
assert matplotlib.__version__ >= '1.3', ('Cartopy is only supported with '
'matplotlib 1.3 or greater.')
_PATH_TRANSFORM_CACHE = weakref.WeakKeyDictionary()
"""
A nested mapping from path, source CRS, and target projection to the
resulting transformed paths::
{path: {(source_crs, target_projection): list_of_paths}}
Provides a significant performance boost for contours which, at
matplotlib 1.2.0, called transform_path_non_affine twice unnecessarily.
"""
# XXX call this InterCRSTransform
class InterProjectionTransform(mtransforms.Transform):
"""
Transforms coordinates from the source_projection to
the ``target_projection``.
"""
input_dims = 2
output_dims = 2
is_separable = False
has_inverse = True
def __init__(self, source_projection, target_projection):
"""
Create the transform object from the given projections.
Args:
* source_projection - A :class:`~cartopy.crs.CRS`.
* target_projection - A :class:`~cartopy.crs.CRS`.
"""
# assert target_projection is cartopy.crs.Projection
# assert source_projection is cartopy.crs.CRS
self.source_projection = source_projection
self.target_projection = target_projection
mtransforms.Transform.__init__(self)
def __repr__(self):
return ('< {!s} {!s} -> {!s} >'.format(self.__class__.__name__,
self.source_projection,
self.target_projection))
def transform_non_affine(self, xy):
"""
Transforms from source to target coordinates.
Args:
* xy - An (n,2) array of points in source coordinates.
Returns:
* An (n,2) array of transformed points in target coordinates.
"""
prj = self.target_projection
if isinstance(xy, np.ndarray):
return prj.transform_points(self.source_projection,
xy[:, 0], xy[:, 1])[:, 0:2]
else:
x, y = xy
x, y = prj.transform_point(x, y, self.source_projection)
return x, y
def transform_path_non_affine(self, src_path):
"""
Transforms from source to target coordinates.
Caches results, so subsequent calls with the same *src_path* argument
(and the same source and target projections) are faster.
Args:
* src_path - A matplotlib :class:`~matplotlib.path.Path` object
with vertices in source coordinates.
Returns
* A matplotlib :class:`~matplotlib.path.Path` with vertices
in target coordinates.
"""
mapping = _PATH_TRANSFORM_CACHE.get(src_path)
if mapping is not None:
key = (self.source_projection, self.target_projection)
result = mapping.get(key)
if result is not None:
return result
# Allow the vertices to be quickly transformed, if
# quick_vertices_transform allows it.
new_vertices = self.target_projection.quick_vertices_transform(
src_path.vertices, self.source_projection)
if new_vertices is not None:
if new_vertices is src_path.vertices:
return src_path
else:
return mpath.Path(new_vertices, src_path.codes)
if src_path.vertices.shape == (1, 2):
return mpath.Path(self.transform(src_path.vertices))
transformed_geoms = []
# Check whether this transform has the "force_path_ccw" attribute set.
# This is a cartopy extension to the Transform API to allow finer
# control of Path orientation handling (Path ordering is not important
# in matplotlib, but is in Cartopy).
geoms = cpatch.path_to_geos(src_path,
getattr(self, 'force_path_ccw', False))
for geom in geoms:
proj_geom = self.target_projection.project_geometry(
geom, self.source_projection)
transformed_geoms.append(proj_geom)
if not transformed_geoms:
result = mpath.Path(np.empty([0, 2]))
else:
paths = cpatch.geos_to_path(transformed_geoms)
if not paths:
return mpath.Path(np.empty([0, 2]))
points, codes = list(zip(*[cpatch.path_segments(path,
curves=False,
simplify=False)
for path in paths]))
result = mpath.Path(np.concatenate(points, 0),
np.concatenate(codes))
# store the result in the cache for future performance boosts
key = (self.source_projection, self.target_projection)
if mapping is None:
_PATH_TRANSFORM_CACHE[src_path] = {key: result}
else:
mapping[key] = result
return result
def inverted(self):
"""
Return a matplotlib :class:`~matplotlib.transforms.Transform`
from target to source coordinates.
"""
return InterProjectionTransform(self.target_projection,
self.source_projection)
class GeoAxes(matplotlib.axes.Axes):
"""
A subclass of :class:`matplotlib.axes.Axes` which represents a
map :class:`~cartopy.crs.Projection`.
This class replaces the matplotlib :class:`~matplotlib.axes.Axes` class
when created with the *projection* keyword. For example::
# Set up a standard map for latlon data.
geo_axes = pyplot.axes(projection=cartopy.crs.PlateCarree())
# Set up an OSGB map.
geo_axes = pyplot.subplot(2, 2, 1, projection=cartopy.crs.OSGB())
    When a source projection is provided to one of its plotting methods,
using the *transform* keyword, the standard matplotlib plot result is
transformed from source coordinates to the target projection. For example::
# Plot latlon data on an OSGB map.
pyplot.axes(projection=cartopy.crs.OSGB())
pyplot.contourf(x, y, data, transform=cartopy.crs.PlateCarree())
"""
def __init__(self, *args, **kwargs):
"""
Create a GeoAxes object using standard matplotlib
:class:`~matplotlib.axes.Axes` args and kwargs.
Kwargs:
* map_projection - The target :class:`~cartopy.crs.Projection` of
this Axes object.
All other args and keywords are passed through to
:class:`matplotlib.axes.Axes`.
"""
self.projection = kwargs.pop('map_projection')
"""The :class:`cartopy.crs.Projection` of this GeoAxes."""
self.outline_patch = None
"""The patch that provides the line bordering the projection."""
self.background_patch = None
"""The patch that provides the filled background of the projection."""
super(GeoAxes, self).__init__(*args, **kwargs)
self._gridliners = []
self.img_factories = []
self._done_img_factory = False
def add_image(self, factory, *args, **kwargs):
"""
Adds an image "factory" to the Axes.
Any image "factory" added, will be asked to retrieve an image
with associated metadata for a given bounding box at draw time.
The advantage of this approach is that the limits of the map
do not need to be known when adding the image factory, but can
        be deferred until everything which can affect the limits has been
added.
Currently an image "factory" is just an object with
        an ``image_for_domain`` method. Examples of image factories
are :class:`cartopy.io.img_nest.NestedImageCollection` and
:class:`cartopy.io.image_tiles.GoogleTiles`.
"""
if hasattr(factory, 'image_for_domain'):
# XXX TODO: Needs deprecating.
self.img_factories.append([factory, args, kwargs])
else:
# Args and kwargs not allowed.
assert not bool(args) and not bool(kwargs)
image = factory
try:
super(GeoAxes, self).add_image(image)
except AttributeError:
# If add_image method doesn't exist (only available from
# v1.4 onwards) we implement it ourselves.
self._set_artist_props(image)
self.images.append(image)
image._remove_method = lambda h: self.images.remove(h)
return image
@contextlib.contextmanager
def hold_limits(self, hold=True):
"""
Keep track of the original view and data limits for the life of this
context manager, optionally reverting any changes back to the original
values after the manager exits.
Parameters
----------
hold : bool (default True)
Whether to revert the data and view limits after the context
manager exits.
"""
data_lim = self.dataLim.frozen().get_points()
view_lim = self.viewLim.frozen().get_points()
other = (self.ignore_existing_data_limits,
self._autoscaleXon, self._autoscaleYon)
try:
yield
finally:
if hold:
self.dataLim.set_points(data_lim)
self.viewLim.set_points(view_lim)
(self.ignore_existing_data_limits,
self._autoscaleXon, self._autoscaleYon) = other
@matplotlib.artist.allow_rasterization
def draw(self, renderer=None, inframe=False):
"""
Extends the standard behaviour of :func:`matplotlib.axes.Axes.draw`.
Draws grid lines and image factory results before invoking standard
matplotlib drawing. A global range is used if no limits have yet
been set.
"""
# If data has been added (i.e. autoscale hasn't been turned off)
# then we should autoscale the view.
if self.get_autoscale_on() and self.ignore_existing_data_limits:
self.autoscale_view()
if self.outline_patch.reclip or self.background_patch.reclip:
clipped_path = clip_path(self.outline_patch.orig_path,
self.viewLim)
self.outline_patch._path = clipped_path
self.background_patch._path = clipped_path
for gl in self._gridliners:
gl._draw_gridliner(background_patch=self.background_patch)
self._gridliners = []
# XXX This interface needs a tidy up:
# image drawing on pan/zoom;
# caching the resulting image;
# buffering the result by 10%...;
if not self._done_img_factory:
for factory, args, kwargs in self.img_factories:
img, extent, origin = factory.image_for_domain(
self._get_extent_geom(factory.crs), args[0])
self.imshow(img, extent=extent, origin=origin,
transform=factory.crs, *args[1:], **kwargs)
self._done_img_factory = True
return matplotlib.axes.Axes.draw(self, renderer=renderer,
inframe=inframe)
def __str__(self):
return '< GeoAxes: %s >' % self.projection
def cla(self):
"""Clears the current axes and adds boundary lines."""
result = matplotlib.axes.Axes.cla(self)
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
# Enable tight autoscaling.
self._tight = True
self.set_aspect('equal')
with self.hold_limits():
self._boundary()
# XXX consider a margin - but only when the map is not global...
# self._xmargin = 0.15
# self._ymargin = 0.15
self.dataLim.intervalx = self.projection.x_limits
self.dataLim.intervaly = self.projection.y_limits
return result
def format_coord(self, x, y):
"""Return a string formatted for the matplotlib GUI status bar."""
lon, lat = ccrs.Geodetic().transform_point(x, y, self.projection)
ns = 'N' if lat >= 0.0 else 'S'
ew = 'E' if lon >= 0.0 else 'W'
return u'%.4g, %.4g (%f\u00b0%s, %f\u00b0%s)' % (x, y, abs(lat),
ns, abs(lon), ew)
def coastlines(self, resolution='110m', color='black', **kwargs):
"""
Adds coastal **outlines** to the current axes from the Natural Earth
"coastline" shapefile collection.
Kwargs:
* resolution - a named resolution to use from the Natural Earth
dataset. Currently can be one of "110m", "50m", and
"10m".
.. note::
Currently no clipping is done on the coastlines before adding
them to the axes. This means, if very high resolution coastlines
            are being used, performance is likely to be severely affected.
This should be resolved transparently by v0.5.
"""
kwargs['edgecolor'] = color
kwargs['facecolor'] = 'none'
feature = cartopy.feature.NaturalEarthFeature('physical', 'coastline',
resolution, **kwargs)
return self.add_feature(feature)
def tissot(self, rad_km=5e5, lons=None, lats=None, n_samples=80, **kwargs):
"""
Adds Tissot's indicatrices to the axes.
Kwargs:
        * rad_km - The radius in km of the circles to be drawn.
* lons - A numpy.ndarray, list or tuple of longitude values that
locate the centre of each circle. Specifying more than one
dimension allows individual points to be drawn whereas a
1D array produces a grid of points.
* lats - A numpy.ndarray, list or tuple of latitude values that
                 locate the centre of each circle. See lons.
* n_samples - Integer number of points sampled around the
circumference of each circle.
        **kwargs are passed through to :class:`~cartopy.feature.ShapelyFeature`.
"""
from cartopy import geodesic
geod = geodesic.Geodesic()
geoms = []
if lons is None:
lons = np.linspace(-180, 180, 6, endpoint=False)
else:
lons = np.asarray(lons)
if lats is None:
lats = np.linspace(-80, 80, 6)
else:
lats = np.asarray(lats)
if lons.ndim == 1 or lats.ndim == 1:
lons, lats = np.meshgrid(lons, lats)
lons, lats = lons.flatten(), lats.flatten()
if lons.shape != lats.shape:
raise ValueError('lons and lats must have the same shape.')
for i in range(len(lons)):
circle = geod.circle(lons[i], lats[i], rad_km,
n_samples=n_samples)
geoms.append(sgeom.Polygon(circle))
feature = cartopy.feature.ShapelyFeature(geoms, ccrs.Geodetic(),
**kwargs)
return self.add_feature(feature)
def natural_earth_shp(self, name='land', resolution='110m',
category='physical', **kwargs):
"""
Adds the geometries from the specified Natural Earth shapefile to the
Axes as a :class:`~matplotlib.collections.PathCollection`.
``**kwargs`` are passed through to the
:class:`~matplotlib.collections.PathCollection` constructor.
Returns the created :class:`~matplotlib.collections.PathCollection`.
.. note::
Currently no clipping is done on the geometries before adding them
to the axes. This means, if very high resolution geometries are
            being used, performance is likely to be severely affected. This
should be resolved transparently by v0.5.
"""
warnings.warn('This method has been deprecated.'
' Please use `add_feature` instead.')
kwargs.setdefault('edgecolor', 'face')
kwargs.setdefault('facecolor', cartopy.feature.COLORS['land'])
feature = cartopy.feature.NaturalEarthFeature(category, name,
resolution, **kwargs)
return self.add_feature(feature)
def add_feature(self, feature, **kwargs):
"""
Adds the given :class:`~cartopy.feature.Feature` instance to the axes.
Args:
* feature:
An instance of :class:`~cartopy.feature.Feature`.
Kwargs:
Keyword arguments to be used when drawing the feature. This allows
standard matplotlib control over aspects such as 'facecolor',
'alpha', etc.
Returns:
* A :class:`cartopy.mpl.feature_artist.FeatureArtist`
instance responsible for drawing the feature.
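        For example, a minimal sketch (the facecolor choice is arbitrary)::
            ax.add_feature(cartopy.feature.LAND, facecolor='wheat')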
"""
# Instantiate an artist to draw the feature and add it to the axes.
artist = feature_artist.FeatureArtist(feature, **kwargs)
return self.add_artist(artist)
def add_geometries(self, geoms, crs, **kwargs):
"""
Add the given shapely geometries (in the given crs) to the axes.
Args:
* geoms:
A collection of shapely geometries.
* crs:
The cartopy CRS in which the provided geometries are defined.
Kwargs:
Keyword arguments to be used when drawing this feature.
Returns:
A :class:`cartopy.mpl.feature_artist.FeatureArtist`
instance responsible for drawing the geometries.
"""
feature = cartopy.feature.ShapelyFeature(geoms, crs, **kwargs)
return self.add_feature(feature)
def get_extent(self, crs=None):
"""
Get the extent (x0, x1, y0, y1) of the map in the given coordinate
system.
If no crs is given, the returned extents' coordinate system will be
the CRS of this Axes.
"""
p = self._get_extent_geom(crs)
r = p.bounds
x1, y1, x2, y2 = r
return x1, x2, y1, y2
def _get_extent_geom(self, crs=None):
# Perform the calculations for get_extent(), which just repackages it.
with self.hold_limits():
if self.get_autoscale_on():
self.autoscale_view()
[x1, y1], [x2, y2] = self.viewLim.get_points()
domain_in_src_proj = sgeom.Polygon([[x1, y1], [x2, y1],
[x2, y2], [x1, y2],
[x1, y1]])
# Determine target projection based on requested CRS.
if crs is None:
proj = self.projection
elif isinstance(crs, ccrs.Projection):
proj = crs
else:
# Attempt to select suitable projection for
# non-projection CRS.
if isinstance(crs, ccrs.RotatedGeodetic):
proj = ccrs.RotatedPole(crs.proj4_params['lon_0'] - 180,
crs.proj4_params['o_lat_p'])
warnings.warn('Approximating coordinate system {!r} with a '
'RotatedPole projection.'.format(crs))
elif hasattr(crs, 'is_geodetic') and crs.is_geodetic():
proj = ccrs.PlateCarree(crs.globe)
warnings.warn('Approximating coordinate system {!r} with the '
'PlateCarree projection.'.format(crs))
else:
raise ValueError('Cannot determine extent in'
' coordinate system {!r}'.format(crs))
        # Calculate intersection with boundary and project if necessary.
boundary_poly = sgeom.Polygon(self.projection.boundary)
if proj != self.projection:
# Erode boundary by threshold to avoid transform issues.
# This is a workaround for numerical issues at the boundary.
eroded_boundary = boundary_poly.buffer(-self.projection.threshold)
geom_in_src_proj = eroded_boundary.intersection(
domain_in_src_proj)
geom_in_crs = proj.project_geometry(geom_in_src_proj,
self.projection)
else:
geom_in_crs = boundary_poly.intersection(domain_in_src_proj)
return geom_in_crs
def set_extent(self, extents, crs=None):
"""
Set the extent (x0, x1, y0, y1) of the map in the given
coordinate system.
If no crs is given, the extents' coordinate system will be assumed
to be the Geodetic version of this axes' projection.
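        For example, a minimal sketch focusing the map on the British Isles
        (extent values given in PlateCarree coordinates)::
            ax.set_extent([-11, 3, 49, 61], crs=ccrs.PlateCarree())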
"""
# TODO: Implement the same semantics as plt.xlim and
# plt.ylim - allowing users to set None for a minimum and/or
# maximum value
x1, x2, y1, y2 = extents
domain_in_crs = sgeom.polygon.LineString([[x1, y1], [x2, y1],
[x2, y2], [x1, y2],
[x1, y1]])
projected = None
# Sometimes numerical issues cause the projected vertices of the
# requested extents to appear outside the projection domain.
# This results in an empty geometry, which has an empty `bounds`
# tuple, which causes an unpack error.
# This workaround avoids using the projection when the requested
# extents are obviously the same as the projection domain.
try_workaround = ((crs is None and
isinstance(self.projection, ccrs.PlateCarree)) or
crs == self.projection)
if try_workaround:
boundary = self.projection.boundary
if boundary.equals(domain_in_crs):
projected = boundary
if projected is None:
projected = self.projection.project_geometry(domain_in_crs, crs)
try:
# This might fail with an unhelpful error message ('need more
# than 0 values to unpack') if the specified extents fall outside
# the projection extents, so try and give a better error message.
x1, y1, x2, y2 = projected.bounds
except ValueError:
msg = ('Failed to determine the required bounds in projection '
'coordinates. Check that the values provided are within '
'the valid range (x_limits=[{xlim[0]}, {xlim[1]}], '
'y_limits=[{ylim[0]}, {ylim[1]}]).')
raise ValueError(msg.format(xlim=self.projection.x_limits,
ylim=self.projection.y_limits))
self.set_xlim([x1, x2])
self.set_ylim([y1, y2])
def set_global(self):
"""
Set the extent of the Axes to the limits of the projection.
.. note::
In some cases where the projection has a limited sensible range
the ``set_global`` method does not actually make the whole globe
visible. Instead, the most appropriate extents will be used (e.g.
Ordnance Survey UK will set the extents to be around the British
            Isles).
"""
self.set_xlim(self.projection.x_limits)
self.set_ylim(self.projection.y_limits)
def set_xticks(self, ticks, minor=False, crs=None):
"""
Set the x ticks.
Args:
* ticks - list of floats denoting the desired position of x ticks.
Kwargs:
* minor - boolean flag indicating whether the ticks should be minor
ticks i.e. small and unlabelled (default is False).
* crs - An instance of :class:`~cartopy.crs.CRS` indicating the
coordinate system of the provided tick values. If no
coordinate system is specified then the values are assumed
to be in the coordinate system of the projection.
Only transformations from one rectangular coordinate system
to another rectangular coordinate system are supported.
.. note::
This interface is subject to change whilst functionality is added
to support other map projections.
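        For example, a minimal sketch with tick values specified in PlateCarree
        coordinates::
            ax.set_xticks([-180, -120, -60, 0, 60, 120, 180], crs=ccrs.PlateCarree())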
"""
# Project ticks if crs differs from axes' projection
if crs is not None and crs != self.projection:
if not isinstance(crs, (ccrs._RectangularProjection,
ccrs.Mercator)) or \
not isinstance(self.projection,
(ccrs._RectangularProjection,
ccrs.Mercator)):
raise RuntimeError('Cannot handle non-rectangular coordinate '
'systems.')
proj_xyz = self.projection.transform_points(crs,
np.asarray(ticks),
np.zeros(len(ticks)))
xticks = proj_xyz[..., 0]
else:
xticks = ticks
# Switch on drawing of x axis
self.xaxis.set_visible(True)
return super(GeoAxes, self).set_xticks(xticks, minor)
def set_yticks(self, ticks, minor=False, crs=None):
"""
Set the y ticks.
Args:
* ticks - list of floats denoting the desired position of y ticks.
Kwargs:
* minor - boolean flag indicating whether the ticks should be minor
ticks i.e. small and unlabelled (default is False).
* crs - An instance of :class:`~cartopy.crs.CRS` indicating the
coordinate system of the provided tick values. If no
coordinate system is specified then the values are assumed
to be in the coordinate system of the projection.
Only transformations from one rectangular coordinate system
to another rectangular coordinate system are supported.
.. note::
This interface is subject to change whilst functionality is added
to support other map projections.
"""
# Project ticks if crs differs from axes' projection
if crs is not None and crs != self.projection:
if not isinstance(crs, (ccrs._RectangularProjection,
ccrs.Mercator)) or \
not isinstance(self.projection,
(ccrs._RectangularProjection,
ccrs.Mercator)):
raise RuntimeError('Cannot handle non-rectangular coordinate '
'systems.')
proj_xyz = self.projection.transform_points(crs,
np.zeros(len(ticks)),
np.asarray(ticks))
yticks = proj_xyz[..., 1]
else:
yticks = ticks
# Switch on drawing of y axis
self.yaxis.set_visible(True)
return super(GeoAxes, self).set_yticks(yticks, minor)
def stock_img(self, name='ne_shaded'):
"""
Add a standard image to the map.
Currently, the only (and default) option is a downsampled version of
the Natural Earth shaded relief raster.
"""
if name == 'ne_shaded':
import os
source_proj = ccrs.PlateCarree()
fname = os.path.join(config["repo_data_dir"],
'raster', 'natural_earth',
'50-natural-earth-1-downsampled.png')
return self.imshow(imread(fname), origin='upper',
transform=source_proj,
extent=[-180, 180, -90, 90])
else:
raise ValueError('Unknown stock image %r.' % name)
def add_raster(self, raster_source, **slippy_image_kwargs):
"""
Add the given raster source to the GeoAxes.
Parameters
----------
raster_source : :class:`cartopy.io.RasterSource` like instance
``raster_source`` may be any object which implements the
RasterSource interface, including instances of objects such as
:class:`~cartopy.io.ogc_clients.WMSRasterSource` and
:class:`~cartopy.io.ogc_clients.WMTSRasterSource`. Note that image
retrievals are done at draw time, not at creation time.
"""
# Allow a fail-fast error if the raster source cannot provide
# images in the current projection.
raster_source.validate_projection(self.projection)
img = SlippyImageArtist(self, raster_source, **slippy_image_kwargs)
with self.hold_limits():
self.add_image(img)
return img
def _regrid_shape_aspect(self, regrid_shape, target_extent):
"""
Helper for setting regridding shape which is used in several
plotting methods.
"""
if not isinstance(regrid_shape, collections.Sequence):
target_size = int(regrid_shape)
x_range, y_range = np.diff(target_extent)[::2]
desired_aspect = x_range / y_range
if x_range >= y_range:
regrid_shape = (target_size * desired_aspect, target_size)
else:
regrid_shape = (target_size, target_size / desired_aspect)
return regrid_shape
def imshow(self, img, *args, **kwargs):
"""
Add the "transform" keyword to :func:`~matplotlib.pyplot.imshow'.
Parameters
----------
transform : :class:`~cartopy.crs.Projection` or matplotlib transform
The coordinate system in which the given image is rectangular.
regrid_shape : int or pair of ints
The shape of the desired image if it needs to be transformed.
If a single integer is given then that will be used as the minimum
length dimension, while the other dimension will be scaled up
according to the target extent's aspect ratio. The default is for
the minimum dimension of a transformed image to have length 750,
so for an image being transformed into a global PlateCarree
projection the resulting transformed image would have a shape of
``(750, 1500)``.
extent : tuple
The corner coordinates of the image in the form
``(left, right, bottom, top)``. The coordinates should be in the
coordinate system passed to the transform keyword.
origin : {'lower', 'upper'}
The origin of the vertical pixels. See
:func:`matplotlib.pyplot.imshow` for further details. Default
is ``'lower'``.
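        For example, a minimal sketch for a global image stored on a regular
        lat/lon grid::
            ax.imshow(img, origin='upper', extent=[-180, 180, -90, 90],
                      transform=ccrs.PlateCarree())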
"""
transform = kwargs.pop('transform', None)
if 'update_datalim' in kwargs:
raise ValueError('The update_datalim keyword has been removed in '
'imshow. To hold the data and view limits see '
'GeoAxes.hold_limits.')
kwargs.setdefault('origin', 'lower')
same_projection = (isinstance(transform, ccrs.Projection) and
self.projection == transform)
if transform is None or transform == self.transData or same_projection:
if isinstance(transform, ccrs.Projection):
transform = transform._as_mpl_transform(self)
result = matplotlib.axes.Axes.imshow(self, img, *args, **kwargs)
else:
extent = kwargs.pop('extent', None)
img = np.asanyarray(img)
if kwargs['origin'] == 'upper':
# It is implicitly assumed by the regridding operation that the
# origin of the image is 'lower', so simply adjust for that
# here.
img = img[::-1]
kwargs['origin'] = 'lower'
if not isinstance(transform, ccrs.Projection):
raise ValueError('Expected a projection subclass. Cannot '
'handle a %s in imshow.' % type(transform))
target_extent = self.get_extent(self.projection)
regrid_shape = kwargs.pop('regrid_shape', 750)
regrid_shape = self._regrid_shape_aspect(regrid_shape,
target_extent)
warp_array = cartopy.img_transform.warp_array
img, extent = warp_array(img,
source_proj=transform,
source_extent=extent,
target_proj=self.projection,
target_res=regrid_shape,
target_extent=target_extent,
mask_extrapolated=True,
)
# As a workaround to a matplotlib limitation, turn any images
# which are RGB with a mask into RGBA images with an alpha
# channel.
if (isinstance(img, np.ma.MaskedArray) and
img.shape[2:3] == (3, ) and
img.mask is not False):
old_img = img
img = np.zeros(img.shape[:2] + (4, ), dtype=img.dtype)
img[:, :, 0:3] = old_img
# Put an alpha channel in if the image was masked.
img[:, :, 3] = ~ np.any(old_img.mask, axis=2)
if img.dtype.kind == 'u':
img[:, :, 3] *= 255
result = matplotlib.axes.Axes.imshow(self, img, *args,
extent=extent, **kwargs)
# clip the image. This does not work as the patch moves with mouse
# movement, but the clip path doesn't
# This could definitely be fixed in matplotlib
# if result.get_clip_path() in [None, self.patch]:
# # image does not already have clipping set, clip to axes patch
# result.set_clip_path(self.outline_patch)
return result
def gridlines(self, crs=None, draw_labels=False, xlocs=None,
ylocs=None, **kwargs):
"""
Automatically adds gridlines to the axes, in the given coordinate
system, at draw time.
Kwargs:
* crs
The :class:`cartopy._crs.CRS` defining the coordinate system in
which gridlines are drawn.
Default is :class:`cartopy.crs.PlateCarree`.
* draw_labels
Label gridlines like axis ticks, around the edge.
* xlocs
An iterable of gridline locations or a
:class:`matplotlib.ticker.Locator` instance which will be used to
determine the locations of the gridlines in the x-coordinate of
the given CRS. Defaults to None, which implies automatic locating
of the gridlines.
* ylocs
An iterable of gridline locations or a
:class:`matplotlib.ticker.Locator` instance which will be used to
determine the locations of the gridlines in the y-coordinate of
the given CRS. Defaults to None, which implies automatic locating
of the gridlines.
Returns:
A :class:`cartopy.mpl.gridliner.Gridliner` instance.
All other keywords control line properties. These are passed through
to :class:`matplotlib.collections.Collection`.
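        For example, a minimal usage sketch drawing labelled gridlines every 60
        degrees of longitude (labels are only supported for some projections)::
            gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                              xlocs=range(-180, 181, 60))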
"""
if crs is None:
crs = ccrs.PlateCarree()
from cartopy.mpl.gridliner import Gridliner
if xlocs is not None and not isinstance(xlocs, mticker.Locator):
xlocs = mticker.FixedLocator(xlocs)
if ylocs is not None and not isinstance(ylocs, mticker.Locator):
ylocs = mticker.FixedLocator(ylocs)
gl = Gridliner(
self, crs=crs, draw_labels=draw_labels, xlocator=xlocs,
ylocator=ylocs, collection_kwargs=kwargs)
self._gridliners.append(gl)
return gl
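    # --- Illustrative usage sketch (not part of the original source).
    # With a GeoAxes ``ax``, gridlines might be added and customised thus:
    #
    #   import matplotlib.ticker as mticker
    #   import cartopy.crs as ccrs
    #   gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
    #                     linestyle='--', color='gray')
    #   gl.xlocator = mticker.FixedLocator([-180, -90, 0, 90, 180])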
def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):
# generate some axes spines, as some Axes super class machinery
# requires them. Just make them invisible
spines = matplotlib.axes.Axes._gen_axes_spines(self,
locations=locations,
offset=offset,
units=units)
for spine in spines.values():
spine.set_visible(False)
return spines
def _boundary(self):
"""
Adds the map's boundary to this GeoAxes, attaching the appropriate
artists to :data:`.outline_patch` and :data:`.background_patch`.
.. note::
The boundary is not the ``axes.patch``. ``axes.patch``
is made invisible by this method - its only remaining
purpose is to provide a rectilinear clip patch for
all Axes artists.
"""
# Hide the old "background" patch used by matplotlib - it is not
# used by cartopy's GeoAxes.
self.patch.set_facecolor((1, 1, 1, 0))
self.patch.set_edgecolor((0.5, 0.5, 0.5))
self.patch.set_visible(False)
self.background_patch = None
self.outline_patch = None
path, = cpatch.geos_to_path(self.projection.boundary)
# Get the outline path in terms of self.transData
proj_to_data = self.projection._as_mpl_transform(self) - self.transData
trans_path = proj_to_data.transform_path(path)
# Set the boundary - we can make use of the rectangular clipping.
self.set_boundary(trans_path, use_as_clip_path=False)
# Attach callback events for when the xlim or ylim are changed. This
# is what triggers the patches to be re-clipped at draw time.
self.callbacks.connect('xlim_changed', _trigger_patch_reclip)
self.callbacks.connect('ylim_changed', _trigger_patch_reclip)
def set_boundary(self, path, transform=None, use_as_clip_path=True):
"""
Given a path, update the :data:`.outline_patch` and
:data:`.background_patch` to take its shape.
Parameters
----------
path : :class:`matplotlib.path.Path`
The path of the desired boundary.
transform : None or :class:`matplotlib.transforms.Transform`
The coordinate system of the given path. Currently this must be
convertible to data coordinates, and therefore cannot extend beyond
the limits of the axes' projection.
use_as_clip_path : bool
Whether axes.patch should be updated. Updating axes.patch means
that any artists subsequently created will inherit clipping from
this path, rather than the standard unit square in axes
coordinates.
"""
if transform is None:
transform = self.transData
if isinstance(transform, cartopy.crs.CRS):
transform = transform._as_mpl_transform(self)
if self.background_patch is None:
background = matplotlib.patches.PathPatch(path, edgecolor='none',
facecolor='white',
zorder=-1, clip_on=False,
transform=transform)
else:
background = matplotlib.patches.PathPatch(path, zorder=-1,
clip_on=False)
background.update_from(self.background_patch)
self.background_patch.remove()
background.set_transform(transform)
if self.outline_patch is None:
outline = matplotlib.patches.PathPatch(path, edgecolor='black',
facecolor='none',
zorder=2.5, clip_on=False,
transform=transform)
else:
outline = matplotlib.patches.PathPatch(path, zorder=2.5,
clip_on=False)
outline.update_from(self.outline_patch)
self.outline_patch.remove()
outline.set_transform(transform)
# Attach the original path to the patches. This will be used each time
# a new clipped path is calculated.
outline.orig_path = path
background.orig_path = path
# Attach a "reclip" attribute, which determines if the patch's path is
# reclipped before drawing. A callback is used to change the "reclip"
# state.
outline.reclip = True
background.reclip = True
# Add the patches to the axes, and also make them available as
# attributes.
self.background_patch = background
self.outline_patch = outline
if use_as_clip_path:
self.patch = background
with self.hold_limits():
self.add_patch(outline)
self.add_patch(background)
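    # --- Illustrative usage sketch (not part of the original source).
    # A common use of set_boundary is clipping a polar-stereographic map to
    # a circle defined in axes coordinates:
    #
    #   import numpy as np
    #   import matplotlib.path as mpath
    #   theta = np.linspace(0, 2 * np.pi, 100)
    #   verts = np.vstack([np.sin(theta), np.cos(theta)]).T
    #   circle = mpath.Path(verts * 0.45 + 0.5)
    #   ax.set_boundary(circle, transform=ax.transAxes)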
def contour(self, *args, **kwargs):
"""
Add the "transform" keyword to :func:`~matplotlib.pyplot.contour'.
Extra kwargs:
transform - a :class:`~cartopy.crs.Projection`.
"""
t = kwargs.get('transform', None)
if t is None:
t = self.projection
if isinstance(t, ccrs.CRS) and not isinstance(t, ccrs.Projection):
raise ValueError('invalid transform:'
' Spherical contouring is not supported - '
' consider using PlateCarree/RotatedPole.')
if isinstance(t, ccrs.Projection):
kwargs['transform'] = t._as_mpl_transform(self)
else:
kwargs['transform'] = t
result = matplotlib.axes.Axes.contour(self, *args, **kwargs)
self.autoscale_view()
return result
def contourf(self, *args, **kwargs):
"""
Add the "transform" keyword to :func:`~matplotlib.pyplot.contourf'.
Extra kwargs:
transform - a :class:`~cartopy.crs.Projection`.
"""
t = kwargs.get('transform', None)
if t is None:
t = self.projection
if isinstance(t, ccrs.CRS) and not isinstance(t, ccrs.Projection):
raise ValueError('invalid transform:'
' Spherical contouring is not supported - '
' consider using PlateCarree/RotatedPole.')
if isinstance(t, ccrs.Projection):
kwargs['transform'] = t = t._as_mpl_transform(self)
else:
kwargs['transform'] = t
# Set flag to indicate correcting orientation of paths if not ccw
if isinstance(t, mtransforms.Transform):
for sub_trans, _ in t._iter_break_from_left_to_right():
if isinstance(sub_trans, InterProjectionTransform):
if not hasattr(sub_trans, 'force_path_ccw'):
sub_trans.force_path_ccw = True
result = matplotlib.axes.Axes.contourf(self, *args, **kwargs)
# We need to compute the dataLim correctly for contours.
if matplotlib.__version__ >= '1.4':
extent = mtransforms.Bbox.union([col.get_datalim(self.transData)
for col in result.collections])
self.dataLim.update_from_data_xy(extent.get_points())
self.autoscale_view()
return result
def scatter(self, *args, **kwargs):
"""
Add the "transform" keyword to :func:`~matplotlib.pyplot.scatter'.
Extra kwargs:
transform - a :class:`~cartopy.crs.Projection`.
"""
t = kwargs.get('transform', None)
# Keep this bit - even at mpl v1.2
if t is None:
t = self.projection
if hasattr(t, '_as_mpl_transform'):
kwargs['transform'] = t._as_mpl_transform(self)
        # exclude Geodetic as a valid source CS
if (isinstance(kwargs.get('transform', None),
InterProjectionTransform) and
kwargs['transform'].source_projection.is_geodetic()):
raise ValueError('Cartopy cannot currently do spherical '
'contouring. The source CRS cannot be a '
                             'geodetic, consider using the cylindrical form '
'(PlateCarree or RotatedPole).')
result = matplotlib.axes.Axes.scatter(self, *args, **kwargs)
self.autoscale_view()
return result
def pcolormesh(self, *args, **kwargs):
"""
Add the "transform" keyword to :func:`~matplotlib.pyplot.pcolormesh'.
Extra kwargs:
transform - a :class:`~cartopy.crs.Projection`.
"""
t = kwargs.get('transform', None)
if t is None:
t = self.projection
if isinstance(t, ccrs.CRS) and not isinstance(t, ccrs.Projection):
raise ValueError('invalid transform:'
' Spherical pcolormesh is not supported - '
' consider using PlateCarree/RotatedPole.')
kwargs.setdefault('transform', t)
result = self._pcolormesh_patched(*args, **kwargs)
self.autoscale_view()
return result
def _pcolormesh_patched(self, *args, **kwargs):
"""
A temporary, modified duplicate of
        :func:`~matplotlib.pyplot.pcolormesh`.
This function contains a workaround for a matplotlib issue
and will be removed once the issue has been resolved.
https://github.com/matplotlib/matplotlib/pull/1314
"""
import warnings
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.transforms as transforms
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
import matplotlib.mlab as mlab
import matplotlib.collections as mcoll
if not self._hold:
self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat').lower()
antialiased = kwargs.pop('antialiased', False)
kwargs.setdefault('edgecolors', 'None')
allmatch = (shading == 'gouraud')
X, Y, C = self._pcolorargs('pcolormesh', *args, allmatch=allmatch)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = C.ravel()
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords,
antialiased=antialiased, shading=shading, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_clim(vmin, vmax)
collection.autoscale_None()
self.grid(False)
# Transform from native to data coordinates?
t = collection._transform
if (not isinstance(t, mtransforms.Transform) and
hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
if t and any(t.contains_branch_seperately(self.transData)):
trans_to_data = t - self.transData
pts = np.vstack([X, Y]).T.astype(np.float)
transformed_pts = trans_to_data.transform(pts)
X = transformed_pts[..., 0]
Y = transformed_pts[..., 1]
########################
# PATCH
# XXX Non-standard matplotlib thing.
no_inf = (X != np.inf) & (Y != np.inf)
X = X[no_inf]
Y = Y[no_inf]
# END OF PATCH
##############
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
collection._corners = corners
collection.get_datalim = lambda transData: collection._corners
self.update_datalim(corners)
self.add_collection(collection)
self.autoscale_view()
########################
# PATCH
# XXX Non-standard matplotlib thing.
# Handle a possible wrap around for rectangular projections.
t = kwargs.get('transform', None)
if isinstance(t, ccrs.CRS):
wrap_proj_types = (ccrs._RectangularProjection,
ccrs._WarpedRectangularProjection,
ccrs.InterruptedGoodeHomolosine,
ccrs.Mercator)
if isinstance(t, wrap_proj_types) and \
isinstance(self.projection, wrap_proj_types):
C = C.reshape((Ny - 1, Nx - 1))
transformed_pts = transformed_pts.reshape((Ny, Nx, 2))
# compute the vertical line angles of the pcolor in
# transformed coordinates
with np.errstate(invalid='ignore'):
horizontal_vert_angles = np.arctan2(
np.diff(transformed_pts[..., 0], axis=1),
np.diff(transformed_pts[..., 1], axis=1)
)
# if the change in angle is greater than 90 degrees (absolute),
# then mark it for masking later on.
dx_horizontal = np.diff(horizontal_vert_angles)
to_mask = ((np.abs(dx_horizontal) > np.pi / 2) |
np.isnan(dx_horizontal))
if np.any(to_mask):
if collection.get_cmap()._rgba_bad[3] != 0.0:
warnings.warn("The colormap's 'bad' has been set, but "
"in order to wrap pcolormesh across the "
"map it must be fully transparent.")
# at this point C has a shape of (Ny-1, Nx-1), to_mask has
# a shape of (Ny, Nx-2) and pts has a shape of (Ny*Nx, 2)
mask = np.zeros(C.shape, dtype=np.bool)
# mask out the neighbouring cells if there was a cell
# found with an angle change of more than pi/2 . NB.
# Masking too much only has a detrimental impact on
# performance.
to_mask_y_shift = to_mask[:-1, :]
mask[:, :-1][to_mask_y_shift] = True
mask[:, 1:][to_mask_y_shift] = True
to_mask_x_shift = to_mask[1:, :]
mask[:, :-1][to_mask_x_shift] = True
mask[:, 1:][to_mask_x_shift] = True
C_mask = getattr(C, 'mask', None)
if C_mask is not None:
dmask = mask | C_mask
else:
dmask = mask
# create the masked array to be used with this pcolormesh
pcolormesh_data = np.ma.array(C, mask=mask)
collection.set_array(pcolormesh_data.ravel())
# now that the pcolormesh has masked the bad values,
# create a pcolor with just those values that were masked
pcolor_data = pcolormesh_data.copy()
# invert the mask
pcolor_data.mask = ~pcolor_data.mask
# remember to re-apply the original data mask to the array
if C_mask is not None:
pcolor_data.mask = pcolor_data.mask | C_mask
pts = pts.reshape((Ny, Nx, 2))
if np.any(~pcolor_data.mask):
# plot with slightly lower zorder to avoid odd issue
# where the main plot is obscured
zorder = collection.zorder - .1
kwargs.pop('zorder', None)
kwargs.setdefault('snap', False)
pcolor_col = self.pcolor(pts[..., 0], pts[..., 1],
pcolor_data, zorder=zorder,
**kwargs)
pcolor_col.set_cmap(cmap)
pcolor_col.set_norm(norm)
pcolor_col.set_clim(vmin, vmax)
# scale the data according to the *original* data
pcolor_col.norm.autoscale_None(C)
# put the pcolor_col on the pcolormesh collection so
# that if really necessary, users can do things post
# this method
collection._wrapped_collection_fix = pcolor_col
# Clip the QuadMesh to the projection boundary, which is required
# to keep the shading inside the projection bounds.
collection.set_clip_path(self.outline_patch)
# END OF PATCH
##############
return collection
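    # --- Illustrative usage sketch (not part of the original source).
    # The wrap-around handling above matters when a global field is drawn on
    # a rectangular projection, e.g.:
    #
    #   lons = np.linspace(0, 360, 73)
    #   lats = np.linspace(-80, 80, 33)
    #   data = np.random.rand(lats.size - 1, lons.size - 1)
    #   ax = plt.axes(projection=ccrs.Mercator())
    #   ax.pcolormesh(lons, lats, data, transform=ccrs.PlateCarree())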
def pcolor(self, *args, **kwargs):
"""
Add the "transform" keyword to :func:`~matplotlib.pyplot.pcolor'.
Extra kwargs:
transform - a :class:`~cartopy.crs.Projection`.
"""
t = kwargs.get('transform', None)
if t is None:
t = self.projection
if isinstance(t, ccrs.CRS) and not isinstance(t, ccrs.Projection):
raise ValueError('invalid transform:'
' Spherical pcolor is not supported - '
' consider using PlateCarree/RotatedPole.')
kwargs.setdefault('transform', t)
result = matplotlib.axes.Axes.pcolor(self, *args, **kwargs)
# Update the datalim for this pcolor.
limits = result.get_datalim(self.axes.transData)
self.axes.update_datalim(limits)
self.autoscale_view()
return result
def quiver(self, x, y, u, v, *args, **kwargs):
"""
Plot a field of arrows.
Extra Kwargs:
* transform: :class:`cartopy.crs.Projection` or matplotlib transform
The coordinate system in which the vectors are defined.
* regrid_shape: int or 2-tuple of ints
If given, specifies that the points where the arrows are
located will be interpolated onto a regular grid in
projection space. If a single integer is given then that
will be used as the minimum grid length dimension, while the
other dimension will be scaled up according to the target
extent's aspect ratio. If a pair of ints are given they
determine the grid length in the x and y directions
respectively.
* target_extent: 4-tuple
If given, specifies the extent in the target CRS that the
regular grid defined by *regrid_shape* will have. Defaults
to the current extent of the map projection.
See :func:`matplotlib.pyplot.quiver` for details on arguments
and other keyword arguments.
.. note::
The vector components must be defined as grid eastward and
grid northward.
"""
t = kwargs.get('transform', None)
if t is None:
t = self.projection
if isinstance(t, ccrs.CRS) and not isinstance(t, ccrs.Projection):
raise ValueError('invalid transform:'
' Spherical quiver is not supported - '
' consider using PlateCarree/RotatedPole.')
if isinstance(t, ccrs.Projection):
kwargs['transform'] = t._as_mpl_transform(self)
else:
kwargs['transform'] = t
regrid_shape = kwargs.pop('regrid_shape', None)
target_extent = kwargs.pop('target_extent',
self.get_extent(self.projection))
if regrid_shape is not None:
# If regridding is required then we'll be handling transforms
# manually and plotting in native coordinates.
regrid_shape = self._regrid_shape_aspect(regrid_shape,
target_extent)
if args:
# Interpolate color array as well as vector components.
x, y, u, v, c = vector_scalar_to_grid(
t, self.projection, regrid_shape, x, y, u, v, args[0],
target_extent=target_extent)
args = (c,) + args[1:]
else:
x, y, u, v = vector_scalar_to_grid(
t, self.projection, regrid_shape, x, y, u, v,
target_extent=target_extent)
kwargs.pop('transform', None)
elif t != self.projection:
# Transform the vectors if the projection is not the same as the
# data transform.
if (x.ndim == 1 and y.ndim == 1) and (x.shape != u.shape):
x, y = np.meshgrid(x, y)
u, v = self.projection.transform_vectors(t, x, y, u, v)
return matplotlib.axes.Axes.quiver(self, x, y, u, v, *args, **kwargs)
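    # --- Illustrative usage sketch (not part of the original source).
    # Vectors defined on a lat/lon grid can be drawn on another projection
    # and optionally regridded onto a regular grid in projection space:
    #
    #   ax = plt.axes(projection=ccrs.Orthographic(-10, 45))
    #   ax.quiver(lons, lats, u, v, transform=ccrs.PlateCarree(),
    #             regrid_shape=20)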
def barbs(self, x, y, u, v, *args, **kwargs):
"""
Plot a 2-D field of barbs.
Extra Kwargs:
* transform: :class:`cartopy.crs.Projection` or matplotlib transform
The coordinate system in which the vectors are defined.
* regrid_shape: int or 2-tuple of ints
If given, specifies that the points where the arrows are
located will be interpolated onto a regular grid in
projection space. If a single integer is given then that
will be used as the minimum grid length dimension, while the
other dimension will be scaled up according to the target
extent's aspect ratio. If a pair of ints are given they
determine the grid length in the x and y directions
respectively.
* target_extent: 4-tuple
If given, specifies the extent in the target CRS that the
regular grid defined by *regrid_shape* will have. Defaults
to the current extent of the map projection.
See :func:`matplotlib.pyplot.barbs` for details on arguments
and keyword arguments.
.. note::
The vector components must be defined as grid eastward and
grid northward.
"""
t = kwargs.get('transform', None)
if t is None:
t = self.projection
if isinstance(t, ccrs.CRS) and not isinstance(t, ccrs.Projection):
raise ValueError('invalid transform:'
' Spherical barbs are not supported - '
' consider using PlateCarree/RotatedPole.')
if isinstance(t, ccrs.Projection):
kwargs['transform'] = t._as_mpl_transform(self)
else:
kwargs['transform'] = t
regrid_shape = kwargs.pop('regrid_shape', None)
target_extent = kwargs.pop('target_extent',
self.get_extent(self.projection))
if regrid_shape is not None:
# If regridding is required then we'll be handling transforms
# manually and plotting in native coordinates.
regrid_shape = self._regrid_shape_aspect(regrid_shape,
target_extent)
if args:
# Interpolate color array as well as vector components.
x, y, u, v, c = vector_scalar_to_grid(
t, self.projection, regrid_shape, x, y, u, v, args[0],
target_extent=target_extent)
args = (c,) + args[1:]
else:
x, y, u, v = vector_scalar_to_grid(
t, self.projection, regrid_shape, x, y, u, v,
target_extent=target_extent)
kwargs.pop('transform', None)
elif t != self.projection:
# Transform the vectors if the projection is not the same as the
# data transform.
if x.ndim == 1 and y.ndim == 1:
x, y = np.meshgrid(x, y)
u, v = self.projection.transform_vectors(t, x, y, u, v)
return matplotlib.axes.Axes.barbs(self, x, y, u, v, *args, **kwargs)
def streamplot(self, x, y, u, v, **kwargs):
"""
Draws streamlines of a vector flow.
Extra Kwargs:
* transform: :class:`cartopy.crs.Projection` or matplotlib transform
The coordinate system in which the vector field is defined.
See :func:`matplotlib.pyplot.streamplot` for details on arguments
and keyword arguments.
.. note::
The vector components must be defined as grid eastward and
grid northward.
"""
t = kwargs.pop('transform', None)
if t is None:
t = self.projection
if isinstance(t, ccrs.CRS) and not isinstance(t, ccrs.Projection):
raise ValueError('invalid transform:'
' Spherical streamplot is not supported - '
' consider using PlateCarree/RotatedPole.')
        # Regridding is required for streamplot; it must have an evenly spaced
# grid to work correctly. Choose our destination grid based on the
# density keyword. The grid need not be bigger than the grid used by
# the streamplot integrator.
density = kwargs.get('density', 1)
if np.isscalar(density):
regrid_shape = [int(30 * density)] * 2
else:
regrid_shape = [int(25 * d) for d in density]
# The color and linewidth keyword arguments can be arrays so they will
# need to be gridded also.
c = kwargs.get('color', None)
l = kwargs.get('linewidth', None)
scalars = []
color_array = isinstance(c, np.ndarray)
linewidth_array = isinstance(l, np.ndarray)
if color_array:
scalars.append(c)
if linewidth_array:
scalars.append(l)
# Do the regridding including any scalar fields.
target_extent = self.get_extent(self.projection)
gridded = vector_scalar_to_grid(t, self.projection, regrid_shape,
x, y, u, v, *scalars,
target_extent=target_extent)
x, y, u, v = gridded[:4]
# If scalar fields were regridded then replace the appropriate keyword
# arguments with the gridded arrays.
scalars = list(gridded[4:])
if linewidth_array:
kwargs['linewidth'] = scalars.pop()
if color_array:
kwargs['color'] = ma.masked_invalid(scalars.pop())
with warnings.catch_warnings():
# The workaround for nan values in streamplot colors gives rise to
# a warning which is not at all important so it is hidden from the
# user to avoid confusion.
message = 'Warning: converting a masked element to nan.'
warnings.filterwarnings('ignore', message=message,
category=UserWarning)
sp = matplotlib.axes.Axes.streamplot(self, x, y, u, v, **kwargs)
return sp
def add_wmts(self, wmts, layer_name, **kwargs):
"""
Add the specified WMTS layer to the axes.
This function requires owslib and PIL to work.
Args:
* wmts - The URL of the WMTS, or an
owslib.wmts.WebMapTileService instance.
* layer_name - The name of the layer to use.
All other keywords are passed through to the construction of the
image artist. See :meth:`~matplotlib.axes.Axes.imshow()` for
more details.
"""
from cartopy.io.ogc_clients import WMTSRasterSource
wmts = WMTSRasterSource(wmts, layer_name)
return self.add_raster(wmts, **kwargs)
def add_wms(self, wms, layers, wms_kwargs=None, **kwargs):
"""
Add the specified WMS layer to the axes.
This function requires owslib and PIL to work.
Parameters
----------
wms : string or :class:`owslib.wms.WebMapService` instance
The web map service URL or owslib WMS instance to use.
layers : string or iterable of string
The name of the layer(s) to use.
wms_kwargs : dict or None
Passed through to the
:class:`~cartopy.io.ogc_clients.WMSRasterSource`
constructor's ``getmap_extra_kwargs`` for defining getmap time
keyword arguments.
All other keywords are passed through to the construction of the
image artist. See :meth:`~matplotlib.axes.Axes.imshow()` for
more details.
"""
from cartopy.io.ogc_clients import WMSRasterSource
wms = WMSRasterSource(wms, layers, getmap_extra_kwargs=wms_kwargs)
return self.add_raster(wms, **kwargs)
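    # --- Illustrative usage sketch (not part of the original source; the
    # URLs and layer names below are placeholders, not real endpoints):
    #
    #   ax.add_wmts('https://example.com/wmts.cgi', 'some_layer_name')
    #   ax.add_wms('https://example.com/wms', layers=['some_layer'],
    #              wms_kwargs={'transparent': True})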
# Define the GeoAxesSubplot class, so that a type(ax) will emanate from
# cartopy.mpl.geoaxes, not matplotlib.axes.
class GeoAxesSubplot(matplotlib.axes.SubplotBase, GeoAxes):
_axes_class = GeoAxes
try:
matplotlib.axes._subplots._subplot_classes[GeoAxes] = GeoAxesSubplot
except AttributeError:
matplotlib.axes._subplot_classes[GeoAxes] = GeoAxesSubplot
def _trigger_patch_reclip(event):
"""
Defines an event callback for a GeoAxes which forces the outline and
background patches to be re-clipped next time they are drawn.
"""
axes = event.axes
# trigger the outline and background patches to be re-clipped
axes.outline_patch.reclip = True
axes.background_patch.reclip = True
| gpl-3.0 |
Moriadry/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 29 | 5677 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def cnn_model(features, labels, mode):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
word_vectors,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
        # Add a ReLU for non-linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = tf.estimator.Estimator(model_fn=cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
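A minimal sketch, added for illustration and not part of the record above, of how the estimator and vocabulary processor built in main() could be reused to classify new raw strings. It assumes a TensorFlow 1.x environment matching the example, and that classifier and vocab_processor are the objects created there.
import numpy as np
import tensorflow as tf
def predict_classes(classifier, vocab_processor, texts):
  # Map raw strings to fixed-length word-id sequences, then run prediction.
  x = np.array(list(vocab_processor.transform(texts)))
  input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'words': x}, num_epochs=1, shuffle=False)
  return [p['class'] for p in classifier.predict(input_fn=input_fn)]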
terkkila/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 250 | 2233 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test whether a classification score is significant, one
technique is to repeat the classification procedure after randomly
permuting the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features for make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
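A brief sketch, added for illustration and not part of the example above, showing how the reported p-value relates to the permutation scores: it is the fraction of permuted-label runs whose score reaches the original score, with the +1 correction used by scikit-learn. The variables score, permutation_scores and pvalue are assumed to come from the example above.
import numpy as np
manual_pvalue = (np.sum(permutation_scores >= score) + 1.0) / \
                (len(permutation_scores) + 1.0)
print("manual p-value: %s (reported by sklearn: %s)" % (manual_pvalue, pvalue))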
MJuddBooth/pandas | pandas/core/computation/pytables.py | 1 | 19403 | """ manage PyTables query interface via Expressions """
import ast
from functools import partial
import numpy as np
from pandas._libs.tslibs import Timedelta, Timestamp
from pandas.compat import DeepChainMap, string_types, u
from pandas.core.dtypes.common import is_list_like
import pandas as pd
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.computation import expr, ops
from pandas.core.computation.common import _ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
class Scope(expr.Scope):
__slots__ = 'queryables',
def __init__(self, level, global_dict=None, local_dict=None,
queryables=None):
super(Scope, self).__init__(level + 1, global_dict=global_dict,
local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = StringMixin.__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
super(Term, self).__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
# must be a queryables
if self.side == 'left':
if self.name not in self.env.queryables:
raise NameError('name {name!r} is not defined'
.format(name=self.name))
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
@property
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
def __init__(self, op, lhs, rhs, queryables, encoding):
super(BinOp, self).__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.filter = None
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if (isinstance(left, ConditionBinOp) and
isinstance(right, ConditionBinOp)):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if (isinstance(left, FilterBinOp) and
isinstance(right, FilterBinOp)):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(self.op, left, right, queryables=self.queryables,
encoding=self.encoding).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self):
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self):
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), 'kind', None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), 'meta', None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), 'metadata', None)
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val)
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == u('datetime64') or kind == u('datetime'):
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif kind == u('timedelta64') or kind == u('timedelta'):
v = Timedelta(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == u('category'):
metadata = com.values_from_object(self.metadata)
result = metadata.searchsorted(v, side='left')
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, u('integer'))
elif kind == u('integer'):
v = int(float(v))
return TermValue(v, v, kind)
elif kind == u('float'):
v = float(v)
return TermValue(v, v, kind)
elif kind == u('bool'):
if isinstance(v, string_types):
v = not v.strip().lower() in [u('false'), u('f'), u('no'),
u('n'), u('none'), u('0'),
u('[]'), u('{}'), u('')]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, string_types):
# string quoting
return TermValue(v, stringify(v), u('string'))
else:
raise TypeError("Cannot compare {v} of type {typ} to {kind} column"
.format(v=v, typ=type(v), kind=kind))
def convert_values(self):
pass
class FilterBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Filter : [{lhs}] -> [{op}]"
.format(lhs=self.filter[0], op=self.filter[1]))
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [{slf}]"
.format(slf=self))
rhs = self.conform(self.rhs)
values = [TermValue(v, v, self.kind).value for v in rhs]
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ['==', '!='] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index(values))
return self
return None
# equality conditions
if self.op in ['==', '!=']:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index(values))
else:
raise TypeError("passing a filterable condition to a non-table "
"indexer [{slf}]".format(slf=self))
return self
def generate_filter_op(self, invert=False):
if (self.op == '!=' and not invert) or (self.op == '==' and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Condition : [{cond}]]"
.format(cond=self.condition))
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError("cannot use an invert condition when "
"passing to numexpr")
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [{slf}]"
.format(slf=self))
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ['==', '!=']:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = "({cond})".format(cond=' | '.join(vs))
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = "({lhs} {op} {rhs})".format(lhs=self.lhs.condition,
op=self.op,
rhs=self.rhs.condition)
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != '~':
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
_op_classes = {'unary': UnaryOp}
class ExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super(ExprVisitor, self).__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(self, 'visit_{node}'.format(node=bin_node),
lambda node, bin_op=bin_op: partial(BinOp, bin_op,
**kwargs))
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp('~', self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError('Unary addition not supported')
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],
comparators=[node.value])
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
        # only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError("cannot subscript {value!r} with "
"{slobj!r}".format(value=value, slobj=slobj))
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx.__class__
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {name}"
.format(name=ctx.__name__))
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (Expr, string_types)) or is_list_like(w)):
raise TypeError("where must be passed as a string, Expr, "
"or list-like of Exprs")
return w
class Expr(expr.Expr):
""" hold a pytables like expression, comprised of possibly multiple 'terms'
Parameters
----------
where : string term expression, Expr, or list-like of Exprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
an Expr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
def __init__(self, where, queryables=None, encoding=None, scope_level=0):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict = DeepChainMap()
if isinstance(where, Expr):
local_dict = where.env.scope
where = where.expr
elif isinstance(where, (list, tuple)):
for idx, w in enumerate(where):
if isinstance(w, Expr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
where = ' & '.join(map('({})'.format, com.flatten(where))) # noqa
self.expr = where
self.env = Scope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, string_types):
self.env.queryables.update(queryables)
self._visitor = ExprVisitor(self.env, queryables=queryables,
parser='pytables', engine='pytables',
encoding=encoding)
self.terms = self.parse()
def __unicode__(self):
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid condition".format(expr=self.expr,
slf=self))
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid filter".format(expr=self.expr,
slf=self))
return self.condition, self.filter
class TermValue(object):
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind):
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
if self.kind == u'string':
if encoding is not None:
return self.converted
return '"{converted}"'.format(converted=self.converted)
elif self.kind == u'float':
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return self.converted
def maybe_expression(s):
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, string_types):
return False
ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
# make sure we have an op at least
return any(op in s for op in ops)
| bsd-3-clause |
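A minimal, hedged sketch of how where-expressions like those in the Expr docstring are typically consumed through HDFStore.select. It requires PyTables to be installed; the file name and column names are illustrative assumptions.
import pandas as pd
df = pd.DataFrame({'A': range(10), 'B': range(10)},
                  index=pd.date_range('2012-01-01', periods=10))
with pd.HDFStore('demo.h5') as store:
    store.append('df', df, data_columns=['A', 'B'])
    subset = store.select('df', where="index >= '2012-01-04' & A > 5")
print(subset)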