repo_name (stringlengths 6-92) | path (stringlengths 4-191) | copies (stringclasses, 322 values) | size (stringlengths 4-6) | content (stringlengths 821-753k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
sinharrajesh/dbtools | twitter-analysis/botratings.py | 1 | 6404 | #!/usr/bin/python
# Author: Rajesh Sinha, Karan Narain
# Usage: botrating.py useranalysis0106.csv opfilename endingAt startingFrom
# useranalysis0106.csv is the supplied file which has the preliminary user data
# opfilename is the name of the output file the script will write to
# endingAt is the row at which to stop
# startingFrom is the row from which to start.
# Now we can run this in chunks of 1000 as follows
# botrating.py useranalysis0106.csv first1000.csv 1000 1
# botrating.py useranalysis0106.csv second1000.csv 2000 1001
# botrating.py useranalysis0106.csv third1000.csv 3000 2001
# botrating.py useranalysis0106.csv fourth1000.csv 4000 3001
# ...
# botrating.py useranalysis0106.csv last1000.csv 10000 9001
# Note: it is useful to redirect stdout to a file so that, if anything goes wrong,
# you still keep the output for the records already processed and can merge it manually
#
# botrating.py useranalysis0106.csv first1000.csv 1000 1 > first1000.tmp
# do not redirect the stderr
#
import botometer
import logging
import random
import pandas as pd
import sys
# Your mashape_key generated when you add botometer api to default app
mashape_key = ""
# Your twitter account and app keys here
consumer_key=''
consumer_secret=''
access_token_key=''
access_token_secret=''
#logging level
_loggingLevel = logging.INFO ## How much trace
def connectToBoto(mashape_key, consumer_key, consumer_secret, access_token_key, access_token_secret):
twitter_app_auth = {
'consumer_key': consumer_key,
'consumer_secret': consumer_secret,
'access_token': access_token_key,
'access_token_secret': access_token_secret,
}
try:
bom = botometer.Botometer(mashape_key=mashape_key, **twitter_app_auth)
except (KeyboardInterrupt, SystemExit):
raise
except:
logger.error('Unexpected error: %r', sys.exc_info()[0])
logger.error('Exception in connecting to Boto Mashape Gateway or Twitter')
return None
return bom
def openCsvFile(filename, startIndex, endAt):
logger.info('Passed params %s %d %d', filename, startIndex, endAt)
# Read the input file (useranalysis0106.csv or whatever was supplied)
df = pd.read_csv(filename) # Read all rows; for some reason reading a subset with a lambda was not working
logger.info('Subsetting the dataframe %r at %d:%d', df.shape, startIndex-1, endAt)
df = df[startIndex -1:endAt]
# Add the score-related columns to the dataframe with default values
# If you set anything to 0 you will always get 0 as column gets int
# type and all botometer returns are <0 decimals
df['ID String'] = ''
df['Score-English'] = 0.0
df['Score-Universal'] = 0.0
df['Score-Network'] = 0.0
df['Score-Content'] = 0.0
df['Score-Sentiment'] = 0.0
df['Score-Temporal'] = 0.0
df['Score-Friend'] = 0.0
df['Score-User'] = 0.0
#return the data frame
return df
def checkBotRatings(df):
# Iterate over the dataframe - passing handle to botometer and updating bot scores
for i, row in df.iterrows():
name = row['handle']
try:
result = bom.check_account(name)
except (KeyboardInterrupt, SystemExit):
raise
except:
logger.error('Botometer check failed for %s: %r', name, sys.exc_info()[0])
result = {} # fall back to an empty result so the default scores are written for this handle
id_str = ''
nw = 0
cn = 0
sn = 0
tm = 0
fr = 0
us = 0
sc_en = 0
sc_un = 0
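# Overwrite the defaults only when Botometer returned a user payload; otherwise the empty/zero defaults are recorded for this handle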
if 'user' in result:
if 'id_str' in result['user']:
id_str = result['user']['id_str']
nw = result['categories']['network']
cn = result['categories']['content']
sn = result['categories']['sentiment']
tm = result['categories']['temporal']
fr = result['categories']['friend']
us = result['categories']['user']
sc_en = result['scores']['english']
sc_un = result['scores']['universal']
df.set_value(i,'ID String', id_str)
df.set_value(i,'Score-English' , sc_en)
df.set_value(i,'Score-Universal' , sc_un)
df.set_value(i,'Score-Network' , nw)
df.set_value(i,'Score-Content' , cn)
df.set_value(i,'Score-Sentiment' , sn)
df.set_value(i,'Score-Temporal' , tm)
df.set_value(i,'Score-Friend' , fr)
df.set_value(i,'Score-User' , us)
print("%s,%s,%r,%r,%r,%r,%r,%r,%r,%r" %(name, id_str, sc_en, sc_un, nw, cn, sn, tm, fr, us))
sys.stdout.flush()
return df
def writeToOutputFile(df, opfilename):
# write back to a csv file post reordering of columns in the way we want
df.reindex(columns=['handle','ID String','Tweet','Retweet','Reply','Total','#tweets','#followers','#friends','Joining Date','n-cubit related %age', 'Score-English', 'Score-Universal', 'Score-Network','Score-Content', 'Score-Sentiment','Score-Temporal','Score-Friend','Score-User']).to_csv(opfilename, index=False)
return
def checkUsage(*argv):
USAGE="botrating.py inputfile opfile endAt rowstostartat"
if len(argv) != 5:
logger.error("Invalid No of arguments. Usage is %s", USAGE)
return (False, None, None, None, None)
else:
try:
inputFile = sys.argv[1]
outputFile = sys.argv[2]
endAt = int(sys.argv[3])
rowsToStartAt = int(sys.argv[4])
except (KeyboardInterrupt, SystemExit):
raise
except:
logger.error('Unexpected error: %r', sys.exc_info()[0])
logger.error("Invalid arguments. Usage is %s", USAGE)
return (False, None, None, None, None)
return (True, inputFile, outputFile, endAt, rowsToStartAt)
if __name__ == "__main__":
logger = logging.getLogger(__name__)
logging.basicConfig(level=_loggingLevel)
status, inputFile, outputFile, endAt, rowsToStartAt = checkUsage(*sys.argv)
if status is False:
sys.exit(1)
bom = connectToBoto(mashape_key, consumer_key, consumer_secret, access_token_key, access_token_secret)
if bom:
df = openCsvFile(inputFile, rowsToStartAt, endAt)
df1 = checkBotRatings(df)
writeToOutputFile(df1, outputFile)
sys.exit(0)
else:
logger.error('Unable to connect to Boto and Twitter')
sys.exit(1)
| apache-2.0 |
ljwolf/pysal | pysal/spreg/opt.py | 8 | 2370 | import copy
def simport(modname):
"""
Safely import a module without raising an error.
Parameters
-----------
modname : str
module name needed to import
Returns
--------
tuple of (True, Module) or (False, None) depending on whether the import
succeeded.
Notes
------
Wrapping this function around an iterative context or a with context would
allow the module to be used without necessarily attaching it permanently in
the global namespace:
>>> for t,mod in simport('pandas'):
if t:
mod.DataFrame()
else:
#do alternative behavior here
del mod #or don't del, your call
instead of:
>>> t, mod = simport('pandas')
>>> if t:
mod.DataFrame()
else:
#do alternative behavior here
The first idiom makes it work kind of like a with statement.
"""
try:
exec('import {}'.format(modname))
return True, eval(modname)
except:
return False, None
def requires(*args, **kwargs):
"""
Decorator to wrap functions with extra dependencies:
Arguments
---------
args : list
list of strings containing module to import
verbose : bool
boolean describing whether to print a warning message on import
failure
Returns
-------
The original function if every module in args is importable; otherwise a
placeholder function that reports the missing dependencies and does nothing.
"""
v = kwargs.pop('verbose', True)
wanted = copy.deepcopy(args)
def inner(function):
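# Dependencies are checked once, when the decorator is applied; if they all import, the original function is returned unchanged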
available = [simport(arg)[0] for arg in args]
if all(available):
return function
else:
def passer(*args,**kwargs):
if v:
missing = [arg for i, arg in enumerate(wanted) if not available[i]]
print('missing dependencies: {d}'.format(d=missing))
print('not running {}'.format(function.__name__))
else:
pass
return passer
return inner
if __name__ == '__main__':
@requires('pandas')
def test():
import pandas
print('ASDF')
@requires('thisisnotarealmodule')
def test2():
print('you shouldnt see this')
test()
test2()
| bsd-3-clause |
MarkWieczorek/SHTOOLS | examples/python/GlobalSpectralAnalysis/GlobalSpectralAnalysis.py | 2 | 1591 | #!/usr/bin/env python3
"""
This script tests the different Spherical Harmonics Transforms on the Mars
topography data set
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import pyshtools
from pyshtools import spectralanalysis
from pyshtools import shio
from pyshtools import expand
pyshtools.utils.figstyle()
# ==== MAIN FUNCTION ====
def main():
example()
# ==== PLOT POWER SPECTRA ====
def example():
"""
example that plots the power spectrum of Mars topography data
"""
# --- input data filename ---
infile = os.path.join(os.path.dirname(__file__),
'../../ExampleDataFiles/MarsTopo719.shape')
coeffs, lmax = shio.shread(infile)
# --- plot grid ---
grid = expand.MakeGridDH(coeffs, csphase=-1)
fig_map = plt.figure()
plt.imshow(grid)
# ---- compute spectrum ----
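# 'per_l' gives the total power from all orders at each degree; 'per_lm' gives the average power per coefficient (the 'per_l' value divided by 2l+1)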
ls = np.arange(lmax + 1)
pspectrum = spectralanalysis.spectrum(coeffs, unit='per_l')
pdensity = spectralanalysis.spectrum(coeffs, unit='per_lm')
# ---- plot spectrum ----
fig_spectrum, ax = plt.subplots(1, 1)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('degree l')
ax.grid(True, which='both')
ax.plot(ls[1:], pspectrum[1:], label='power per degree l')
ax.plot(ls[1:], pdensity[1:], label='power per degree l and order m')
ax.legend()
fig_map.savefig('SHRtopography_mars.png')
fig_spectrum.savefig('SHRspectrum_mars.png')
print('mars topography and spectrum saved')
# plt.show()
# ==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
| bsd-3-clause |
ashhher3/scikit-learn | sklearn/ensemble/__init__.py | 44 | 1228 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
liangz0707/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
kalvdans/scipy | scipy/stats/kde.py | 31 | 18766 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from scipy.special import logsumexp
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / norm_const / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
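# Pick kernel centres uniformly from the data and perturb them with zero-mean Gaussian noise whose covariance is the bandwidth-scaled kernel covariance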
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
"""
points = atleast_2d(x)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
energy = zeros((self.n, m), dtype=float)
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy[i] = sum(diff*tdiff,axis=0) / 2.0
result = logsumexp(-energy, b=1/self._norm_factor, axis=0)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = logsumexp(-energy, b=1/self._norm_factor)
return result
| bsd-3-clause |
q1ang/quant_at | TU_mom.py | 2 | 1729 | import matplotlib.pylab as plt
import numpy as np
import pandas as pd
dftu = pd.read_csv('TU.csv')
import corr
res = []
for lookback in [1, 5, 10, 25, 60, 120, 250]:
for holddays in [1, 5, 10, 25, 60, 120, 250]:
df_Close_lookback = dftu.Close.shift(lookback)
df_Close_holddays = dftu.Close.shift(-holddays)
dftu['ret_lag'] = (dftu.Close-df_Close_lookback)/df_Close_lookback
dftu['ret_fut'] = (df_Close_holddays-dftu.Close)/dftu.Close
dfc = dftu[['ret_lag','ret_fut']].dropna()
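# Keep only every k-th observation (k = the shorter of lookback/holddays) to reduce overlap between successive return windows before testing the correlation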
idx = None
if lookback >= holddays:
idx = np.array(range(0,len(dfc.ret_lag), holddays))
else:
idx = np.array(range(0,len(dfc.ret_lag), lookback))
dfc = dfc.ix[idx]
t, x, p = corr.p_corr(dfc.ret_lag, dfc.ret_fut)
res.append([lookback, holddays, t, p])
res = pd.DataFrame(res,columns=['lookback','holddays','correlation','p-value'])
print res[res['lookback'] >= 25]
import dd
def report(df,lookback,holddays):
longs = df.Close > df.Close.shift(lookback)
shorts = df.Close < df.Close.shift(lookback)
df['pos'] = 0.
for h in range(holddays):
long_lag = longs.shift(h).fillna(False)
short_lag = shorts.shift(h).fillna(False)
df.loc[long_lag,'pos'] += 1
df.loc[short_lag,'pos'] -= 1
ret=(df.pos.shift(1)* (df.Close-df.Close.shift(1)) / df.Close.shift(1)) \
/ holddays
cumret=np.cumprod(1+ret)-1
print 'APR', ((np.prod(1.+ret))**(252./len(ret)))-1
print 'Sharpe', np.sqrt(252.)*np.mean(ret)/np.std(ret)
print 'Max Drawdown', dd.calculateMaxDD(np.array(cumret))
return cumret
cumret=report(dftu,lookback = 250,holddays = 25)
plt.plot(cumret)
plt.show()
| gpl-3.0 |
chuckgu/Alphabeta | theano/preprocessing_video.py | 1 | 1567 | # -*- coding: utf-8 -*-
dataset_path='/home/chuckgu/Desktop/project/preprocessing/UCF101/UCF-101-Frames'
import csv
import numpy as np
import cPickle as pkl
import glob
import os
import pandas as pd
def main():
data=[]
lable=[]
index=1
for index,clas in enumerate(sorted(os.listdir(dataset_path))):
for directory in sorted(os.listdir(dataset_path+'/'+clas)):
os.chdir(dataset_path+'/'+clas+'/'+directory)
img=[]
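# Collect frame paths in groups of 16; each complete 16-frame clip becomes one sample labelled with its class index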
for j,ff in enumerate(sorted(glob.glob("*.jpg"))):
print ff,index
#img.append(img_to_array(load_img(ff)))
img.append(dataset_path+'/'+clas+'/'+directory+'/'+ff)
if (j+1)%16==0:
data.append(img)
lable.append(index)
img=[]
#if index==9: break
#data=np.asarray(data)
n_samples = len(data)
sidx = np.random.permutation(n_samples)
n_train = int(np.round(n_samples * 0.2))
train_x = [data[s] for s in sidx[n_train:]]
train_y = [lable[s] for s in sidx[n_train:]]
test_x = [data[s] for s in sidx[:n_train]]
test_y = [lable[s] for s in sidx[:n_train]]
currdir = os.getcwd()
os.chdir('%s/' % '/home/chuckgu/Desktop/project/Alphabeta/data')
print n_samples,max(lable)
print 'Saving..'
f = open('ucf_data_all.pkl', 'wb')
pkl.dump(((train_x,train_y),(test_x,test_y)), f, -1)
f.close()
return data,lable
if __name__ == '__main__':
data,label=main()
#print len(set(y))
| gpl-3.0 |
bthirion/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 63 | 3231 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
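# Annotate each cell, switching the text colour at half the maximum count so labels stay readable on both dark and light cells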
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
tmeits/pybrain | examples/rl/environments/shipsteer/shipbench_sde.py | 26 | 3454 | from __future__ import print_function
#!/usr/bin/env python
#########################################################################
# Reinforcement Learning with SPE on the ShipSteering Environment
#
# Requirements:
# pybrain (tested on rev. 1195, ship env rev. 1202)
# Synopsis:
# shipbenchm.py [<True|False> [logfile]]
# (first argument is graphics flag)
#########################################################################
__author__ = "Martin Felder, Thomas Rueckstiess"
__version__ = '$Id$'
#---
# default backend GtkAgg does not plot properly on Ubuntu 8.04
import matplotlib
matplotlib.use('TkAgg')
#---
from pybrain.rl.environments.shipsteer import ShipSteeringEnvironment
from pybrain.rl.environments.shipsteer import GoNorthwardTask
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners.directsearch.enac import ENAC
from pybrain.rl.experiments.episodic import EpisodicExperiment
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.plotting import MultilinePlotter
from pylab import figure, ion
from scipy import mean
import sys
if len(sys.argv) > 1:
useGraphics = eval(sys.argv[1])
else:
useGraphics = False
# create task
env=ShipSteeringEnvironment()
maxsteps = 500
task = GoNorthwardTask(env=env, maxsteps = maxsteps)
# task.env.setRenderer( CartPoleRenderer())
# create controller network
#net = buildNetwork(task.outdim, 7, task.indim, bias=True, outputbias=False)
net = buildNetwork(task.outdim, task.indim, bias=False)
#net.initParams(0.0)
# create agent
learner = ENAC()
learner.gd.rprop = True
# only relevant for RP
learner.gd.deltamin = 0.0001
#agent.learner.gd.deltanull = 0.05
# only relevant for BP
learner.gd.alpha = 0.01
learner.gd.momentum = 0.9
agent = LearningAgent(net, learner)
agent.actaspg = False
# create experiment
experiment = EpisodicExperiment(task, agent)
# print weights at beginning
print(agent.module.params)
rewards = []
if useGraphics:
figure()
ion()
pl = MultilinePlotter(autoscale=1.2, xlim=[0, 50], ylim=[0, 1])
pl.setLineStyle(linewidth=2)
# queued version
# experiment._fillQueue(30)
# while True:
# experiment._stepQueueLoop()
# # rewards.append(mean(agent.history.getSumOverSequences('reward')))
# print agent.module.getParameters(),
# print mean(agent.history.getSumOverSequences('reward'))
# clf()
# plot(rewards)
# episodic version
x = 0
batch = 30 #number of samples per gradient estimate (was: 20; more here due to stochastic setting)
while x<5000:
#while True:
experiment.doEpisodes(batch)
x += batch
reward = mean(agent.history.getSumOverSequences('reward'))*task.rewardscale
if useGraphics:
pl.addData(0,x,reward)
print(agent.module.params)
print(reward)
#if reward > 3:
# pass
agent.learn()
agent.reset()
if useGraphics:
pl.update()
if len(sys.argv) > 2:
agent.history.saveToFile(sys.argv[1], protocol=-1, arraysonly=True)
if useGraphics:
pl.show( popup = True)
#To view what the simulation is doing at the moment, set the environment with True, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL must be installed, see PyBrain documentation)
## performance:
## experiment.doEpisodes(5) * 100 without weave:
## real 2m39.683s
## user 2m33.358s
## sys 0m5.960s
## experiment.doEpisodes(5) * 100 with weave:
##real 2m41.275s
##user 2m35.310s
##sys 0m5.192s
##
| bsd-3-clause |
spbguru/repo1 | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th differnce of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degress counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodiogram
csd - the cross spectral density using average periodiogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodiogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| gpl-3.0 |
panda0881/pycharmtesting | sentiment_analysis_testing.py | 1 | 9976 | from textblob import TextBlob
import pandas as pd
testwords = ['good is bad, bad is good good', 'hello', 'fucking', 'best', 'beautiful', 'bad', 'wonderful', 'horrible',
'haha', 'ok', 'accaptable', 'jnfjanfjanfja']
testparagraph = """
Google was bound to make it in here somehow. Here are some intern perks at Google:
1. Google pays for flights from anywhere in the world to your office and from your office to anywhere in the world,
before and after your internship. (This is standard for most companies)
2. Google gives us free, and in my opinion, luxury housing. Although we share an apartment we three others, it's
equipped with a nice TV, a patio, a kitchen with a dishwasher, 2 baths, a washer and dryer, and biweekly cleaning. We
also have access to a 24-hour gym, a hot tub, a swimming pool, a games room, and a park.
3. Google buses pick us from corp housing and drop us back to corp housing many times during the day.
4. Google bikes are temporary bikes available in and around Google to use to cycle around campus. You can rent a bike
for free.
5. Google has over 20 gourmet cafeterias all over campus with almost all types of cuisine almost everyday. They serve 3
meals on all weekdays, with few exceptions.
6. Everyone is less than 100 feet away from a microkitchen, stuffed with all sorts of snacks, fruits and drinks. They
also come with a automatic coffee machine and an espresso machine. If there's something you want in your microkitchen,
it can be asked for.
7. Chess tables, board games, pool tables, table tennis tables and swimming pools can be found frequently around campus.
You're encouraged to use them, during work.
8. Interns get an hours worth of massage credits and get a professional massage. Massage chairs are scattered around
campus in case you want something automatic.
9. Weekly TGIF involves wine, beer, watching Larry and Sergey address the company, possibly asking them questions and
more. During work.
10. No work hours. Come and go when you want - just get work done and be present virtually at your meetings.
11. Request any electronic item you might need for use for your entire internship. Usually work related, but includes
laptops of choice, headphones, etc. You get to keep some of them. Interns can work on a chromebook, a chrome book pixel
or a 15" inch retina MacBook Pro, as of 2013.
12. Dogfood the newest stuff google makes.
13. Attend special internal hackathons to be the first to work on Google's coolest products.
14. Watch the first Loon launch.
15. Need to run errands at work? Borrow a google car and go anywhere you want for any amount of time.
16. The office never closes. Stay all night!
17. Nap pods. Sleep at work, in style.
18. Intern Boat Cruise in the bay. As crazy as they get.
19. Great pay on top of all the free stuff.
20. Heated toilet seats at work.
21. No clothing guidelines (this is the norm at most tech companies). Hair color, tattoos, piercings - it all runs as
long as you code.
22. The best internal tools. Can't say much more.
23. Volleyball courts, Soccer fields, and intra company sporting competitions. I'm sure they have more facilities I'm
not even aware of.
24. There are 5 or more full fledged high tech gyms at google including outdoor pull up bars and what not. When I say
high tech, I mean they count your reps for you. Free gym classes for everything you can imagine.
25. Free classes for random things - from python and C++ to salsa and more. You can take breaks from work to learn
something cool.
26. Free Google swag. Interns get a T shirt and a Patagonia backpack and a hoodie. Plus, you get to keep headphones and
if you're lucky, more valuable freebies.
27. You get to have a full fledge Hollywood movie featuring Owen Wilson and Vince Vaughn based on how cool your job is,
albeit more than slightly exaggerated. You also get free tickets to the red carpet premier a week before release. So
what if it's a crappy movie? Unlike Jobs or The Social Network, this is about the interns! It's about you.
28. Getting a full time job at google is very in demand and as a result, very hard. I won't reveal numbers but it is
orders if magnitude harder than the most selective college in America. Converting from an internship is much easier,
and that extra boost is great to have especially in a market where "Ex-Googler" is a status symbol.
29. Get to meet some legends. Just by being a little pushy and very lucky, you can easily set up meetings with Ken
Thompson and Jeff Dean. It's much easier to set up people with lesser known people all around google and just talk about
their projects and technology.
30. Last, but not least. The biggest perk of being at google are the people. The interns are extremely smart and
passionate but sociable and nice as well. The full timers are amazing too. All companies have great engineers,
but at Google you're surrounded by a city of so many of the smartest software engineers shaping the forefront of
technology today. The sheer quantity is mesmerizing. They are so well read (in code) and knowledgeable and very helpful.
If you make use of it, it's like infinite office hours with a professor who's always at your service!
Edit:
31. On-site haircuts two days a week, with professional stylists.
32. On-site laundry if you so please.
33. "Park at Google when you go to concerts at Shoreline. Also, pick up free drinks and snacks at Google at the same
time. Sometimes it's nice, after the concert, to play a game of pool or something with your friends while the
concertgoers are stuck in traffic." - Iain McClatchie
This summer they had artists ranging from John Mayer to Brad Paisley to Wiz Khalifa.
34. If you're lost or need any transport, you can call the GCar or the GShuttle to pick you up if you're anywhere around
campus.
187.9k Views · View Upvotes
Upvote2.7kDownvoteComments27+
Share
Bogdan Cristian Tătăroiu
Bogdan Cristian Tătăroiu, Intern at Dropbox, formerly at Twitter and Facebook
Updated Aug 15, 2013 · Featured in Forbes · Upvoted by Oliver Emberton, Founder of Silktide and Ryhan Hassan, Interned
at Apple, Google. Joining Dropbox.
Dropbox has by far the most perks I've seen in any Silicon Valley company.
The major event that stood out to me this summer was Parent's Weekend, where they flew out all intern parents down to
their San Francisco office, housed them for 2 nights, organised a bunch of talks explaining Dropbox to them, where we
stand now, our future products, our vision etc. and basically helped them understand why all of us working here are so
excited about what we're doing.
It was an awesome family-like experience all round and for me personally it was made even better by the fact that it was
my father's first trip to the United States and my mother's second and they finally got to understand why I chose to do
what I do and be where I am right now.
Other than that:
They completely cover your housing - either 2 people in a 2 bedroom apartment or, if you're lucky, 1 person in a 1
bedroom apartment.
They have shuttles which pick you up from corporate housing locations and take you back from the office to _anywhere_
in SF.
The Tuckshop (our in-house restaurant) literally makes better food than I find in most restaurants I eat in over the
weekend in SF.
They cover expenses: phone plan, caltrain gopass, muni & bart pass, flights.
Giant music room with everything from grand piano to electric guitars and drumset
Massages, haircuts, professional ping pong training, on-site gym.
No work hours - come and go as you please.
We host Hack Week, where the entire company stops what they are normally doing, brings in guests (expenses covered) and
works on anything.
The quality of the people you work with is incredible. Every once in a while there comes a tech company that becomes a
magnet for engineering talent - first it was Google, then it was Facebook, now Dropbox seems to be following in their
footsteps.
We have an internal joke that if the file syncing business goes bust, we can just turn into a restaurant and t-shirt
company and we'll be fine. That's reflected in the amount of swag you get here.
Request anything from IT department (we got StarCraft II licences for a hack week AI).
100$ monthly Exec credit
Saving the best for last, you can set your own Dropbox space quota for life.
The list goes on and on and while some of the perks I mentioned can be found at other companies, if you actually see
them first hand, they always have a slight twist which makes them even better.
"""
blob = TextBlob(testparagraph)
# blob = blob.correct()
words = list(blob.tags)
word_type_list = ['JJ', 'NN', 'NR', 'NT', 'PN', 'AD']
words2 = list()
pair_list = list()
for i in range(0, len(words)):
if words[i][1] in word_type_list:
# print(words[i])
words2.append(words[i])
last_noun_position = 0
last_PN_position = 0
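# Pair every adjective (JJ) seen since the last noun/pronoun boundary with the next noun (NN/NR/NT); when a pronoun (PN) appears first, the pending adjectives attach to the most recent noun instead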
for i in range(0, len(words2)):
if last_noun_position > last_PN_position:
last_position = last_noun_position
else:
last_position = last_PN_position
if words2[i][1] in ['NN', 'NR', 'NT']:
for j in range(last_position, i):
if words2[j][1] == 'JJ':
pair_list.append((words2[j], words2[i]))
last_noun_position = i
elif words2[i][1] == 'PN':
for j in range(last_position, i):
if words2[j][1] == 'JJ':
pair_list.append((words2[j], words2[last_noun_position]))
last_PN_position = i
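# Sum adjective polarities per noun so the nouns can be ranked from most positive to most negative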
result = dict()
for pair in pair_list:
if pair[1][0] not in result:
result[pair[1][0]] = TextBlob(pair[0][0]).sentiment.polarity
else:
result[pair[1][0]] += TextBlob(pair[0][0]).sentiment.polarity
result = pd.Series(result)
result.sort_values(ascending=False, inplace=True)
positive_reason = result[:5]
negative_reason = result[-5:].sort_values()
print('Top five positive reasons: ')
print(positive_reason)
print('Top five negative reasons: ')
print(negative_reason)
print('end')
| apache-2.0 |
zzcclp/spark | python/pyspark/pandas/tests/test_categorical.py | 14 | 16649 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class CategoricalTest(PandasOnSparkTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(
["b", "a", "c", "c", "b", "a"], categories=["c", "b", "d", "a"]
),
},
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
return self.pdf, self.psdf
def test_categorical_frame(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b, pdf.b)
self.assert_eq(psdf.index, pdf.index)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
def test_categorical_series(self):
pser = pd.Series([1, 2, 3], dtype="category")
psser = ps.Series([1, 2, 3], dtype="category")
self.assert_eq(psser, pser)
self.assert_eq(psser.cat.categories, pser.cat.categories)
self.assert_eq(psser.cat.codes, pser.cat.codes)
self.assert_eq(psser.cat.ordered, pser.cat.ordered)
def test_astype(self):
pser = pd.Series(["a", "b", "c"])
psser = ps.from_pandas(pser)
self.assert_eq(psser.astype("category"), pser.astype("category"))
self.assert_eq(
psser.astype(CategoricalDtype(["c", "a", "b"])),
pser.astype(CategoricalDtype(["c", "a", "b"])),
)
pcser = pser.astype(CategoricalDtype(["c", "a", "b"]))
kcser = psser.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(kcser.astype("category"), pcser.astype("category"))
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pcser.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pser.astype(CategoricalDtype(["b", "c", "a"])),
)
self.assert_eq(kcser.astype(str), pcser.astype(str))
def test_factorize(self):
pser = pd.Series(["a", "b", "c", None], dtype=CategoricalDtype(["c", "a", "d", "b"]))
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pser.factorize(na_sentinel=-2)
kcodes, kuniques = psser.factorize(na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
def test_frame_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.apply(lambda x: x).sort_index(), pdf.apply(lambda x: x).sort_index())
self.assert_eq(
psdf.apply(lambda x: x, axis=1).sort_index(),
pdf.apply(lambda x: x, axis=1).sort_index(),
)
def test_frame_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply()
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c"])
def categorize(ser) -> ps.Series[dtype]:
return ser.astype(dtype)
self.assert_eq(
psdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
pdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.transform(lambda x: x), pdf.transform(lambda x: x))
self.assert_eq(psdf.transform(lambda x: x.cat.codes), pdf.transform(lambda x: x.cat.codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.transform(lambda x: x.astype(dtype)).sort_index(),
pdf.transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_frame_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_transform()
pdf, psdf = self.df_pair
def codes(pser) -> ps.Series[np.int8]:
return pser.cat.codes
self.assert_eq(psdf.transform(codes), pdf.transform(codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def to_category(pser) -> ps.Series[dtype]:
return pser.astype(dtype)
self.assert_eq(
psdf.transform(to_category).sort_index(), pdf.transform(to_category).sort_index()
)
def test_series_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.apply(lambda x: x).sort_index(), pdf.a.apply(lambda x: x).sort_index()
)
def test_series_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_series_apply()
pdf, psdf = self.df_pair
ret = psdf.a.dtype
def identity(pser) -> ret:
return pser
self.assert_eq(psdf.a.apply(identity).sort_index(), pdf.a.apply(identity).sort_index())
# TODO: The return type is still category.
# def to_str(x) -> str:
# return str(x)
#
# self.assert_eq(
# psdf.a.apply(to_str).sort_index(), pdf.a.apply(to_str).sort_index()
# )
def test_groupby_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").apply(lambda df: df).sort_index(),
pdf.groupby("a").apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
pdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
pdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
pdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
)
self.assert_eq(
psdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
pdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
)
# TODO: grouping by a categorical type sometimes preserves unused categories.
# self.assert_eq(
# psdf.groupby("a").apply(len).sort_index(), pdf.groupby("a").apply(len).sort_index(),
# )
def test_groupby_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_apply()
pdf, psdf = self.df_pair
def identity(df) -> ps.DataFrame[zip(psdf.columns, psdf.dtypes)]:
return df
self.assert_eq(
psdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
pdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_groupby_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").transform(lambda x: x).sort_index(),
pdf.groupby("a").transform(lambda x: x).sort_index(),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
pdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_groupby_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_transform()
pdf, psdf = self.df_pair
def identity(x) -> ps.Series[psdf.b.dtype]: # type: ignore
return x
self.assert_eq(
psdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
pdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def astype(x) -> ps.Series[dtype]:
return x.astype(dtype)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
pdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
)
else:
expected = pdf.groupby("a").transform(astype)
expected["b"] = dtype.categories.take(expected["b"].cat.codes).astype(dtype)
self.assert_eq(
psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
expected.sort_values("b").reset_index(drop=True),
)
def test_frame_apply_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
def test_frame_apply_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply_batch()
pdf, psdf = self.df_pair
def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]': # noqa: F405
return pdf.astype(str)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(to_str).sort_values(["a", "b"]).reset_index(drop=True),
to_str(pdf).sort_values(["a", "b"]).reset_index(drop=True),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
ret = ps.DataFrame["a":dtype, "b":dtype]
def to_category(pdf) -> ret:
return pdf.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(to_category)
.sort_values(["a", "b"])
.reset_index(drop=True),
to_category(pdf).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b.cat.codes).sort_index(),
pdf.b.cat.codes.sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b.astype(dtype)).sort_index(),
pdf.b.astype(dtype).sort_index(),
)
def test_frame_transform_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_transform_batch()
pdf, psdf = self.df_pair
def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]': # noqa: F405
return pdf.astype(str)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_str).sort_index(),
to_str(pdf).sort_index(),
)
def to_codes(pdf) -> ps.Series[np.int8]:
return pdf.b.cat.codes
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_codes).sort_index(),
to_codes(pdf).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
ret = ps.DataFrame["a":dtype, "b":dtype]
def to_category(pdf) -> ret:
return pdf.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf).sort_index(),
)
def to_category(pdf) -> ps.Series[dtype]:
return pdf.b.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf).rename().sort_index(),
)
def test_series_transform_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(lambda pser: pser.astype(str)).sort_index(),
pdf.a.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(lambda pser: pser.astype(dtype)).sort_index(),
pdf.a.astype(dtype).sort_index(),
)
def test_series_transform_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_series_transform_batch()
pdf, psdf = self.df_pair
def to_str(pser) -> ps.Series[str]:
return pser.astype(str)
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(to_str).sort_index(), to_str(pdf.a).sort_index()
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def to_category(pser) -> ps.Series[dtype]:
return pser.astype(dtype)
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf.a).sort_index(),
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_categorical import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
andrewruba/YangLab | JPC simulations 2019/Figure 4 - precision/simulation.py | 2 | 14336 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 20 10:52:38 2017
@author: Andrew Ruba
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import random
import csv
import os
from numpy.random import choice
import numpy as np
from scipy.optimize import curve_fit
import time
import matplotlib.pyplot as plt
from scipy import stats
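## Overview: points are scattered on a ring of the given radius, Gaussian
## localization error is added, the projected positions are histogrammed, and
## the histogram is deconvolved with a matrix of concentric ring-section areas
## to recover the radial density, which is then fit with single- and
## double-peaked Gaussians.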
## below arrays are for saving simulation data for statistical analysis
global gausslist
gausslist = []
global bimodallist
bimodallist = []
global bimodalmean
bimodalmean = []
global bimodalsd
bimodalsd = []
global bimodalheight
bimodalheight = []
global bimodalauc
bimodalauc = []
def sim(gui, PTNUM, RADIUS, PREC, ITER, BINSIZE, PERCERROR):
def simulation(num_points, radius, dr, ss, mm):
def area_fn(X):
X = float(X)
A = -(dr**2)*np.pi
B = dr*2*np.pi
return X*B+A
def gauss_fn(x, s, m):
a = area_fn(m)
x = float(x)
s = float(s)
m = float(m)
return a*np.e**(-(x-m)**2.0/(2.0*s**2.0))
def combine(x):
s = ss
m = mm
return (area_fn(x) * gauss_fn(x, s, m))
##starting with perfect x,y and adding error
xydata = []
mm = mm + 0.00001
while len(xydata) < num_points:
theta = np.random.random()*360.0
## precision distribution sampling
# ss = choice([3,5,7,9], p=[0.1475,0.2775,0.3075,0.2675])
# ss = choice([4.5,5.5,6.5,7.5,8.5,9.5], p=[0.02,0.05,0.07,0.11,0.2,0.55])
y_prec = np.random.normal(0.0, ss)
z_prec = np.random.normal(0.0, ss)
xydata.append((mm*np.cos(theta)+y_prec, mm*np.sin(theta)+z_prec))
with open('3d.csv', 'w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', lineterminator='\n')
writer.writerow(('y','z'))
for i in xydata:
writer.writerow(i)
def gen_matrix(r, d_r):
##'be' is short for bin edges
if r%d_r > 0:
be = range(0, r+r%d_r, d_r)
else:
be = range(0, r+d_r, d_r)
matrix = []
for i in range(len(be)-1):
matrix.append([])
x = 0
for i in range(len(matrix)):
for j in range(x):
matrix[i].append(0)
x += 1
##generate areas of sections closest to x axis
for i in range(len(matrix)):
theta = np.arccos(float(be[len(be)-2-i])/float(be[len(be)-1-i]))
arc_area = (theta/(2*np.pi)) * np.pi * float(be[len(be)-1-i])**2
tri_area = 0.5 * float(be[len(be)-2-i]) * (np.sin(theta) * float(be[len(be)-1-i]))
matrix[i].append(4 * (arc_area - tri_area))
##skipping factor
x = 2
##generate areas of layers going further out from x axis
while len(matrix[0]) < len(matrix):
for i in range(len(matrix) - len(matrix[0])):
num = 0
for j in range(len(matrix)):
for k in range(len(matrix[i]) + 1):
if j == i and k < len(matrix[i]):
num += matrix[j][k]
elif j > i:
num += matrix[j][k]
theta = np.arccos(float(be[len(be)-1-x-i])/float(be[len(be)-1-i]))
arc_area = (theta/(2*np.pi)) * np.pi * float(be[len(be)-1-i])**2
tri_area = 0.5 * float(be[len(be)-1-x-i]) * (np.sin(theta) * float(be[len(be)-1-i]))
matrix[i].append(4 * (arc_area - tri_area) - num)
x += 1
return matrix
def smoothdata(data, r, d_r):
"""smoothds data with 3 moving window and takes abs value average"""
smooth_data = []
r += 1
##comment out for smoothing
smooth_data = []
for i in range(len(data)):
smooth_data.append(data[i])
##adds + and - bins
final_smooth_data = []
for i in range(int(r/d_r)):
final_smooth_data.append(smooth_data[i] + smooth_data[len(smooth_data)-1-i])
return list(reversed(final_smooth_data))
def deconvolution(hv, be, r, d_r):
"""hv = hist_values, be = bin_edges"""
density = []
matrix = gen_matrix(r, d_r)
while len(hv) > len(matrix):
hv.pop()
while len(matrix) > len(hv):
matrix.pop()
rev_hv = list(reversed(hv))
x = 0
for i in range(len(rev_hv)):
##calculate how much to subtract from bin
density_sub = 0
y = 0
for j in range(x):
density_sub += density[y] * matrix[j][i]
y += 1
##calculate final bin value
density.append((rev_hv[i] - density_sub) / matrix[i][i])
x += 1
unrev_hv = list(reversed(density))
smooth_data = []
for i in range(len(unrev_hv)):
if i == 0 or i == (len(unrev_hv) - 1):
smooth_data.append(unrev_hv[i])
else:
smooth_data.append(np.average([unrev_hv[i-1], unrev_hv[i], unrev_hv[i+1]]))
return unrev_hv, smooth_data, hv
def make_hist(data, r, d_r):
hist_values, bin_edges = np.histogram(data, bins = 2 * int(r/d_r), range = (-r, r))
new_bin_edges = []
for i in bin_edges:
if i >= 0:
new_bin_edges.append(i)
new_hist_values = smoothdata(hist_values, r, d_r)
return new_hist_values, new_bin_edges
def csv_read(path):
with open(path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
holdlist = []
for row in reader:
holdlist.append(float(row[1]))
return holdlist
jkl = []
for y,z in xydata:
jkl.append(y)
radius = int(np.floor(radius/dr))*dr
if num_points == PTNUM + 1:
## decide the proper bin size
minbinsize = 2
binsizes = []
binsizesdata = [[] for variable in range(1, int(PREC)+1)]
gui.message.set('0% done calculating ideal bin size...')
gui.update()
for binoptimization in range(10):
for binsize in range(1, int(PREC)+1):
if binsize >= minbinsize:
error = 0
# print ('binsize ' + str(binsize))
jkl = []
mm = mm + 0.00001
while len(jkl) < num_points-1:
theta = np.random.random()*360.0
## precision distribution sampling
# ss = choice([3,5,7,9], p=[0.1475,0.2775,0.3075,0.2675])
# ss = choice([4.5,5.5,6.5,7.5,8.5,9.5], p=[0.02,0.05,0.07,0.11,0.2,0.55])
y_prec = np.random.normal(0.0, ss)
jkl.append(mm*np.cos(theta)+y_prec)
a,b = make_hist(jkl, radius, binsize)
final_unsmooth, final_smooth, final_2d = deconvolution(a, b, radius, binsize)
holdlist = []
addZero = False
for val in list(reversed(final_unsmooth)):
if not addZero:
if val >= 0.0:
holdlist.append(val)
else:
addZero = True
holdlist.append(0.0)
else:
holdlist.append(0.0)
final_unsmooth = list(reversed(holdlist))
##rescale ideal data
matrix = gen_matrix(radius, binsize)
newmatrix = []
for i in matrix:
newmatrix.append(list(reversed(i)))
matrix = list(reversed(newmatrix))
# print (a)
# print (final_unsmooth)
while len(a) > len(matrix):
a.pop()
while len(matrix) > len(a):
matrix.pop()
for ncol in range(len(matrix[0])):
binsub = 0.0
for mcol in range(len(matrix)):
binsub += float(matrix[mcol][ncol]*final_unsmooth[mcol])
try:
if a[ncol] != 0.0:
# print (binsub)
error += np.square(a[ncol] - binsub) / a[ncol]
except:
pass
popped = a.pop()
while popped == 0:
popped = a.pop()
binsizesdata[binsize-1].append((error, len(a)+1,1-stats.chi2.cdf(error, len(a)+1),binsize))
else:
binsizesdata[binsize-1].append((1000000.0,1,0.0,binsize))
gui.message.set(str((binoptimization*10) + 10) + ' % done calculating ideal bin size...')
gui.update()
finalbinsizes = []
for bintrial in range(len(binsizesdata)):
errhold = []
dfhold = []
pvalhold = []
binhold = []
for trial in range(len(binsizesdata[bintrial])):
chisq, df, pval, binsize = binsizesdata[bintrial][trial]
errhold.append(chisq)
dfhold.append(df)
pvalhold.append(pval)
binhold.append(binsize)
chisq = np.average(errhold)
df = np.round(np.average(dfhold))
pval = 1-stats.chi2.cdf(chisq,df)
binsize = binhold[0]
finalbinsizes.append((chisq,df,pval,binsize))
# print (finalbinsizes)
for binsizedata in finalbinsizes:
chisq, df, pval, binsize = binsizedata
if pval >= 0.95:
dr = binsize
break
else:
dr = int(PREC)
a,b = make_hist(jkl, radius, dr)
final = deconvolution(a,b,radius,dr)
if num_points != PTNUM + 1:
def gauss_fn(x, a, s, m):
return a*np.e**(-(x-m)**2.0/(2.0*s**2.0))
def bimodal(x,mu1,sigma1,A1,mu2,sigma2,A2):
return gauss_fn(x, A1, sigma1, mu1)+gauss_fn(x, A2, sigma2, mu2)
try:
guess = [np.max(final[0]), ss, mm]
tempbins = list(range(int(dr/2), radius+int(dr/2), dr))
tempdensity = final[0]
holdlist = []
addZero = False
for val in list(reversed(tempdensity)):
if not addZero:
if val >= 0.0:
holdlist.append(val)
else:
addZero = True
holdlist.append(0.0)
else:
holdlist.append(0.0)
tempdensity = list(reversed(holdlist))
while len(tempdensity) > len(tempbins):
tempdensity.pop()
while len(tempbins) > len(tempdensity):
tempbins.pop()
revtempbins = list(np.negative(list(reversed(tempbins))))
revtempdensity = list(reversed(tempdensity))
bins = revtempbins + tempbins
density = revtempdensity + tempdensity
params, var = curve_fit(gauss_fn, bins, density, p0 = guess)
params_gauss = np.abs(params)
## computes 1 SD errors
var_gauss = np.sqrt(np.diag(var))
def frange(beg, end, step):
f_range = []
while beg < end - (step/2.0):
f_range.append(beg)
beg += step
return f_range
guess = [-mm, ss, np.max(final[0]), mm, ss, np.max(final[0])]
tempbins = frange(dr/2.0, radius, dr)
tempdensity = final[0]
holdlist = []
addZero = False
for val in list(reversed(tempdensity)):
if not addZero:
if val >= 0.0:
holdlist.append(val)
else:
addZero = True
holdlist.append(0.0)
else:
holdlist.append(0.0)
tempdensity = list(reversed(holdlist))
while len(tempdensity) > len(tempbins):
tempdensity.pop()
while len(tempbins) > len(tempdensity):
tempbins.pop()
revtempbins = list(np.negative(list(reversed(tempbins))))
revtempdensity = list(reversed(tempdensity))
bins = revtempbins + tempbins
density = revtempdensity + tempdensity
params, var = curve_fit(bimodal, bins, density, p0 = guess)
params = np.abs(params)
## computes 1 SD errors
var = np.sqrt(np.diag(var))
## average paramters
stdev = np.average((params[1], params[4]))
mean = np.average((params[0], params[3]))
height = np.average((params[2], params[5]))
stdev_e = np.average((var[1], var[4]))
mean_e = np.average((var[0], var[3]))
height_e = np.average((var[2], var[5]))
params_bimodal = [height, stdev, mean]
var_bimodal = [height_e, stdev_e, mean_e]
## uncomment following for comparing central vs. peripheral peak fitting errors
# bimodalmean.append(params_gauss[0])
bimodalmean.append(mean)
# bimodalmean.append(tempdensity)
bimodalsd.append(stdev)
bimodalheight.append(height)
auc = 0.0
step = mean - 5.0*stdev
while step < mean + 5.0*stdev:
auc+=0.01*gauss_fn(step,height,stdev,mean)
step += 0.01
bimodalauc.append(auc)
# bimodallist.append(var_bimodal[1])
gausslist.append(var_gauss[1])
# if np.sum(var_bimodal) < np.sum(var_gauss):
params = params_bimodal
var = var_bimodal
# else:
# params = params_gauss
# var = var_gauss
except RuntimeError:
params = []
var = []
hist_mids = []
for i in range(len(b)-1):
hist_mids.append(np.average((b[i],b[i+1])))
norm_values = []
for i in final[0]:
norm_values.append(i/np.max(final[0]))
return params, var, norm_values, hist_mids, dr
else:
return dr
pt_min = PTNUM
pt_max = PTNUM
rt_min = RADIUS
rt_max = RADIUS
prec_min = PREC
prec_max = PREC
iterations = ITER
PREC = float(PREC)
one_diff = []
perc_err = PERCERROR*0.01
def roundup(x):
val = int(math.ceil(x / 10.0)) * 10
if val >= 30:
return val
else:
return 30
ptlist = range(pt_min, pt_max+100, 100)
for pt in ptlist:
for rt in range(rt_min, rt_max+1, 1):
for prec in range(prec_min, prec_max+1, 1):
prec = prec+0.000001
xrng = roundup(2.0*rt + prec*5.0)
# DR = simulation(pt+1, xrng, BINSIZE, float(prec), float(rt))
## uncomment below to manually set bin size
DR = PTNUM
# print ('ideal bin size: '+ str(DR))
p, v, d, h_m, DR = simulation(1000000, xrng, DR, float(prec), float(rt))
# print (p)
a, s, m = p
corr = m - float(rt)
##outlier detection
_mean = np.mean(bimodalmean)
_stdev = np.std(bimodalmean)
togui = []
for i in bimodalmean:
if i >= _mean-3*_stdev and i <= _mean+3*_stdev:
togui.append(i)
final_data = []
for i,j in zip(h_m, d):
final_data.append((i,j))
final_data.append((-i,j))
final_data.sort()
with open('precision_results.csv', 'w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', lineterminator='\n')
writer.writerow(('bin middle','normalized density'))
for i in final_data:
writer.writerow(i)
return "Output written to precision_results.csv" | gpl-3.0 |
rahul-c1/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 10 | 8105 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette
Coefficient. If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
    if not 2 <= n_labels <= n_samples - 1:
        raise ValueError("Number of labels is %d. Valid values are 2 "
                         "to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
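# A minimal usage sketch (illustrative only; the helper name and the toy data
# below are assumptions, not part of this module's API): two compact,
# well-separated clusters should give a score close to 1.
def _silhouette_score_example():
    X = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1],
                  [5.0, 5.0], [5.1, 5.0], [5.0, 5.1]])
    labels = np.array([0, 0, 0, 1, 1, 1])
    # mean coefficient over all six samples
    return silhouette_score(X, labels, metric='euclidean')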
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
    The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
# nan values are for clusters of size 1, and should be 0
return np.nan_to_num(sil_samples)
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
flightgong/scikit-learn | sklearn/pipeline.py | 8 | 16439 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
# One round of beers on me if someone finds out why the backslash
# is needed in the Attributes section so as not to upset sphinx.
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
    must implement fit and transform methods.
    The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Parameters
----------
steps: list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
"""
# BaseEstimator interface
def __init__(self, steps):
self.named_steps = dict(steps)
names, estimators = zip(*steps)
if len(self.named_steps) != len(steps):
raise ValueError("Names provided are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(zip(names, estimators))
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps a the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps.copy()
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator."""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
def predict_log_proba(self, X):
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform."""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
def inverse_transform(self, X):
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
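# Illustrative note: duplicate estimator types are disambiguated by a counter,
# e.g. two StandardScaler instances become 'standardscaler-1' and
# 'standardscaler-2'.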
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
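# A minimal usage sketch (illustrative only; the transformer names, weights and
# toy data are assumptions): combine two decompositions and down-weight one of
# them via transformer_weights.
def _feature_union_example():
    from sklearn.decomposition import PCA, TruncatedSVD
    union = FeatureUnion([("pca", PCA(n_components=2)),
                          ("svd", TruncatedSVD(n_components=2))],
                         transformer_weights={"svd": 0.5})
    X = np.random.RandomState(0).rand(10, 5)
    # result hstacks 2 PCA features and 2 (down-weighted) SVD features
    return union.fit_transform(X)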
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
fcbruce/text-classifier | src/svm_train.py | 1 | 1118 | #coding=utf-8
#
# Author : fcbruce <[email protected]>
#
# Time : Fri 30 Dec 2016 16:09:39
#
#
from sklearn import preprocessing as pp
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
import numpy as np
import sklearn.metrics as skmt
import datetime
import random
import pickle as pkl
from config import *
from evaluation import calc_overdue_rate
train_mat = np.load(train_data)
test_mat = np.load(test_data)
X_train = train_mat[:, :-1]
y_train = train_mat[:, -1]
X_test = test_mat[:, :-1]
y_test = test_mat[:, -1]
scaler = pp.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
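# class_weight below up-weights the positive class to compensate for class
# imbalance; model quality is assessed with ROC AUC over 10-fold cross-validation.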
#clf = SVC(C=10., kernel='rbf', gamma=3e-3, class_weight={1.: 3.95}, random_state=198964, verbose=True, cache_size=512)
clf = SVC(C=10., kernel='linear', class_weight={1.: 3.95}, random_state=2017, verbose=True, cache_size=512, probability=True)
#clf.fit(X_train, y_train)
#f = open(svm_model_path % '20170124', 'wb')
#pkl.dump(clf, f, pkl.HIGHEST_PROTOCOL)
auc = cross_val_score(clf, X_train, y_train, cv=10, scoring='roc_auc')
print(auc.mean())
| mit |
xlhtc007/blaze | blaze/compute/core.py | 6 | 14107 | from __future__ import absolute_import, division, print_function
import numbers
from datetime import date, datetime
import toolz
from toolz import first, concat, memoize, unique, assoc
import itertools
from collections import Iterator
from ..compatibility import basestring
from ..expr import Expr, Field, Symbol, symbol, eval_str
from ..dispatch import dispatch
__all__ = ['compute', 'compute_up']
base = (numbers.Number, basestring, date, datetime)
@dispatch(Expr, object)
def pre_compute(leaf, data, scope=None, **kwargs):
""" Transform data prior to calling ``compute`` """
return data
@dispatch(Expr, object)
def post_compute(expr, result, scope=None):
""" Effects after the computation is complete """
return result
@dispatch(Expr, object)
def optimize(expr, data):
""" Optimize expression to be computed on data """
return expr
@dispatch(object, object)
def compute_up(a, b, **kwargs):
raise NotImplementedError("Blaze does not know how to compute "
"expression of type `%s` on data of type `%s`"
% (type(a).__name__, type(b).__name__))
@dispatch(base)
def compute_up(a, **kwargs):
return a
@dispatch((list, tuple))
def compute_up(seq, scope=None, **kwargs):
return type(seq)(compute(item, scope or {}, **kwargs) for item in seq)
@dispatch(Expr, object)
def compute(expr, o, **kwargs):
""" Compute against single input
Assumes that only one Symbol exists in expression
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> # list(compute(deadbeats, {t: data}))
>>> list(compute(deadbeats, data))
['Bob', 'Charlie']
"""
ts = set([x for x in expr._subterms() if isinstance(x, Symbol)])
if len(ts) == 1:
return compute(expr, {first(ts): o}, **kwargs)
else:
raise ValueError("Give compute dictionary input, got %s" % str(o))
@dispatch(object)
def compute_down(expr, **kwargs):
""" Compute the expression on the entire inputs
inputs match up to leaves of the expression
"""
return expr
def issubtype(a, b):
""" A custom issubclass """
if issubclass(a, b):
return True
if issubclass(a, (tuple, list, set)) and issubclass(b, Iterator):
return True
if issubclass(b, (tuple, list, set)) and issubclass(a, Iterator):
return True
return False
def type_change(old, new):
""" Was there a significant type change between old and new data?
>>> type_change([1, 2], [3, 4])
False
>>> type_change([1, 2], [3, [1,2,3]])
True
Some special cases exist, like no type change from list to Iterator
>>> type_change([[1, 2]], [iter([1, 2])])
False
"""
if all(isinstance(x, base) for x in old + new):
return False
if len(old) != len(new):
return True
new_types = list(map(type, new))
old_types = list(map(type, old))
return not all(map(issubtype, new_types, old_types))
def top_then_bottom_then_top_again_etc(expr, scope, **kwargs):
""" Compute expression against scope
Does the following interpreter strategy:
1. Try compute_down on the entire expression
2. Otherwise compute_up from the leaves until we experience a type change
(e.g. data changes from dict -> pandas DataFrame)
3. Re-optimize expression and re-pre-compute data
4. Go to step 1
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i4')])
>>> e = s.amount.sum() + 1
>>> top_then_bottom_then_top_again_etc(e, {s: data})
601
See Also
--------
bottom_up_until_type_break -- uses this for bottom-up traversal
top_to_bottom -- older version
bottom_up -- older version still
"""
# 0. Base case: expression is in dict, return associated data
if expr in scope:
return scope[expr]
if not hasattr(expr, '_leaves'):
return expr
leaf_exprs = list(expr._leaves())
leaf_data = [scope.get(leaf) for leaf in leaf_exprs]
# 1. See if we have a direct computation path with compute_down
try:
return compute_down(expr, *leaf_data, **kwargs)
except NotImplementedError:
pass
# 2. Compute from the bottom until there is a data type change
expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)
# 3. Re-optimize data and expressions
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
if pre_compute_:
scope3 = dict((e, pre_compute_(e, datum,
**assoc(kwargs, 'scope', scope2)))
for e, datum in scope2.items())
else:
scope3 = scope2
if optimize_:
try:
expr3 = optimize_(expr2, *[scope3[leaf] for leaf in expr2._leaves()])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
scope4 = dict((e._subs(_d), d) for e, d in scope3.items())
except NotImplementedError:
expr3 = expr2
scope4 = scope3
else:
expr3 = expr2
scope4 = scope3
# 4. Repeat
if expr.isidentical(expr3):
raise NotImplementedError("Don't know how to compute:\n"
"expr: %s\n"
"data: %s" % (expr3, scope4))
else:
return top_then_bottom_then_top_again_etc(expr3, scope4, **kwargs)
def top_to_bottom(d, expr, **kwargs):
""" Processes an expression top-down then bottom-up """
# Base case: expression is in dict, return associated data
if expr in d:
return d[expr]
if not hasattr(expr, '_leaves'):
return expr
leaves = list(expr._leaves())
data = [d.get(leaf) for leaf in leaves]
# See if we have a direct computation path with compute_down
try:
return compute_down(expr, *data, **kwargs)
except NotImplementedError:
pass
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
# Otherwise...
# Compute children of this expression
if hasattr(expr, '_inputs'):
children = [top_to_bottom(d, child, **kwargs)
for child in expr._inputs]
else:
children = []
# Did we experience a data type change?
if type_change(data, children):
# If so call pre_compute again
if pre_compute_:
children = [pre_compute_(expr, child, **kwargs) for child in children]
# If so call optimize again
if optimize_:
try:
expr = optimize_(expr, *children)
except NotImplementedError:
pass
# Compute this expression given the children
return compute_up(expr, *children, scope=d, **kwargs)
_names = ('leaf_%d' % i for i in itertools.count(1))
_leaf_cache = dict()
_used_tokens = set()
def _reset_leaves():
_leaf_cache.clear()
_used_tokens.clear()
def makeleaf(expr):
""" Name of a new leaf replacement for this expression
>>> _reset_leaves()
>>> t = symbol('t', '{x: int, y: int, z: int}')
>>> makeleaf(t)
t
>>> makeleaf(t.x)
x
>>> makeleaf(t.x + 1)
x
>>> makeleaf(t.x + 1)
x
>>> makeleaf(t.x).isidentical(makeleaf(t.x + 1))
False
>>> from blaze import sin, cos
>>> x = symbol('x', 'real')
    >>> makeleaf(cos(x)**2).isidentical(makeleaf(sin(x)**2))
False
>>> makeleaf(t) is t # makeleaf passes on Symbols
True
"""
name = expr._name or '_'
token = None
if expr in _leaf_cache:
return _leaf_cache[expr]
if isinstance(expr, Symbol): # Idempotent on symbols
_used_tokens.add((name, expr._token))
return expr
if (name, token) in _used_tokens:
for token in itertools.count():
if (name, token) not in _used_tokens:
break
result = symbol(name, expr.dshape, token)
_used_tokens.add((name, token))
_leaf_cache[expr] = result
return result
def data_leaves(expr, scope):
return [scope[leaf] for leaf in expr._leaves()]
def bottom_up_until_type_break(expr, scope, **kwargs):
""" Traverse bottom up until data changes significantly
Parameters
----------
expr: Expression
Expression to compute
scope: dict
namespace matching leaves of expression to data
Returns
-------
expr: Expression
New expression with lower subtrees replaced with leaves
scope: dict
New scope with entries for those leaves
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i8')])
This computation completes without changing type. We get back a leaf
symbol and a computational result
>>> e = (s.amount + 1).distinct()
>>> bottom_up_until_type_break(e, {s: data}) # doctest: +SKIP
(amount, {amount: array([101, 201, 301])})
This computation has a type change midstream (``list`` to ``int``), so we
stop and get the unfinished computation.
>>> e = s.amount.sum() + 1
>>> bottom_up_until_type_break(e, {s: data})
(amount_sum + 1, {amount_sum: 600})
"""
# 0. Base case. Return if expression is in scope
if expr in scope:
leaf = makeleaf(expr)
return leaf, {leaf: scope[expr]}
inputs = list(unique(expr._inputs))
# 1. Recurse down the tree, calling this function on children
# (this is the bottom part of bottom up)
exprs, new_scopes = zip(*[bottom_up_until_type_break(i, scope, **kwargs)
for i in inputs])
# 2. Form new (much shallower) expression and new (more computed) scope
new_scope = toolz.merge(new_scopes)
new_expr = expr._subs(dict((i, e) for i, e in zip(inputs, exprs)
if not i.isidentical(e)))
old_expr_leaves = expr._leaves()
old_data_leaves = [scope.get(leaf) for leaf in old_expr_leaves]
# 3. If the leaves have changed substantially then stop
key = lambda x: str(type(x))
if type_change(sorted(new_scope.values(), key=key),
sorted(old_data_leaves, key=key)):
return new_expr, new_scope
# 4. Otherwise try to do some actual work
try:
leaf = makeleaf(expr)
_data = [new_scope[i] for i in new_expr._inputs]
except KeyError:
return new_expr, new_scope
try:
return leaf, {leaf: compute_up(new_expr, *_data, scope=new_scope,
**kwargs)}
except NotImplementedError:
return new_expr, new_scope
def bottom_up(d, expr):
"""
Process an expression from the leaves upwards
Parameters
----------
d : dict mapping {Symbol: data}
Maps expressions to data elements, likely at the leaves of the tree
expr : Expr
Expression to compute
Helper function for ``compute``
"""
# Base case: expression is in dict, return associated data
if expr in d:
return d[expr]
# Compute children of this expression
children = ([bottom_up(d, child) for child in expr._inputs]
if hasattr(expr, '_inputs') else [])
# Compute this expression given the children
result = compute_up(expr, *children, scope=d)
return result
def swap_resources_into_scope(expr, scope):
""" Translate interactive expressions into normal abstract expressions
Interactive Blaze expressions link to data on their leaves. From the
expr/compute perspective, this is a hack. We push the resources onto the
scope and return simple unadorned expressions instead.
Examples
--------
>>> from blaze import Data
>>> t = Data([1, 2, 3], dshape='3 * int', name='t')
>>> swap_resources_into_scope(t.head(2), {})
(t.head(2), {t: [1, 2, 3]})
>>> expr, scope = _
>>> list(scope.keys())[0]._resources()
{}
"""
resources = expr._resources()
symbol_dict = dict((t, symbol(t._name, t.dshape)) for t in resources)
resources = dict((symbol_dict[k], v) for k, v in resources.items())
other_scope = dict((k, v) for k, v in scope.items()
if k not in symbol_dict)
new_scope = toolz.merge(resources, other_scope)
expr = expr._subs(symbol_dict)
return expr, new_scope
@dispatch(Expr, dict)
def compute(expr, d, **kwargs):
""" Compute expression against data sources
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> list(compute(deadbeats, {t: data}))
['Bob', 'Charlie']
"""
_reset_leaves()
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
post_compute_ = kwargs.get('post_compute', post_compute)
expr2, d2 = swap_resources_into_scope(expr, d)
if pre_compute_:
d3 = dict(
(e, pre_compute_(e, dat, **kwargs))
for e, dat in d2.items()
if e in expr2
)
else:
d3 = d2
if optimize_:
try:
expr3 = optimize_(expr2, *[v for e, v in d3.items() if e in expr2])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
d4 = dict((e._subs(_d), d) for e, d in d3.items())
except NotImplementedError:
expr3 = expr2
d4 = d3
else:
expr3 = expr2
d4 = d3
result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)
if post_compute_:
result = post_compute_(expr3, result, scope=d4)
return result
@dispatch(Field, dict)
def compute_up(expr, data, **kwargs):
return data[expr._name]
| bsd-3-clause |
danche354/Sequence-Labeling | preprocessing/ner-auto-encoder/test_auto_encoder.py | 2 | 2502 | from keras.models import load_model
import pandas as pd
import numpy as np
import sys
import os
# change dir to train file, for environment
os.chdir('../../ner/')
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import load_data
from tools import prepare
epoch = sys.argv[1]
test = sys.argv[2]
path = './model/word-hash-auto-encoder-128/model_epoch_%s.h5'%epoch
model = load_model(path)
train_data = load_data.load_ner(dataset='eng.train')
dev_data = load_data.load_ner(dataset='eng.testa')
test_data = load_data.load_ner(dataset='eng.testb')
train_word = []
dev_word = []
test_word = []
# all word
[train_word.extend(list(each[0])) for each in train_data]
[dev_word.extend(list(each[0])) for each in dev_data]
[test_word.extend(list(each[0])) for each in test_data]
train_word = [each.strip().lower() for each in train_word]
dev_word = [each.strip().lower() for each in dev_word]
test_word = [each.strip().lower() for each in test_word]
train_word_dict = {}
dev_word_dict = {}
test_word_dict = {}
for each in train_word:
if each in train_word_dict:
train_word_dict[each] += 1
else:
train_word_dict[each] = 1
for each in dev_word:
if each in dev_word_dict:
dev_word_dict[each] += 1
else:
dev_word_dict[each] = 1
for each in test_word:
if each in test_word_dict:
test_word_dict[each] += 1
else:
test_word_dict[each] = 1
train_word = list(train_word_dict.keys())
dev_word = list(dev_word_dict.keys())
test_word = list(test_word_dict.keys())
if test=='dev':
word = dev_word[:20]
elif test=='test':
word = test_word[:20]
else:
word = train_word[:20]
word_hashing = prepare.prepare_auto_encoder(batch=word, task='ner')
word_hashing = word_hashing.toarray()
output = model.predict_on_batch(word_hashing)
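# Hedged sketch: a per-word reconstruction error can be computed directly from
# the dense word-hashing input and the autoencoder output, for example
# errors = np.mean((output - word_hashing) ** 2, axis=1)
# before dropping into the interactive inspection loop below.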
while True:
    number = int(input('please input word index: '))
    exist = word[number]
print('word is: ' + exist)
if exist in train_word_dict:
print(' in train: ' + str(train_word_dict[exist]) + ' times.')
if exist in dev_word_dict:
print(' in dev: ' + str(dev_word_dict[exist]) + ' times.')
if exist in test_word_dict:
print(' in test: ' + str(test_word_dict[exist]) + ' times.')
print('-'*60)
ind = []
for i, e in enumerate(word_hashing[number]):
if e==1:
print(i)
ind.append(i)
print('word_hasing'+ '-'*60)
for i in ind:
print(output[number][i])
print('output'+ '-'*60)
| mit |
openradar/TINT | tint/objects.py | 1 | 8903 | """
tint.objects
============
Functions for managing and recording object properties.
"""
import numpy as np
import pandas as pd
import pyart
from scipy import ndimage
from .grid_utils import get_filtered_frame
def get_object_center(obj_id, labeled_image):
""" Returns index of center pixel of the given object id from labeled
image. The center is calculated as the median pixel of the object extent;
it is not a true centroid. """
obj_index = np.argwhere(labeled_image == obj_id)
center = np.median(obj_index, axis=0).astype('i')
return center
def get_obj_extent(labeled_image, obj_label):
""" Takes in labeled image and finds the radius, area, and center of the
given object. """
obj_index = np.argwhere(labeled_image == obj_label)
xlength = np.max(obj_index[:, 0]) - np.min(obj_index[:, 0]) + 1
ylength = np.max(obj_index[:, 1]) - np.min(obj_index[:, 1]) + 1
obj_radius = np.max((xlength, ylength))/2
obj_center = np.round(np.median(obj_index, axis=0), 0)
obj_area = len(obj_index[:, 0])
obj_extent = {'obj_center': obj_center, 'obj_radius': obj_radius,
'obj_area': obj_area, 'obj_index': obj_index}
return obj_extent
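# Hedged illustration of get_obj_extent on a tiny labeled image (values checked
# by hand; doctest marked SKIP because dict ordering/printing may differ):
# >>> lab = np.array([[0, 1], [1, 1]])
# >>> get_obj_extent(lab, 1)  # doctest: +SKIP
# {'obj_center': array([1., 1.]), 'obj_radius': 1.0, 'obj_area': 3,
#  'obj_index': array([[0, 1], [1, 0], [1, 1]])}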
def init_current_objects(first_frame, second_frame, pairs, counter):
""" Returns a dictionary for objects with unique ids and their
    corresponding ids in frame1 and frame2. This function is called when
echoes are detected after a period of no echoes. """
nobj = np.max(first_frame)
id1 = np.arange(nobj) + 1
uid = counter.next_uid(count=nobj)
id2 = pairs
obs_num = np.zeros(nobj, dtype='i')
origin = np.array(['-1']*nobj)
current_objects = {'id1': id1, 'uid': uid, 'id2': id2,
'obs_num': obs_num, 'origin': origin}
current_objects = attach_last_heads(first_frame, second_frame,
current_objects)
return current_objects, counter
def update_current_objects(frame1, frame2, pairs, old_objects, counter):
""" Removes dead objects, updates living objects, and assigns new uids to
new-born objects. """
nobj = np.max(frame1)
id1 = np.arange(nobj) + 1
uid = np.array([], dtype='str')
obs_num = np.array([], dtype='i')
origin = np.array([], dtype='str')
for obj in np.arange(nobj) + 1:
if obj in old_objects['id2']:
obj_index = old_objects['id2'] == obj
uid = np.append(uid, old_objects['uid'][obj_index])
obs_num = np.append(obs_num, old_objects['obs_num'][obj_index] + 1)
origin = np.append(origin, old_objects['origin'][obj_index])
else:
# obj_orig = get_origin_uid(obj, frame1, old_objects)
obj_orig = '-1'
origin = np.append(origin, obj_orig)
if obj_orig != '-1':
uid = np.append(uid, counter.next_cid(obj_orig))
else:
uid = np.append(uid, counter.next_uid())
obs_num = np.append(obs_num, 0)
id2 = pairs
current_objects = {'id1': id1, 'uid': uid, 'id2': id2,
'obs_num': obs_num, 'origin': origin}
current_objects = attach_last_heads(frame1, frame2, current_objects)
return current_objects, counter
def attach_last_heads(frame1, frame2, current_objects):
""" Attaches last heading information to current_objects dictionary. """
nobj = len(current_objects['uid'])
heads = np.ma.empty((nobj, 2))
for obj in range(nobj):
if ((current_objects['id1'][obj] > 0) and
(current_objects['id2'][obj] > 0)):
center1 = get_object_center(current_objects['id1'][obj], frame1)
center2 = get_object_center(current_objects['id2'][obj], frame2)
heads[obj, :] = center2 - center1
else:
heads[obj, :] = np.ma.array([-999, -999], mask=[True, True])
current_objects['last_heads'] = heads
return current_objects
def check_isolation(raw, filtered, grid_size, params):
""" Returns list of booleans indicating object isolation. Isolated objects
are not connected to any other objects by pixels greater than ISO_THRESH,
and have at most one peak. """
nobj = np.max(filtered)
min_size = params['MIN_SIZE'] / np.prod(grid_size[1:]/1000)
iso_filtered = get_filtered_frame(raw,
min_size,
params['ISO_THRESH'])
nobj_iso = np.max(iso_filtered)
iso = np.empty(nobj, dtype='bool')
for iso_id in np.arange(nobj_iso) + 1:
obj_ind = np.where(iso_filtered == iso_id)
objects = np.unique(filtered[obj_ind])
objects = objects[objects != 0]
if len(objects) == 1 and single_max(obj_ind, raw, params):
iso[objects - 1] = True
else:
iso[objects - 1] = False
return iso
def single_max(obj_ind, raw, params):
""" Returns True if object has at most one peak. """
max_proj = np.max(raw, axis=0)
smooth = ndimage.filters.gaussian_filter(max_proj, params['ISO_SMOOTH'])
padded = np.pad(smooth, 1, mode='constant')
obj_ind = [axis + 1 for axis in obj_ind] # adjust for padding
maxima = 0
for pixel in range(len(obj_ind[0])):
ind_0 = obj_ind[0][pixel]
ind_1 = obj_ind[1][pixel]
neighborhood = padded[(ind_0-1):(ind_0+2), (ind_1-1):(ind_1+2)]
max_ind = np.unravel_index(neighborhood.argmax(), neighborhood.shape)
if max_ind == (1, 1):
maxima += 1
if maxima > 1:
return False
return True
def get_object_prop(image1, grid1, field, record, params):
""" Returns dictionary of object properties for all objects found in
image1. """
id1 = []
center = []
grid_x = []
grid_y = []
area = []
longitude = []
latitude = []
field_max = []
max_height = []
volume = []
nobj = np.max(image1)
unit_dim = record.grid_size
unit_alt = unit_dim[0]/1000
unit_area = (unit_dim[1]*unit_dim[2])/(1000**2)
unit_vol = (unit_dim[0]*unit_dim[1]*unit_dim[2])/(1000**3)
raw3D = grid1.fields[field]['data'].data
for obj in np.arange(nobj) + 1:
obj_index = np.argwhere(image1 == obj)
id1.append(obj)
# 2D frame stats
center.append(np.median(obj_index, axis=0))
this_centroid = np.round(np.mean(obj_index, axis=0), 3)
grid_x.append(this_centroid[1])
grid_y.append(this_centroid[0])
area.append(obj_index.shape[0] * unit_area)
rounded = np.round(this_centroid).astype('i')
cent_met = np.array([grid1.y['data'][rounded[0]],
grid1.x['data'][rounded[1]]])
projparams = grid1.get_projparams()
lon, lat = pyart.core.transforms.cartesian_to_geographic(cent_met[1],
cent_met[0],
projparams)
longitude.append(np.round(lon[0], 4))
latitude.append(np.round(lat[0], 4))
# raw 3D grid stats
obj_slices = [raw3D[:, ind[0], ind[1]] for ind in obj_index]
field_max.append(np.max(obj_slices))
filtered_slices = [obj_slice > params['FIELD_THRESH']
for obj_slice in obj_slices]
heights = [np.arange(raw3D.shape[0])[ind] for ind in filtered_slices]
max_height.append(np.max(np.concatenate(heights)) * unit_alt)
volume.append(np.sum(filtered_slices) * unit_vol)
# cell isolation
isolation = check_isolation(raw3D, image1, record.grid_size, params)
objprop = {'id1': id1,
'center': center,
'grid_x': grid_x,
'grid_y': grid_y,
'area': area,
'field_max': field_max,
'max_height': max_height,
'volume': volume,
'lon': longitude,
'lat': latitude,
'isolated': isolation}
return objprop
def write_tracks(old_tracks, record, current_objects, obj_props):
""" Writes all cell information to tracks dataframe. """
print('Writing tracks for scan', record.scan)
nobj = len(obj_props['id1'])
scan_num = [record.scan] * nobj
uid = current_objects['uid']
new_tracks = pd.DataFrame({
'scan': scan_num,
'uid': uid,
'time': record.time,
'grid_x': obj_props['grid_x'],
'grid_y': obj_props['grid_y'],
'lon': obj_props['lon'],
'lat': obj_props['lat'],
'area': obj_props['area'],
'vol': obj_props['volume'],
'max': obj_props['field_max'],
'max_alt': obj_props['max_height'],
'isolated': obj_props['isolated']
})
new_tracks.set_index(['scan', 'uid'], inplace=True)
tracks = old_tracks.append(new_tracks)
return tracks
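# Hedged usage sketch: appending one scan's worth of cells to the track table,
# starting from an empty frame; the result is indexed by (scan, uid).
# tracks = write_tracks(pd.DataFrame(), record, current_objects, obj_props)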
| bsd-2-clause |
vityurkiv/Ox | modules/porous_flow/doc/tests/relperm.py | 13 | 4564 | #!/usr/bin/env python
# Script to generate plots used in documentation from test results
import numpy as np
import matplotlib.pyplot as plt
# Analytical form of Corey's relative permeability curve
def corey(s, sr, sum_s_r, n):
# Effective saturation
seff = np.clip((s - sr) / (1.0 - sum_s_r), 0, 1)
# Relative permeability is then
    relperm = np.power(seff, n)
return relperm
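# Hand-checked example of the Corey curve above: with s = 0.5, sr = 0.2,
# sum_s_r = 0.5 and n = 2 the effective saturation is (0.5 - 0.2)/(1 - 0.5) = 0.6,
# so corey(0.5, 0.2, 0.5, 2) evaluates to 0.6**2 = 0.36.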
# Analytical form of van Genuchten's relative permeability
def vg(s, sr, sls, m):
# Effective saturation
seff = np.clip((s - sr) / (sls - sr), 0, 1)
# The liquid relative permeability is
relperm = np.sqrt(seff) * (1 - np.power(1 - np.power(seff, 1.0 / m), m))**2
# Relative permeability is then
return relperm
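# Hand-checked examples of the van Genuchten curve above: vg(1.0, 0.0, 1.0, 0.5)
# returns 1.0 at full saturation, and vg(0.75, 0.25, 1.0, 0.5) (seff = 2/3)
# evaluates to roughly 0.053.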
# Saturation of phase 0 varies linearly from 0 to 1
s0 = np.linspace(0, 1, 200)
################################################################################
#
# Corey relative permeabilities
#
# Case 1: residual saturation set to 0 for both phases, n = 1
#
# Read MOOSE simulation data
data = np.genfromtxt('../../tests/relperm/corey1_out_vpp_0001.csv', delimiter = ',', names = True, dtype = None)
plt.figure(1)
plt.plot(s0, corey(s0, 0, 0, 1), label = 'kr0')
plt.plot(data['s0aux'], data['kr0aux'], 'ob', label = 'kr0 (MOOSE)')
plt.plot(s0, corey(1 - s0, 0, 0, 1), label = 'kr1')
plt.plot(data['s0aux'], data['kr1aux'], 'og', label = 'kr1 (MOOSE)')
plt.xlabel('Phase 0 saturation (-)')
plt.ylabel('Relative permeability (-)')
plt.legend(loc = 'best')
plt.title('Corey relative permeability: $S_{0r} = 0, S_{1r} = 0, n = 1$')
plt.savefig("corey1_fig.pdf")
# Case 2: residual saturation set to 0 for both phases, and n = 2
#
# Read MOOSE simulation data
data = np.genfromtxt('../../tests/relperm/corey2_out_vpp_0001.csv', delimiter = ',', names = True, dtype = None)
plt.figure(2)
plt.plot(s0, corey(s0, 0, 0, 2), label = 'kr0')
plt.plot(data['s0aux'], data['kr0aux'], 'ob', label = 'kr0 (MOOSE)')
plt.plot(s0, corey(1 - s0, 0, 0, 2), label = 'kr1')
plt.plot(data['s0aux'], data['kr1aux'], 'og', label = 'kr1 (MOOSE)')
plt.xlabel('Phase 0 saturation (-)')
plt.ylabel('Relative permeability (-)')
plt.legend(loc = 'best')
plt.title('Corey relative permeability: $S_{0r} = 0, S_{1r} = 0, n = 2$')
plt.savefig("corey2_fig.pdf")
# Case 3: residual saturation set to 0.2 for phase 0, 0.3 for phase 1 and n = 2
#
# Read MOOSE simulation data
data = np.genfromtxt('../../tests/relperm/corey3_out_vpp_0001.csv', delimiter = ',', names = True, dtype = None)
plt.figure(3)
plt.plot(s0, corey(s0, 0.2, 0.5, 2), label = 'kr0')
plt.plot(data['s0aux'], data['kr0aux'], 'ob', label = 'kr0 (MOOSE)')
plt.plot(s0, corey(1 - s0, 0.3, 0.5, 2), label = 'kr1')
plt.plot(data['s0aux'], data['kr1aux'], 'og', label = 'kr1 (MOOSE)')
plt.xlabel('Phase 0 saturation (-)')
plt.ylabel('Relative permeability (-)')
plt.legend(loc = 'best')
plt.title('Corey relative permeability: $S_{0r} = 0.2, S_{1r} = 0.3, n = 2$')
plt.ylim([-0.01, 1.01])
plt.savefig("corey3_fig.pdf")
################################################################################
#
# van Genuchten relative permeabilities
#
# Case 1: residual saturation set to 0 for both phases, m = 0.5, sls = 1
#
# Read MOOSE simulation data
data = np.genfromtxt('../../tests/relperm/vangenuchten1_out_vpp_0001.csv', delimiter = ',', names = True, dtype = None)
plt.figure(4)
plt.plot(s0, vg(s0, 0, 1, 0.5), label = 'kr0')
plt.plot(data['s0aux'], data['kr0aux'], 'ob', label = 'kr0 (MOOSE)')
plt.plot(s0, corey(1 - s0, 0, 0, 2), label = 'kr1')
plt.plot(data['s0aux'], data['kr1aux'], 'og', label = 'kr1 (MOOSE)')
plt.xlabel('Phase 0 saturation (-)')
plt.ylabel('Relative permeability (-)')
plt.legend(loc = 'best')
plt.title('van Genuchten relative permeability: $S_{0r} = 0, S_{1r} = 0, m = 0.5$')
plt.ylim([-0.01, 1.01])
plt.savefig("vg1_fig.pdf")
# Case 2: residual saturation set to 0.25 for phase 0, 0 for phase 1, m = 0.4, sls = 1
#
# Read MOOSE simulation data
data = np.genfromtxt('../../tests/relperm/vangenuchten2_out_vpp_0001.csv', delimiter = ',', names = True, dtype = None)
plt.figure(5)
plt.plot(s0, vg(s0, 0.25, 1, 0.4), label = 'kr0')
plt.plot(data['s0aux'], data['kr0aux'], 'ob', label = 'kr0 (MOOSE)')
plt.plot(s0, corey(1 - s0, 0, 0.25, 2), label = 'kr1')
plt.plot(data['s0aux'], data['kr1aux'], 'og', label = 'kr1 (MOOSE)')
plt.xlabel('Phase 0 saturation (-)')
plt.ylabel('Relative permeability (-)')
plt.legend(loc = 'best')
plt.title('van Genuchten relative permeability: $S_{0r} = 0.25, S_{1r} = 0, m = 0.4$')
plt.ylim([-0.01, 1.01])
plt.savefig("vg2_fig.pdf")
| lgpl-2.1 |
datapythonista/pandas | pandas/tests/generic/test_to_xarray.py | 2 | 4128 | import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
date_range,
)
import pandas._testing as tm
@td.skip_if_no("xarray")
class TestDataFrameToXArray:
@pytest.fixture
def df(self):
return DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": Categorical(list("abc")),
"g": date_range("20130101", periods=3),
"h": date_range("20130101", periods=3, tz="US/Eastern"),
}
)
def test_to_xarray_index_types(self, index, df):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
if len(index) == 0:
pytest.skip("Test doesn't make sense for empty index")
from xarray import Dataset
df.index = index[:3]
df.index.name = "foo"
df.columns.name = "bar"
result = df.to_xarray()
assert result.dims["foo"] == 3
assert len(result.coords) == 1
assert len(result.data_vars) == 8
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, Dataset)
# idempotency
# datetimes w/tz are preserved
# column names are lost
expected = df.copy()
expected["f"] = expected["f"].astype(object)
expected.columns.name = None
tm.assert_frame_equal(result.to_dataframe(), expected)
def test_to_xarray_empty(self, df):
from xarray import Dataset
df.index.name = "foo"
result = df[0:0].to_xarray()
assert result.dims["foo"] == 0
assert isinstance(result, Dataset)
def test_to_xarray_with_multiindex(self, df):
from xarray import Dataset
# MultiIndex
df.index = MultiIndex.from_product([["a"], range(3)], names=["one", "two"])
result = df.to_xarray()
assert result.dims["one"] == 1
assert result.dims["two"] == 3
assert len(result.coords) == 2
assert len(result.data_vars) == 8
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
assert isinstance(result, Dataset)
result = result.to_dataframe()
expected = df.copy()
expected["f"] = expected["f"].astype(object)
expected.columns.name = None
tm.assert_frame_equal(result, expected)
@td.skip_if_no("xarray")
class TestSeriesToXArray:
def test_to_xarray_index_types(self, index):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
from xarray import DataArray
ser = Series(range(len(index)), index=index, dtype="int64")
ser.index.name = "foo"
result = ser.to_xarray()
repr(result)
assert len(result) == len(index)
assert len(result.coords) == 1
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, DataArray)
# idempotency
tm.assert_series_equal(result.to_series(), ser)
def test_to_xarray_empty(self):
from xarray import DataArray
ser = Series([], dtype=object)
ser.index.name = "foo"
result = ser.to_xarray()
assert len(result) == 0
assert len(result.coords) == 1
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, DataArray)
def test_to_xarray_with_multiindex(self):
from xarray import DataArray
mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"])
ser = Series(range(6), dtype="int64", index=mi)
result = ser.to_xarray()
assert len(result) == 2
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
assert isinstance(result, DataArray)
res = result.to_series()
tm.assert_series_equal(res, ser)
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/scipy/spatial/tests/test__plotutils.py | 71 | 1463 | from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
has_matplotlib = True
except:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| gpl-3.0 |
stephenfloor/extract-transcript-regions | GTF.py | 3 | 2054 | #!/usr/bin/env python
"""
GTF.py
Kamil Slowikowski
December 24, 2013
Read GFF/GTF files. Works with gzip compressed files and pandas.
http://useast.ensembl.org/info/website/upload/gff.html
Downloaded by SNF on 12/30/14 from https://gist.github.com/slowkow/8101481
- pandas support removed to minimize package requirements
"""
from collections import defaultdict
import gzip
import re
GTF_HEADER = ['seqname', 'source', 'feature', 'start', 'end', 'score',
'strand', 'frame']
R_SEMICOLON = re.compile(r'\s*;\s*')
R_COMMA = re.compile(r'\s*,\s*')
R_KEYVALUE = re.compile(r'(\s+|\s*=\s*)')
def lines(filename):
"""Open an optionally gzipped GTF file and generate a dict for each line.
"""
fn_open = gzip.open if filename.endswith('.gz') else open
with fn_open(filename) as fh:
for line in fh:
if line.startswith('#'):
continue
else:
yield parse(line)
def parse(line):
"""Parse a single GTF line and return a dict.
"""
result = {}
fields = line.rstrip().split('\t')
for i, col in enumerate(GTF_HEADER):
result[col] = _get_value(fields[i])
# INFO field consists of "key1=value;key2=value;...".
infos = re.split(R_SEMICOLON, fields[8])
for i, info in enumerate(infos, 1):
# It should be key="value".
try:
key, _, value = re.split(R_KEYVALUE, info)
# But sometimes it is just "value".
except ValueError:
key = 'INFO{}'.format(i)
value = info
# Ignore the field if there is no value.
if value:
result[key] = _get_value(value)
return result
def _get_value(value):
if not value:
return None
# Strip double and single quotes.
value = value.strip('"\'')
# Return a list if the value has a comma.
if ',' in value:
value = re.split(R_COMMA, value)
# These values are equivalent to None.
elif value in ['', '.', 'NA']:
return None
return value
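# Hedged usage sketch ('annotation.gtf.gz' is a placeholder path; assumes an
# Ensembl-style file whose attribute column carries gene_id entries):
# for record in lines('annotation.gtf.gz'):
#     print(record['seqname'], record.get('gene_id'))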
| gpl-2.0 |
chendaniely/google_scholar_citation_network | src/expand_graph.py | 1 | 3717 | import pandas as pd
import requests
from random import randint, random
from time import sleep
from bs4 import BeautifulSoup
import scholar
import scrape
class GoogleScholarArticleSimple(scrape.CitationResults):
def __init__(self):
self.citation_url_generic = 'https://scholar.google.com/scholar?start={}&hl=en&as_sdt=2005&sciodt=0,5&cites={}&scipsc='
self.cluster_id = None
def set_search_soup(self, first_page=0):
        search_page_url = self.citation_url_generic.format(first_page, self.cluster_id)
r = requests.get(search_page_url)
self.soup = BeautifulSoup(r.text)
return(self)
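# Hedged usage sketch (assumes scrape.CitationResults supplies the
# set_num_search_results/set_num_search_pages helpers chained in main() below):
# art = GoogleScholarArticleSimple()
# art.cluster_id = 5556531000720111691
# art.set_search_soup(first_page=0).set_num_search_results().set_num_search_pages()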
def main():
data = pd.DataFrame()
f = open('../results/5556531000720111691.csv.bkup', 'r')
for idx, line in enumerate(f):
data_values = line.split(',', 2)
to_append = pd.DataFrame([data_values])
data = data.append(to_append)
f.close()
#
# for each cluster id
#
    for from_cluster_id in range(data.shape[0])[:1]:  # just get the first one, for now
print(from_cluster_id)
cluster_id = data.iloc[from_cluster_id, 0]
try:
cluster_id = int(cluster_id)
except ValueError:
continue
querier = scholar.ScholarQuerier()
settings = scholar.ScholarSettings()
query = scholar.SearchScholarQuery()
query_cluster = scholar.ClusterScholarQuery(cluster=cluster_id)
querier.send_query(query_cluster)
#
# for each article in search results
#
for article in querier.articles[:1]: # get first article result, for now
article.attrs.get('url_citations')[0]
current_article = GoogleScholarArticleSimple()
current_article.cluster_id = cluster_id
current_article.set_search_soup().set_num_search_results().set_num_search_pages()
# gs_r = current_article.soup.find_all("div", class_="gs_r")
#
# for each search page result of citing article
#
            for page_idx, search_page_number in enumerate(range(current_article.num_search_pages)[:1]):  # get first page result for now
                url = current_article.citation_url_generic.format(search_page_number * 10, cluster_id)
r = requests.get(url)
soup = BeautifulSoup(r.text)
gs_r = soup.find_all("div", class_="gs_r")
# print(len(gs_r))
output_file_path = '../results/01-{}.csv'.format(from_cluster_id)
f = open(output_file_path, 'w')
f.close()
#
# for each search result
#
for citing_article_soup in gs_r:
result_article = DanGoogleScholarArticle(soup=citing_article_soup)
result_article.parse_title()
# print(result_article.title)
result_article.parse_cluster_id()
# seed_cluster_id = result_article.cluster_id
# print(seed_cluster_id)
f = open(output_file_path, 'a+')
str_to_write = '{}\t|\t{}\t|\t{}\n'.\
format(result_article.cluster_id,
cluster_id,
citing_article_soup)
f.write(str_to_write)
f.close()
sleep_time = random() * randint(10, 100)
                print('cluster_id: {}, page: {}, sleeping: {}'.format(cluster_id, search_page_number, sleep_time))
sleep(sleep_time)
if __name__ == '__main__':
main()
| mit |
samnashi/howdoflawsgetlonger | model_loader.py | 1 | 33387 | from __future__ import print_function
import numpy as np
from random import shuffle
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
from keras.models import Sequential, Model, load_model
from keras.utils import plot_model
from keras.layers import Dense, LSTM, GRU, Flatten, Input, Reshape, TimeDistributed, Bidirectional, Dense, Dropout, \
Activation, Flatten, Conv1D, MaxPooling1D, GlobalAveragePooling1D, AveragePooling1D, concatenate, BatchNormalization
from keras.initializers import lecun_normal, glorot_normal,orthogonal
from keras.regularizers import l1, l1_l2, l2
from keras import metrics
from keras.optimizers import adam, rmsprop
import pandas as pd
import scipy.io as sio
from keras.callbacks import CSVLogger, TerminateOnNaN
import os
import csv
import json
import scattergro_utils as sg_utils
import sklearn.preprocessing
from Conv1D_ActivationSearch_BigLoop import pair_generator_1dconv_lstm #this is the stacked one.
from Conv1D_LSTM_Ensemble import pair_generator_1dconv_lstm_bagged
from LSTM_TimeDist import pair_generator_lstm
from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, explained_variance_score, \
r2_score, mean_squared_log_error
from AuxRegressor import create_model_list,create_testing_set,create_training_set, mape
# @@@@@@@@@@@@@@ RELATIVE PATHS @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
Base_Path = "./"
image_path = "./images/"
train_path = "./train/"
test_path = "./test/"
analysis_path = "./analysis/"
models_path = analysis_path + "models_to_load/"
results_path = analysis_path + "model_loader_results/"
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
def individual_output_scorer(metrics_list):
score_df = pd.DataFrame() #set index
return score_df
def create_generator(model_type, data, labels, start_at=0, scaled=True,
                     type_scaler='standard_per_batch', gbs=128,
                     upc=False, gen_pad=128, lab_dim=4):
    """Builds the generator that matches the detected model type, using only
    the arguments passed in rather than variables from the calling scope."""
    generator = None
    if model_type == 'conv_lstm_bagged':
        # conv-LSTM ensemble generator
        generator = pair_generator_1dconv_lstm_bagged(
            data, labels, start_at=start_at, generator_batch_size=gbs,
            use_precomputed_coeffs=upc, scaled=scaled, scaler_type=type_scaler,
            label_dims=lab_dim, generator_pad=gen_pad)
    if model_type == 'normal_lstm' or model_type == 'bidir_lstm':
        # LSTM-only generator
        generator = pair_generator_lstm(
            data, labels, start_at=start_at, generator_batch_size=gbs,
            use_precomputed_coeffs=upc, label_dims=lab_dim)
    if model_type == 'conv':
        # conv-only (stacked) generator
        generator = pair_generator_1dconv_lstm(
            data, labels, start_at=start_at, generator_batch_size=gbs,
            use_precomputed_coeffs=upc, scaled=scaled, scaler_type=type_scaler,
            label_dims=lab_dim, generator_pad=gen_pad)
    return generator
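# Hedged usage sketch for the helper above (names follow the training loop in
# __main__ below):
# train_gen = create_generator('conv', train_array, label_array, start_at=0,
#                              scaled=True, type_scaler='standard_per_batch',
#                              gbs=GENERATOR_BATCH_SIZE, upc=False,
#                              gen_pad=GENERATOR_PAD, lab_dim=4)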
# def determine_state():
# return state
#
# class State:
if __name__ == "__main__":
############################## RUNNING PARAMETERS #########################################################################
num_sequence_draws = 10 # how many times the training corpus is sampled.
GENERATOR_BATCH_SIZE = 128
GENERATOR_PAD = 128 #for the conv-only and conv-LSTM generator.
num_epochs = 3 # individual. like how many times is the net trained on that sequence consecutively
finetune = True
test_only = False # no training. if finetune is also on, this'll raise an error.
scaler_active = True
use_precomp_sscaler = False
active_scaler_type = 'standard_per_batch'
if active_scaler_type != "None":
assert (scaler_active != False) # makes sure that if a scaler type is specified, the "scaler active" flag is on (the master switch)
base_seq_circumnav_amt = 1.0 # default value, the only one if adaptive circumnav is False
adaptive_circumnav = True
if adaptive_circumnav == True:
aux_circumnav_onset_draw = 3
assert (aux_circumnav_onset_draw < num_sequence_draws)
aux_seq_circumnav_amt = 1.5 # only used if adaptive_circumnav is True
assert (base_seq_circumnav_amt != None and aux_seq_circumnav_amt != None and aux_circumnav_onset_draw != None)
shuffle_training_generator = False
shuffle_testing_generator = False #****
save_preds = False
save_figs = False
#######################################################################################################
# LOAD MODEL AND CHECK
metrics_list = ['mae', 'mape', 'mse', 'msle']
train_set_filenames = create_training_set()
test_set_filenames = create_testing_set()
model_filenames = create_model_list()
#TODO: THIS BELOW!!!!
for model in model_filenames:
identifier_post_training = model
raw_base_model = load_model(models_path + model)
print("model loaded, ", str(model))
cond_conv = any(isinstance(layer, Conv1D) for layer in raw_base_model.layers)
cond_lstm = any(isinstance(layer, LSTM) for layer in raw_base_model.layers)
cond_bidir = any(isinstance(layer, Bidirectional) for layer in raw_base_model.layers)
cond_has_dpc = any(layer.name == 'dense_post_concat' for layer in raw_base_model.layers)
print("is there an LSTM layer?", any(isinstance(layer, LSTM) for layer in raw_base_model.layers))
print("is there dpc?", any(layer.name == 'dense_post_concat' for layer in raw_base_model.layers))
# cond_conv_lstm_bagged = cond_conv == True and cond_lstm == True
if cond_lstm == True and cond_conv == True:
model_type = 'conv_lstm_bagged'
if cond_lstm == True and cond_conv == False:
model_type = 'normal_lstm'
if cond_lstm == True and cond_conv == False and cond_bidir == True:
model_type = 'bidir_lstm'
if cond_conv == True and cond_lstm == False:
model_type = 'conv'
#should be any item in metrics_list not in raw_base_model_metrics. any(metric not in raw_base_model.metrics for metric in metrics_list)
cond_mismatched_metrics = any(metric not in raw_base_model.metrics for metric in metrics_list)
#cond_not_multioutput = len(raw_base_model.metrics) >
if cond_mismatched_metrics == True: #e.g. missing mse, recompile using the existing optimizers.
print("mismatched metrics, model's metrics are currently: ",raw_base_model.metrics)
existing_optimizer = raw_base_model.optimizer
existing_loss = raw_base_model.loss
raw_base_model.compile(optimizer = existing_optimizer,loss=existing_loss,metrics=metrics_list)
print("model type is: ",model_type, " with metrics: ", raw_base_model.metrics_names)
        plot_model(raw_base_model, to_file=analysis_path + 'model_' + identifier_post_training + '_' + str(model)[:-3] + '.png', show_shapes=True)
#####################################################################################################
weights_file_name = None
identifier_post_training = 'f1' #placeholder for weights
identifier_pre_training = 'f2' #placeholder, for weights
if finetune == False:
weights_present_indicator = os.path.isfile(
'Weights_' + str(num_sequence_draws) + identifier_post_training + '.h5')
print("Are weights (with the given name to be saved as) already present? {}".format(
weights_present_indicator))
else:
weights_present_indicator = os.path.isfile('Weights_' + identifier_pre_training + '.h5')
print("Are weights (with the given name) to initialize with present? {}".format(weights_present_indicator))
if model is not None:
weights_present_indicator = True
print("model loaded instead, using: ", str(model))
assert (finetune == False and weights_present_indicator == True) == False
#initialize callbacks
csv_logger = CSVLogger(filename='./analysis/logtrain' + identifier_post_training + ".csv", append=True)
nan_terminator = TerminateOnNaN()
active_seq_circumnav_amt = base_seq_circumnav_amt
############ TRAINING SET AND LABEL LOADS INTO MEMORY ###################################
for i in range(0, num_sequence_draws):
index_to_load = np.random.randint(0, len(train_set_filenames)) # switch to iterations
files = train_set_filenames[index_to_load]
print("files: {}, draw # {} out of {}".format(files, i,num_sequence_draws))
data_load_path = train_path + '/data/' + files[0]
label_load_path = train_path + '/label/' + files[1]
train_array = np.load(data_load_path)
if train_array.shape[1] != 11: #cut off the 1st column, which is the stepindex just for rigidity
train_array = train_array[:, 1:]
label_array = np.load(label_load_path)
if label_array.shape[1] != 4: #cut off the 1st column, which is the stepindex just for rigidity
label_array = label_array[:,1:]
#TODO:if shuffle_training_generator == True:
nonlinear_part_starting_position = GENERATOR_BATCH_SIZE * ((train_array.shape[0] // GENERATOR_BATCH_SIZE) - 3)
shuffled_starting_position = np.random.randint(0, nonlinear_part_starting_position)
if shuffle_training_generator == True:
active_starting_position = shuffled_starting_position #doesn't start from 0, if the model is still in the 1st phase of training
if shuffle_training_generator == False:
active_starting_position = 0
#adaptive circumnav governs the training from one generator upon initialization.
if adaptive_circumnav == True and i >= aux_circumnav_onset_draw: #overrides the shuffle.
active_seq_circumnav_amt = aux_seq_circumnav_amt
active_starting_position = 0
if model_type == 'conv_lstm_bagged' and cond_has_dpc == True:
#create conv-lstm generator
train_generator = pair_generator_1dconv_lstm_bagged(train_array, label_array, start_at=active_starting_position,generator_batch_size=GENERATOR_BATCH_SIZE,use_precomputed_coeffs=use_precomp_sscaler,
scaled=scaler_active,scaler_type=active_scaler_type,label_dims=4,generator_pad=GENERATOR_PAD)
# training_hist = raw_base_model.fit_generator(train_generator,
# steps_per_epoch=active_seq_circumnav_amt * (train_array.shape[0] // GENERATOR_BATCH_SIZE),
# epochs=num_epochs, verbose=2,
# callbacks=[csv_logger, nan_terminator])
if model_type == 'normal_lstm' or model_type == 'bidir_LSTM':
# create lstm-only generator
train_generator = pair_generator_lstm(train_array, label_array, start_at=shuffled_starting_position,
generator_batch_size=GENERATOR_BATCH_SIZE,
use_precomputed_coeffs=False, label_dims=4)
# training_hist = raw_base_model.fit_generator(train_generator, epochs=num_epochs,
# steps_per_epoch=1 * (train_array.shape[0] // GENERATOR_BATCH_SIZE),
# callbacks=[csv_logger, nan_terminator], verbose=2)
if model_type == 'conv':
train_generator = pair_generator_1dconv_lstm(train_array, label_array,
start_at=active_starting_position,
generator_batch_size=GENERATOR_BATCH_SIZE,
use_precomputed_coeffs=use_precomp_sscaler,
scaled=scaler_active,
scaler_type=active_scaler_type, label_dims=4,
generator_pad=GENERATOR_PAD)
# training_hist = raw_base_model.fit_generator(train_generator, steps_per_epoch=active_seq_circumnav_amt * (train_array.shape[0] // GENERATOR_BATCH_SIZE),
# epochs=num_epochs, verbose=2, callbacks=[csv_logger, nan_terminator])
training_hist = raw_base_model.fit_generator(train_generator,
steps_per_epoch=active_seq_circumnav_amt * (train_array.shape[0] // GENERATOR_BATCH_SIZE),
epochs=num_epochs, verbose=2,
callbacks=[csv_logger, nan_terminator])
trained_model = raw_base_model
if weights_present_indicator == True and finetune == True:
print("fine-tuning/partial training session completed.")
weights_file_name = 'Weights_' + str(num_sequence_draws) + identifier_post_training + '.h5'
#trained_model.save_weights(analysis_path + weights_file_name)
trained_model.save(results_path + 'model_' + identifier_post_training + '_' + str(model)[:-3] + '.h5')
print("after {} iterations, model weights is saved as {}".format(num_sequence_draws * num_epochs,
weights_file_name))
if weights_present_indicator == False and finetune == False: # fresh training
print("FRESH training session completed.")
weights_file_name = 'Weights_' + str(num_sequence_draws) + identifier_post_training + '.h5'
#trained_model.save_weights(weights_file_name)
trained_model.save(results_path + 'model_' + identifier_post_training + '_' + str(model)[:-3] + '.h5')
print("after {} iterations, model weights is saved as {}".format(num_sequence_draws * num_epochs,
weights_file_name))
else: # TESTING ONLY! bypass weights present indicator.
weights_file_name = 'Weights_' + str(num_sequence_draws) + identifier_post_training + '.h5'
# test_weights_present_indicator
print("weights_file_name before the if/else block to determine the test flag is: {}".format(
weights_file_name))
if weights_file_name is not None:
# means it went through the training loop
if os.path.isfile(weights_file_name) == False:
print("Weights from training weren't saved as .h5 but is retained in memory.")
test_weights_present_indicator = True
print("test_weights_present_indicator is {}".format(test_weights_present_indicator))
weights_to_test_with_fname = "weights retained in runtime memory"
if os.path.isfile(weights_file_name) == True:
test_weights_present_indicator = True
print("test weights present indicator based on the presence of {} is {}".format(weights_file_name,
test_weights_present_indicator))
weights_to_test_with_fname = weights_file_name
                    trained_model.load_weights(weights_to_test_with_fname, by_name=True)
if test_only == True:
trained_model = raw_base_model
weights_to_test_with_fname = 'Weights_' + identifier_pre_training + '.h5' # hardcode the previous epoch number UP ABOVE
weights_file_name = weights_to_test_with_fname # piggybacking the old flag. the one without fname is to refer to post training weights.
trained_model.load_weights(weights_to_test_with_fname, by_name=True)
test_weights_present_indicator = os.path.isfile(weights_to_test_with_fname)
if weights_file_name == None:
print(
"Warning: check input flags. No training has been done, and testing is about to be performed with weights labeled as POST TRAINING weights")
test_weights_present_indicator = os.path.isfile(
'Weights_' + str(num_sequence_draws) + identifier_post_training + '.h5')
print(
"weights_file_name after the if/else block to determine the test flag is: {}".format(weights_file_name))
if test_weights_present_indicator == True:
# the testing part
print("TESTING PHASE, with weights {}".format(weights_to_test_with_fname))
# load data multiple times.
data_filenames = list(set(os.listdir(test_path + "data")))
# print("before sorting, data_filenames: {}".format(data_filenames))
data_filenames.sort()
# print("after sorting, data_filenames: {}".format(data_filenames))
label_filenames = list(set(os.listdir(test_path + "label")))
label_filenames.sort()
# print("label_filenames: {}".format(data_filenames))
assert len(data_filenames) == len(label_filenames)
combined_test_filenames = zip(data_filenames, label_filenames)
# print("before shuffling: {}".format(combined_test_filenames))
shuffle(combined_test_filenames)
print("after shuffling: {}".format(combined_test_filenames)) # shuffling works ok.
i = 0
# TODO: still only saves single results.
score_rows_list = []
score_rows_list_scikit = []
score_rows_list_scikit_raw = []
for files in combined_test_filenames:
i = i + 1
data_load_path = test_path + '/data/' + files[0]
label_load_path = test_path + '/label/' + files[1]
# print("data/label load path: {} \n {}".format(data_load_path,label_load_path))
test_array = np.load(data_load_path)
if test_array.shape[1] != 11: # cut off the 1st column, which is the stepindex just for rigidity
test_array = test_array[:, 1:]
label_array = np.load(label_load_path)
if label_array.shape[1] != 4: # cut off the 1st column, which is the stepindex just for rigidity
label_array = label_array[:, 1:]
# --------COMMENTED OUT BECAUSE OF SCALER IN THE GENERATOR-----------------------------------
# test_array = np.reshape(test_array, (1, test_array.shape[0], test_array.shape[1]))
# label_array = np.reshape(label_array,(1,label_array.shape[0],label_array.shape[1])) #label doesn't need to be 3D
# print("file: {} data/label shape: {}, {}".format(files[0],test_array.shape, label_array.shape))
print("sequence being tested: ",files[0], ", number ", i, "out of ", len(combined_test_filenames))
# print("Metrics: {}".format(model.metrics_names))
# steps per epoch is how many times that generator is called
nonlinear_part_starting_position = GENERATOR_BATCH_SIZE * ((test_array.shape[0] // GENERATOR_BATCH_SIZE) - 3)
shuffled_starting_position = np.random.randint(0, nonlinear_part_starting_position)
if shuffle_testing_generator == True:
active_starting_position = shuffled_starting_position # doesn't start from 0, if the model is still in the 1st phase of training
if shuffle_testing_generator == False:
active_starting_position = 0
#define the test generator parameters.
if model_type == 'conv_lstm_bagged' and cond_has_dpc == True:
# create conv-lstm generator
test_generator = pair_generator_1dconv_lstm_bagged(test_array, label_array, start_at=active_starting_position,
generator_batch_size=GENERATOR_BATCH_SIZE,
use_precomputed_coeffs=use_precomp_sscaler,
scaled=scaler_active,
scaler_type=active_scaler_type,label_dims=4)
if model_type == 'normal_lstm' or model_type == 'bidir_LSTM':
# create lstm-only generator
test_generator = pair_generator_lstm(test_array, label_array,
start_at=active_starting_position,
generator_batch_size=GENERATOR_BATCH_SIZE,
use_precomputed_coeffs=False, label_dims=4)
if model_type == 'conv':
test_generator = pair_generator_1dconv_lstm(test_array, label_array,
start_at=active_starting_position,
generator_batch_size=GENERATOR_BATCH_SIZE,
use_precomputed_coeffs=use_precomp_sscaler,
scaled=scaler_active,
scaler_type=active_scaler_type, label_dims=4,
generator_pad=GENERATOR_PAD)
score = trained_model.evaluate_generator(test_generator,
steps=(test_array.shape[0] // GENERATOR_BATCH_SIZE),
max_queue_size=test_array.shape[0], use_multiprocessing=False)
print("scores: {}".format(score))
metrics_check = (metrics_list == trained_model.metrics_names)
if metrics_check == False:
metrics_list = trained_model.metrics_names
row_dict = {}
row_dict_scikit = {} #for the scikit-based metrics.
row_dict_scikit_raw = {} #for the raw-value scikit-based metrics
row_dict['seq_name'] = str(files[0])[:-4]
for item in metrics_list:
row_dict[str(item)] = score[metrics_list.index(item)] # 'loss'
score_rows_list.append(row_dict)
#SECOND TIME FOR PREDICTIONS LOGGING. INITIALIZE GENERATORS
active_starting_position = 0 #hardcode for the actual predictions logging?
if model_type == 'conv_lstm_bagged' and cond_has_dpc == True:
# create conv-lstm generator
test_generator = pair_generator_1dconv_lstm_bagged(test_array, label_array, start_at=active_starting_position,
generator_batch_size=GENERATOR_BATCH_SIZE,
use_precomputed_coeffs=use_precomp_sscaler,
scaled=scaler_active,
scaler_type=active_scaler_type, label_dims=4)
if model_type == 'normal_lstm' or model_type == 'bidir_LSTM':
# create lstm-only generator
test_generator = pair_generator_lstm(test_array, label_array,
start_at=active_starting_position,
generator_batch_size=GENERATOR_BATCH_SIZE,
use_precomputed_coeffs=False, label_dims=4)
if model_type == 'conv':
#create conv-only generator
test_generator = pair_generator_1dconv_lstm(test_array, label_array,
start_at=active_starting_position,
generator_batch_size=GENERATOR_BATCH_SIZE,
use_precomputed_coeffs=use_precomp_sscaler,
scaled=scaler_active,
scaler_type=active_scaler_type, label_dims=4,
generator_pad=GENERATOR_PAD)
prediction_length = (int(1.0 * (GENERATOR_BATCH_SIZE * (label_array.shape[0] // GENERATOR_BATCH_SIZE))))
test_i = 0
# Kindly declare the shape
preds_ndims = len(trained_model.output_layers[0].output_shape) #if 3D, this should be 3. [0] because this is the combined output.
preds_2d_shape = [prediction_length, 4]
preds_3d_shape = [1, prediction_length, 4]
if preds_ndims == 3:
y_pred = np.zeros(shape = preds_3d_shape)
y_truth = np.zeros(shape = preds_3d_shape)
if preds_ndims == 2:
y_pred = np.zeros(shape=preds_2d_shape)
y_truth = np.zeros(shape=preds_2d_shape)
while test_i <= prediction_length - GENERATOR_BATCH_SIZE: #TODO: make sure this matches. [0] not necessary for bagged data, necessary for bagged labels.
x_test_batch, y_test_batch = test_generator.next()
if model_type == 'conv_lstm_bagged': #this has two outputs
y_pred[0, test_i:test_i + GENERATOR_BATCH_SIZE, :] = (trained_model.predict_on_batch(x_test_batch))[0]
if model_type != 'conv_lstm_bagged':
y_pred[0, test_i:test_i + GENERATOR_BATCH_SIZE, :] = trained_model.predict_on_batch(x_test_batch) #needs the entire output. #needs the entire output.
if model_type == 'conv_lstm_bagged': #duplicated outputs. pick the first one.
y_truth[0, test_i:test_i + GENERATOR_BATCH_SIZE, :] = y_test_batch[0]
if model_type != 'conv_lstm_bagged':
y_truth[0, test_i:test_i + GENERATOR_BATCH_SIZE, :] = y_test_batch
test_i += GENERATOR_BATCH_SIZE
# print("array shape {}".format(y_prediction[0,int(0.95*prediction_length), :].shape))
#gotta remove a dimension if the predictions are 3D, otherwise the scikit metrics won't work.
if len(y_pred.shape) == 3:
y_pred = np.reshape(y_pred, newshape = preds_2d_shape)
y_truth = np.reshape(y_truth, newshape = preds_2d_shape)
ind_f3 = y_pred.shape[0] - 3 * GENERATOR_BATCH_SIZE
row_dict_scikit['seq_name'] = str(files[0])[:-4]
row_dict_scikit_raw['seq_name'] = str(files[0])[:-4]
row_dict_scikit['mse'] = mean_squared_error(y_true = y_truth, y_pred = y_pred,multioutput = 'uniform_average')
row_dict_scikit['mse_f3'] = mean_squared_error(y_true=y_truth[ind_f3:,:], y_pred=y_pred[ind_f3:,:],
multioutput='uniform_average')
raw_mse = list(mean_squared_error(y_true=y_truth, y_pred=y_pred,multioutput='raw_values'))
for flaw in range(0,len(raw_mse)):
row_dict_scikit_raw['mse_' + str(flaw)] = raw_mse[flaw]
raw_mse_f3 = list(mean_squared_error(y_true=y_truth[ind_f3:,:], y_pred=y_pred[ind_f3:,:],multioutput='raw_values'))
for flaw in range(0, len(raw_mse_f3)):
row_dict_scikit_raw['mse_f3_' + str(flaw)] = raw_mse_f3[flaw]
row_dict_scikit['mae'] = mean_absolute_error(y_true = y_truth, y_pred = y_pred,multioutput = 'uniform_average')
row_dict_scikit['mae_f3'] = mean_absolute_error(y_true=y_truth[ind_f3:,:], y_pred=y_pred[ind_f3:,:],
multioutput='uniform_average')
raw_mae = list(mean_absolute_error(y_true=y_truth, y_pred=y_pred,multioutput='raw_values'))
for flaw in range(0,len(raw_mae)):
row_dict_scikit_raw['mae_' + str(flaw)] = raw_mae[flaw]
raw_mae_f3 = list(mean_absolute_error(y_true=y_truth[ind_f3:,:], y_pred=y_pred[ind_f3:,:],multioutput='raw_values'))
for flaw in range(0, len(raw_mae_f3)):
row_dict_scikit_raw['mae_f3_' + str(flaw)] = raw_mae_f3[flaw]
# row_dict_scikit['msle'] = mean_squared_log_error(y_true=y_truth, y_pred=y_pred, multioutput='uniform_average')
# row_dict_scikit['msle_f3'] = mean_squared_log_error(y_true=y_truth[ind_f3:,:], y_pred=y_pred[ind_f3:,:],
# multioutput='uniform_average')
score_rows_list_scikit.append(row_dict_scikit)
score_rows_list_scikit_raw.append(row_dict_scikit_raw)
#print('row_dict sk keys: ', row_dict_scikit.keys(), "row dict sk values: ", row_dict_scikit.values())
if save_preds == True:
np.save(analysis_path + 'preds/preds_' + identifier_post_training + str(files[0])[:-4] + '.npy',
arr=y_pred)
# y_prediction_temp = y_truth
# y_truth = np.reshape(y_truth,
# newshape=(y_prediction_temp.shape[1], y_prediction_temp.shape[2]))
# label_truth = label_array[0:y_truth.shape[0], :]
# # print (label_truth.shape)
# label_truth_temp = label_truth
# scaler_output = sklearn.preprocessing.StandardScaler() # TODO: this should use the precomputed coeffs as well...
# #scaler_output = set_standalone_scaler_params(scaler_output)
# # print("")
# label_truth = scaler_output.transform(X=label_truth_temp)
#
# resample_interval = 16
# label_truth = label_truth[::resample_interval, :]
# y_truth = y_truth[::resample_interval, :]
score_df = pd.DataFrame(data=score_rows_list, columns=score_rows_list[0].keys())
score_scikit_df = pd.DataFrame(data=score_rows_list_scikit,columns=score_rows_list_scikit[0].keys())
score_scikit_raw_df = pd.DataFrame(data=score_rows_list_scikit_raw,columns=score_rows_list_scikit_raw[0].keys())
if shuffle_testing_generator == False:
score_df.to_csv(analysis_path + 'scores_' + model_type + '_' + str(model)[:-3] + '.csv')
score_scikit_df.to_csv(analysis_path + 'scores_sk_' + model_type + '_' + str(model)[:-3] + '.csv')
score_scikit_raw_df.to_csv(analysis_path + 'scores_sk_raw_' + model_type + '_' + str(model)[:-3] + '.csv')
if shuffle_testing_generator == True:
score_df.to_csv(analysis_path + 'scores_' + model_type + '_' + str(model)[:-3] + 'shf_test.csv')
score_scikit_df.to_csv(analysis_path + 'scores_sk_' + model_type + '_' + str(model)[:-3] + 'shf_test.csv')
score_scikit_raw_df.to_csv(analysis_path + 'scores_sk_' + model_type + '_' + str(model)[:-3] + 'shf_test.csv')
# print(len(y_prediction))
#move all this junk into the training loop.
#TODO check if the API are actually the same. if not, update.
#TODO: initialize csvlogger
#create conv-only generator
# model.compile(loss={'combined_output': 'mape', 'lstm_output': 'mse'},
# optimizer=optimizer_used, metrics=metrics_list) | gpl-3.0 |
Aufuray/ross-sea-project | app/models/image_nd.py | 1 | 3950 | import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from tools import data
class ImageND(object):
SENSOR = None
def __init__(self, filename, dimensions=3):
if dimensions < 3:
print "The image doesn't have the minimum of 3 dimensions"
sys.exit(1)
self.dimensions = dimensions
self.filename = filename
self.filepath = os.path.join(data.DATA_DIR, self.filename)
self.title = filename[2:15]
def __validate(self, image):
"""
Validate image, check that's n-'dimensions' channel image
"""
if image is not None and len(image.shape) >= self.dimensions:
return True
return False
def image(self):
"""
Returns the raw ndarray image
:rtype: ndarray
"""
image = data.mat_file(self.filepath).get(self.SENSOR)
if not self.__validate(image):
print "Invalid dimensions or sensor {0} isn't in the image".format(
self.sensor)
sys.exit(1)
return np.dstack(image)
def nan_percentage(self):
        nan_count = np.count_nonzero(np.isnan(self.image()))
        return (float(nan_count) / self.image().size) * 100
def date(self):
return data.parse_date(self.filename)
def show(self, colorbar=True):
plt.imshow(self.image())
plt.title(self.filename)
if colorbar:
plt.colorbar()
plt.show()
# =====================================
# Analysis
# =====================================
def rgb(self):
"""
Return 3-tuple with (r, g, b)
"""
red = self.channel("red")
green = self.channel("green")
blue = self.channel("blue")
return (red, green, blue)
def channel(self, channel=None):
"""
This function is to be overwritten in by subclass
"""
return None
class IbandImage(ImageND):
SENSOR = "ibands"
def channel(self, channel=None):
"""
Returns a specific channel, the options are:
- red, green, blue
:params:
:params channel: string with the specified channel
:rType: ndarray
"""
if channel == 'red':
return self.image()[:, :, 0]
elif channel == 'green':
return self.image()[:, :, 1]
elif channel == 'blue':
return self.image()[:, :, 2]
else:
print "Channel requested wasn't red, green or blue"
class MbandImage(ImageND):
SENSOR = "mbands"
def channel(self, channel=None):
"""
Returns a specific channel, the options are:
- red
- blue
:params:
:params channel: string with the specified channel
:rType: ndarray
"""
channel = channel.strip().lower()
if channel == 'red':
return self.image()[:, :, 2]
elif channel == 'green':
return self.image()[:, :, 1]
elif channel == 'blue':
return self.image()[:, :, 0]
else:
print "Channel requested wasn't red, green or blue"
class FcImage(ImageND):
SENSOR = "fc"
def channel(self, channel=None):
"""
Returns a specific channel, the options are:
- red
- blue
:params:
:params channel: string with the specified channel
:rType: ndarray
"""
channel = channel.strip().lower()
if channel == 'red':
return self.image()[:, :, 0]
elif channel == 'green':
return self.image()[:, :, 1]
elif channel == 'blue':
return self.image()[:, :, 2]
else:
print "Channel requested wasn't red, green or blue"
| mit |
mclaughlin6464/pylearn2 | pylearn2/utils/image.py | 39 | 18841 | """
Utility functions for working with images.
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
from theano.compat.six import string_types
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A matplotlib-based image viewer command,
wrapping `matplotlib.pyplot.imshow` but behaving more
sensibly.
Parameters
----------
figure : TODO
TODO: write parameters section using decorators to inherit
the matplotlib docstring
Notes
-----
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
`figure` keyword argument is supplied.
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
on to `imshow`.`
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
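# Hedged usage sketch: display a random 32x32 grayscale array full-frame, with
# no ticks and nearest-neighbour interpolation.
# >>> import numpy as np
# >>> imview(np.random.rand(32, 32))  # doctest: +SKIP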
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Parameters
----------
window_title : str
TODO: writeme with decorators to inherit the other imviews'
docstrings
Notes
-----
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
# do some shape checking because PIL just raises a tuple indexing error
# that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
# The below is a temporary workaround that prevents us from crashing
# 3rd party image viewers such as eog by writing out overly large
# images.
# In the long run we should determine if this is a bug in PIL when
# producing
# such images or a bug in eog and determine a proper fix.
# Since this is hopefully just a short term workaround the
# constants below are not included in the interface to the
# function, so that 3rd party code won't start passing them.
max_height = 4096
max_width = 4096
# Display separate warnings for each direction, since it's
# common to crop only one.
if image.shape[0] > max_height:
image = image[0:max_height, :, :]
warnings.warn("Cropping image to smaller height to avoid crashing "
"the viewer program.")
if image.shape[1] > max_width:
image = image[:, 0:max_width, :]
warnings.warn("Cropping the image to a smaller width to avoid "
"crashing the viewer program.")
# This ends the workaround
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
# don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
# PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:, :, 0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
Converts an ndarray to a PIL image.
Parameters
----------
ndarray : ndarray
An ndarray containing an image.
Returns
-------
pil : PIL Image
A PIL Image containing the image.
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
Converts a PIL Image to an ndarray.
Parameters
----------
pil : PIL Image
An image represented as a PIL Image object
dtype : str
The dtype of ndarray to create
Returns
-------
ndarray : ndarray
The image as an ndarray.
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
Scales image down to fit inside shape preserves proportions of image
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
rstart = (shape[0] - image.shape[0]) / 2
cstart = (shape[1] - image.shape[1]) / 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
def load(filepath, rescale_image=True, dtype='float64'):
"""
Load an image from a file.
Parameters
----------
filepath : str
Path to the image file to load
rescale_image : bool
Default value: True
If True, returned images have pixel values in [0, 1]. Otherwise,
values are in [0, 255].
dtype: str
The dtype to use for the returned value
Returns
-------
img : numpy ndarray
An array containing the image that was in the file.
"""
assert isinstance(filepath, string_types)
if not rescale_image and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
msg = ("Tried to load an image, got an array with %d"
" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format.")
if numpy_rval.ndim not in [2, 3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError(msg % numpy_rval.ndim)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
Saves an image to a file.
Parameters
----------
filepath : str
The path to write the file to.
ndarray : ndarray
An array containing the image to be saved.
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
X : numpy.ndarray
2-d ndarray or 4-tuple of 2-d ndarrays (or Nones, one per channel),
in which every row is a flattened image.
img_shape : 2-tuple of ints
The first component is the height of each image,
the second component is the width.
tile_shape : 2-tuple of ints
The number of images to tile in (rows, columns) form.
tile_spacing : 2-tuple of ints
The (row, column) spacing, in pixels, between neighbouring tiles.
scale_rows_to_unit_interval : bool
Whether or not each row needs to be scaled to the interval [0, 1]
before being plotted.
output_pixel_vals : bool
Whether or not the output should be pixel values (uint8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
| bsd-3-clause |
FYP-DES5/deepscan-core | util/denoise.py | 1 | 7282 | import cv2
# import gdfmm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import sys
import numpy as np
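# EdgeImprover post-processes the voxel dictionary produced by voxelGridFilter:
# it rasterizes the occupied (x, y) grid cells into a mask, finds the boundary
# cells by subtracting a cross-eroded copy of that mask, and for each boundary
# cell fits local planes through the neighbouring points, then replaces that
# cell's points with a single recalibrated sample whose in-cell position is
# derived from the cell's fill ratio (relative to the modal interior cell count).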
class EdgeImprover:
def __init__(self, voxels, gridsize, minX, minY, maxX, maxY):
self.voxels = voxels
self.gridsize = gridsize
self.mask = np.zeros((3 + maxX - minX, 3 + maxY - minY), dtype=np.uint8)
self.offset = (1 - minX, 1 - minY)
for k in self.voxels.keys():
self.mask[tuple(np.array(k) + self.offset)] = 255
self.edge = self.mask - cv2.erode(self.mask, cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3)))
self.toBeDeleted = []
def run(self):
it = np.nditer(self.edge, flags=['multi_index'])
frequencies = {}
while not it.finished:
if it[0] != 0:
it.iternext()
continue
point = tuple(np.array(it.multi_index) - self.offset)
if point not in self.voxels:
it.iternext()
continue
frequency = len(self.voxels[point])
frequencies[frequency] = 1 + frequencies.get(frequency, 0)
it.iternext()
modeGridPoints = max(frequencies, key=lambda k: frequencies[k])
it = np.nditer(self.edge, flags=['multi_index'])
while not it.finished:
if it[0] == 0:
it.iternext()
continue
point = tuple(np.array(it.multi_index) - self.offset)
points = self.__getNeighborhoodPoints(self.voxels, *point)
centroid, ns = self.__fitPointsToPlanes(points)
center = np.array(point, dtype=np.float64) * self.gridsize
targetAreaRatio = len(self.voxels[point]) / float(modeGridPoints)
xy = self.__calculateBestSample(center, centroid, self.gridsize, targetAreaRatio)
new = [self.__genVFromXYNNN(xy[0] - centroid[0], xy[1] - centroid[1], ns) + centroid]
print new
self.voxels[(point, 'calibrated')] = new
self.toBeDeleted.append(point)
it.iternext()
for x in self.toBeDeleted:
del self.voxels[x]
@staticmethod
def __getNeighborhoodPoints(voxels, x, y):
return sum([[] if (i, j) not in voxels else voxels[(i, j)]
for i in range(x - 1, x + 2)
for j in range(y - 1, y + 2)],
[])
@staticmethod
def __genVFromXYNNN(x, y, ns):
v = np.array([x, y, 0, 0, 0])
for i in range(2, 5):
n = ns[i - 2]
v[i] = -np.dot(v[[0, 1]], n[[0, 1]]) / n[2]
return v
@staticmethod
def __fitPointsToPlanes(points):
if type(points) is not np.ndarray:
points = np.array(points)
centroid = np.average(points, axis=0)
pointsRelativeToCentroid = points - centroid
timesTable = np.dot(pointsRelativeToCentroid.T, pointsRelativeToCentroid)
def getNormal(n):
D = np.linalg.det(timesTable[0:2, 0:2])
a = np.linalg.det(timesTable[0:2, (1,n)]) / D
b = -np.linalg.det(timesTable[(0,n), 0:2]) / D
return np.array([a, b, 1])
return centroid, map(getNormal, range(2, 5))
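# __calculateBestSample: given the cell center, the neighbourhood centroid (xy),
# the grid size and a target area fraction, it returns the point where the
# center-to-centroid axis meets the perpendicular line that cuts the square
# cell into that area fraction. Axis swaps and reflections reduce the problem
# to one canonical orientation; a triangular cut is handled as a special case.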
@staticmethod
def __calculateBestSample(center, centroid, gridsize, targetAreaRatio):
print center, centroid
# const center, const centroid
center = np.copy(center)
centroid = np.copy(centroid[[0, 1]])
d = center - centroid
# if ratio is more than half
if targetAreaRatio > 0.5:
# equivalent to reversing direction and finding complement
d = -d
centroid = center - d
targetAreaRatio = 1 - targetAreaRatio
# if horizontal d
if abs(d[0]) > abs(d[1]):
# swap x and y of input
center[[0, 1]] = center[[1, 0]]
centroid[[0, 1]] = centroid[[1, 0]]
yx = EdgeImprover.__calculateBestSample(center, centroid, gridsize, targetAreaRatio)
# swap x and y of output
return yx[[1, 0]]
# if centroid is above
if d[1] < 0:
# reflect y of input
center[1] = -center[1]
centroid[1] = -centroid[1]
x_negY = EdgeImprover.__calculateBestSample(center, centroid, gridsize, targetAreaRatio)
# reflect y of output
return x_negY * [1, -1]
# if centroid is to the right
if d[0] < 0:
# reflect y of input
center[0] = -center[0]
centroid[0] = -centroid[0]
negX_y = EdgeImprover.__calculateBestSample(center, centroid, gridsize, targetAreaRatio)
# reflect y of output
return negX_y * [-1, 1]
# valid assumption: centroid is between S45W and S, ratio <= 0.5
halfGrid = gridsize / 2.0
# m = dy / dx
md = d[1] / d[0]
# mx + c = y
# c = y - mx
cd = center[1] - md * center[0]
# `y = h` is a line cutting square in targetAreaRatio
h = gridsize * targetAreaRatio + center[1] - halfGrid
# `y = mx + c` is a line cutting square in targetAreaRatio
# and perpendicular to center - centroid
m1 = -(d[0] / d[1])
# mx + c = y
# c = y - mx
c1 = h - m1 * center[0]
# test if `y = mx + c` touches the left and right edge of the square
leftY = m1 * (center[0] - halfGrid) + c1
rightY = m1 * (center[0] + halfGrid) + c1
if all(map(lambda y: center[1] - halfGrid < y < center[1] + halfGrid,
[leftY, rightY])):
# -m1x + y = c1
# -mdx + y = cd
# -> [-m1 1; -md 1][x; y] = [c1; cd]
return np.linalg.solve([[-m1, 1], [-md, 1]], [c1, cd])
else:
# area must be triangular
# let base be bt, height be ht
# area = bt ht / 2
# md = bt / ht
# area = md / 2 * ht^2
# ht = sqrt(2area / md)
m2 = m1
# mx + c = y
# c = y - mx
ht = np.sqrt(2 * targetAreaRatio * gridsize**2 / md)
yt = ht + center[1] - halfGrid
c2 = yt - m2 * (center[0] - halfGrid)
xy = np.linalg.solve([[-m2, 1], [-md, 1]], [c2, cd])
# check if in range
if not xy[1] < center[1] - halfGrid:
return xy
else:
# triangle too small, point outside of square
# compromise: return closest point on line
bt = md * ht
xt = bt + center[0] - halfGrid
return np.array([xt, center[1] - halfGrid])
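# voxelGridFilter bins the (point, texture-coordinate) pairs into gridsize-sized
# cells in x/y, plots the raw cloud against the per-cell averages, runs
# EdgeImprover to recalibrate the boundary cells in place, and returns the
# per-cell averaged positions and texture coordinates.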
def voxelGridFilter(points, tcoords, gridsize=0.01):
voxels = {}
print len(points)
maxX, maxY, minX, minY = -sys.maxint - 1, -sys.maxint - 1, sys.maxint, sys.maxint
for i in range(len(points)):
n = tuple(map(lambda x: int(round(x / gridsize)),np.copy(points[i]))[0:2])
if n not in voxels:
voxels[n] = []
minX, maxX = min(n[0], minX), max(n[0], maxX)
minY, maxY = min(n[1], minY), max(n[1], maxY)
voxels[n].append(np.hstack((points[i], tcoords[i])))
rp = [np.average(np.array([e[0:3] for e in voxels[n]]), axis=0) for n in voxels]
rt = [np.average(np.array([e[3:5] for e in voxels[n]]), axis=0) for n in voxels]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(*([map(lambda p: p[i], points) for i in range(3)]), c='r', s=0.5)
ax.scatter(*([map(lambda p: p[i], rp) for i in range(3)]), c='b')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
EdgeImprover(voxels, gridsize, minX, minY, maxX, maxY).run()
rp = [np.average(np.array([e[0:3] for e in voxels[n]]), axis=0) for n in voxels]
rt = [np.average(np.array([e[3:5] for e in voxels[n]]), axis=0) for n in voxels]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(*([map(lambda p: p[i], points) for i in range(3)]), c='r', s=0.5)
ax.scatter(*([map(lambda p: p[i], rp) for i in range(3)]), c='b')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
return rp, rt
def inpaint(bgr, depth):
pass
# def inpaint(bgr, depth):
# bgrSmall = cv2.resize(bgr, (depth.shape[1], depth.shape[0]))
# rgb = cv2.cvtColor(bgrSmall, cv2.COLOR_BGR2RGB)
# # pass by reference
# depth[:] = gdfmm.InpaintDepth2(depth, rgb, 1, 1, 2.0, 11)[:]
| mit |
mengxn/tensorflow | tensorflow/examples/learn/iris_val_based_early_stopping.py | 62 | 2827 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with early stopping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
learn = tf.contrib.learn
def clean_folder(folder):
"""Cleans the given folder if it exists."""
try:
shutil.rmtree(folder)
except OSError:
pass
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.2, random_state=42)
val_monitor = learn.monitors.ValidationMonitor(
x_val, y_val, early_stopping_rounds=200)
model_dir = '/tmp/iris_model'
clean_folder(model_dir)
# classifier with early stopping on training data
classifier1 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
classifier1.fit(x=x_train, y=y_train, steps=2000)
predictions1 = list(classifier1.predict(x_test, as_iterable=True))
score1 = metrics.accuracy_score(y_test, predictions1)
model_dir = '/tmp/iris_model_val'
clean_folder(model_dir)
# classifier with early stopping on validation data, save frequently for
# monitor to pick up new checkpoints.
classifier2 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir,
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
classifier2.fit(x=x_train, y=y_train, steps=2000, monitors=[val_monitor])
predictions2 = list(classifier2.predict(x_test, as_iterable=True))
score2 = metrics.accuracy_score(y_test, predictions2)
# In many applications, the score is improved by using early stopping
print('score1: ', score1)
print('score2: ', score2)
print('score2 > score1: ', score2 > score1)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
maheshakya/scikit-learn | sklearn/utils/graph.py | 50 | 6169 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchhoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = csgraph.astype(np.float)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
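# The two helpers below compute L = D - A (or the symmetrically normalized
# D^-1/2 L D^-1/2 when normed=True) for sparse and dense adjacency matrices.
# The sparse variant first adds any missing diagonal entries to the COO
# sparsity pattern so the degree terms can be written in place.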
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
trangnm58/idrec | localization_cnn/model.py | 1 | 2308 | from __future__ import division, print_function, unicode_literals
import sys
import numpy as np
import random
import matplotlib.pyplot as plt
# fix random seed for reproducibility
seed = 13
random.seed(seed)
np.random.seed(seed)
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
sys.path.insert(0, "..")
from utils import Timer
from localization_cnn.dataset import Dataset
from localization_cnn import model_handler
from localization_cnn.constants import (
HEIGHT,
WIDTH,
PICKLE_DATASET,
DATA_NAME,
TRAINED_MODELS)
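# build_model assembles a small Keras CNN for HEIGHT x WIDTH single-channel
# inputs: two Convolution2D(40 filters, 7x7 then 5x5, relu) + MaxPooling2D
# blocks, a 256-unit dense layer with dropout, and a softmax output over
# num_of_class classes, compiled with categorical cross-entropy and Adam.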
def build_model(num_of_class):
print("Building the model...")
model = Sequential()
model.add(Convolution2D(
nb_filter=40,
nb_row=7,
nb_col=7,
border_mode='same',
input_shape=(HEIGHT, WIDTH, 1),
activation='relu'
))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(
nb_filter=40,
nb_row=5,
nb_col=5,
border_mode='same',
input_shape=(HEIGHT, WIDTH, 1),
activation='relu'
))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_of_class, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
return model
def train_model(model, X_train, Y_train, X_val, Y_val, epochs=50):
print("Training the model...")
# how many examples to look at during each training iteration
batch_size = 128
# the training may be slow depending on your computer
history = model.fit(X_train,
Y_train,
batch_size=batch_size,
nb_epoch=epochs,
validation_data=(X_val, Y_val))
return history
if __name__ == "__main__":
# deal with dataset
timer = Timer()
timer.start("Loading data")
d = Dataset(PICKLE_DATASET + DATA_NAME)
X_train, Y_train = d.get_train_dataset()
X_val, Y_val = d.get_val_dataset()
timer.stop()
num_of_class = Y_train.shape[1]
m = build_model(num_of_class)
epochs = 20
while True:
train_model(m, X_train, Y_train, X_val, Y_val, epochs)
epochs = input("More? ")
if not epochs:
break
else:
epochs = int(epochs)
name = input("Model's name or 'n': ")
if name != 'n':
model_handler.save_model(m, TRAINED_MODELS + name)
| mit |
kpolimis/sklearn-forest-ci | forestci/version.py | 2 | 2064 | # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 4
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "forestci: confidence intervals for scikit-learn "
description += "forest algorithms"
# Long description will go up on the pypi page
long_description = """
sklearn forest ci
=================
`forest-confidence-interval` is a Python module for calculating variance and
adding confidence intervals to scikit-learn random forest regression or
classification objects. The core functions calculate an in-bag estimate and
error bars for random forest objects
Please read the repository README_ on Github or our documentation_
.. _README: https://github.com/scikit-learn-contrib/forest-confidence-interval/blob/master/README.md
.. _documentation: http://contrib.scikit-learn.org/forest-confidence-interval/
"""
NAME = "forestci"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "[email protected]"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/scikit-learn-contrib/forest-confidence-interval"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Ariel Rokem, Bryna Hazelton, Kivan Polimis"
AUTHOR_EMAIL = "[email protected]"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
| bsd-3-clause |
louispotok/pandas | asv_bench/benchmarks/sparse.py | 3 | 4917 | import itertools
import numpy as np
import scipy.sparse
from pandas import (SparseSeries, SparseDataFrame, SparseArray, Series,
date_range, MultiIndex)
from .pandas_vb_common import setup # noqa
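# make_array builds a length-`size` array filled with `fill_value`, then
# overwrites a random `dense_proportion` of positions with random integers in
# [0, 100); the benchmarks below wrap such arrays in SparseArray objects.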
def make_array(size, dense_proportion, fill_value, dtype):
dense_size = int(size * dense_proportion)
arr = np.full(size, fill_value, dtype)
indexer = np.random.choice(np.arange(size), dense_size, replace=False)
arr[indexer] = np.random.choice(np.arange(100, dtype=dtype), dense_size)
return arr
class SparseSeriesToFrame(object):
goal_time = 0.2
def setup(self):
K = 50
N = 50001
rng = date_range('1/1/2000', periods=N, freq='T')
self.series = {}
for i in range(1, K):
data = np.random.randn(N)[:-i]
idx = rng[:-i]
data[100:] = np.nan
self.series[i] = SparseSeries(data, index=idx)
def time_series_to_frame(self):
SparseDataFrame(self.series)
class SparseArrayConstructor(object):
goal_time = 0.2
params = ([0.1, 0.01], [0, np.nan],
[np.int64, np.float64, np.object])
param_names = ['dense_proportion', 'fill_value', 'dtype']
def setup(self, dense_proportion, fill_value, dtype):
N = 10**6
self.array = make_array(N, dense_proportion, fill_value, dtype)
def time_sparse_array(self, dense_proportion, fill_value, dtype):
SparseArray(self.array, fill_value=fill_value, dtype=dtype)
class SparseDataFrameConstructor(object):
goal_time = 0.2
def setup(self):
N = 1000
self.arr = np.arange(N)
self.sparse = scipy.sparse.rand(N, N, 0.005)
self.dict = dict(zip(range(N), itertools.repeat([0])))
def time_constructor(self):
SparseDataFrame(columns=self.arr, index=self.arr)
def time_from_scipy(self):
SparseDataFrame(self.sparse)
def time_from_dict(self):
SparseDataFrame(self.dict)
class FromCoo(object):
goal_time = 0.2
def setup(self):
self.matrix = scipy.sparse.coo_matrix(([3.0, 1.0, 2.0],
([1, 0, 0], [0, 2, 3])),
shape=(100, 100))
def time_sparse_series_from_coo(self):
SparseSeries.from_coo(self.matrix)
class ToCoo(object):
goal_time = 0.2
def setup(self):
s = Series([np.nan] * 10000)
s[0] = 3.0
s[100] = -1.0
s[999] = 12.1
s.index = MultiIndex.from_product([range(10)] * 4)
self.ss = s.to_sparse()
def time_sparse_series_to_coo(self):
self.ss.to_coo(row_levels=[0, 1],
column_levels=[2, 3],
sort_labels=True)
class Arithmetic(object):
goal_time = 0.2
params = ([0.1, 0.01], [0, np.nan])
param_names = ['dense_proportion', 'fill_value']
def setup(self, dense_proportion, fill_value):
N = 10**6
arr1 = make_array(N, dense_proportion, fill_value, np.int64)
self.array1 = SparseArray(arr1, fill_value=fill_value)
arr2 = make_array(N, dense_proportion, fill_value, np.int64)
self.array2 = SparseArray(arr2, fill_value=fill_value)
def time_make_union(self, dense_proportion, fill_value):
self.array1.sp_index.make_union(self.array2.sp_index)
def time_intersect(self, dense_proportion, fill_value):
self.array1.sp_index.intersect(self.array2.sp_index)
def time_add(self, dense_proportion, fill_value):
self.array1 + self.array2
def time_divide(self, dense_proportion, fill_value):
self.array1 / self.array2
class ArithmeticBlock(object):
goal_time = 0.2
params = [np.nan, 0]
param_names = ['fill_value']
def setup(self, fill_value):
N = 10**6
self.arr1 = self.make_block_array(length=N, num_blocks=1000,
block_size=10, fill_value=fill_value)
self.arr2 = self.make_block_array(length=N, num_blocks=1000,
block_size=10, fill_value=fill_value)
def make_block_array(self, length, num_blocks, block_size, fill_value):
arr = np.full(length, fill_value)
indices = np.random.choice(np.arange(0, length, block_size),
num_blocks,
replace=False)
for ind in indices:
arr[ind:ind + block_size] = np.random.randint(0, 100, block_size)
return SparseArray(arr, fill_value=fill_value)
def time_make_union(self, fill_value):
self.arr1.sp_index.make_union(self.arr2.sp_index)
def time_intersect(self, fill_value):
self.arr2.sp_index.intersect(self.arr2.sp_index)
def time_addition(self, fill_value):
self.arr1 + self.arr2
def time_division(self, fill_value):
self.arr1 / self.arr2
| bsd-3-clause |
STREAM3/visisc | docs/visISC_simple_frequency_data_example.py | 1 | 3316 |
# coding: utf-8
# # visISC Example: Visualizing Anomalous Frequency Data with Classes
# In this example, we will show what to do when you are analysing frequency counts of data and you want to identify which part of the data is the reason for a deviation.
# In[2]:
import pyisc;
import visisc;
import numpy as np
import datetime
from scipy.stats import poisson, norm, multivariate_normal
get_ipython().magic(u'matplotlib wx')
from pylab import plot, figure
# <b>First, we create a data set with a set of classes and a set of Poisson distributed frequency counts and then train an anomaly detector:</b>
# In[3]:
n_classes = 10
n_frequencies = 20
num_of_normal_days = 200
num_of_anomalous_days = 10
data = None
days_list = [num_of_normal_days, num_of_anomalous_days]
dates = []
for state in [0,1]:
num_of_days = days_list[state]
for i in range(n_classes):
data0 = None
for j in range(n_frequencies):
if state == 0:
po_dist = poisson(int((10+2*(n_classes-i))*(float(j)/n_frequencies/2+0.75))) # from 0.75 to 1.25
else:
po_dist = poisson(int((20+2*(n_classes-i))*(float(j)/n_frequencies+0.5))) # from 0.5 to 1.5
tmp = po_dist.rvs(num_of_days)
if data0 is None:
data0 = tmp
else:
data0 = np.c_[data0,tmp]
tmp = np.c_[
[1] * (num_of_days),
data0,
[
datetime.date(2015,02,24) + datetime.timedelta(d)
for d in np.array(range(num_of_days)) + (0 if state==0 else num_of_normal_days)
],
["Source %i"%i] * (num_of_days)
]
if data is None:
data = tmp
else:
data = np.r_[
tmp,
data
]
# Column index into the data
first_frequency_column = 1
period_column = 0
date_column = data.shape[-1]-2
source_column = data.shape[-1]-1
# <b>Next, we create an event data model that describes how our events are connected. In this case, we assume only a flat structure of events.</b>
# First we create a flat model with a root element where all columns in the data are subelements
# In[6]:
model = visisc.EventDataModel.flat_model(
event_columns=range(first_frequency_column, date_column)
)
# Second we transform the numpy array into a pyisc data object with a class column and a period column.
# The last created data object is also kept in the model.
# In[7]:
data_object = model.data_object(
data,
source_column = source_column,
class_column = source_column,
period_column = period_column,
date_column = date_column
)
# Then, we create an anomaly detector and fit a one-sided Poisson distribution for each event column.
# The last created and fitted anomaly detector is also kept in the model
# In[8]:
anomaly_detector = model.fit_anomaly_detector(data_object, poisson_onesided=True)
# <b>Finally, we can visualize the event frequency data using the Visualization class. However, due to a bug in the underlying 3D engine, we have to run the notebook as a script:</b>
vis = visisc.EventVisualization(model, 13.8, start_day=209, precompute_cache=True)
| bsd-3-clause |
tgbugs/pyontutils | pyontutils/ontutils.py | 1 | 37830 | #!/usr/bin/env python3
#!/usr/bin/env pypy3
from pyontutils.config import auth
__doc__ = f"""Common commands for ontology processes.
Also old ontology refactors to run in the root ttl folder.
Usage:
ontutils set ontology-local-repo <path>
ontutils set scigraph-api-key <key>
ontutils devconfig [--write] [<field> ...]
ontutils parcellation
ontutils catalog-extras [options]
ontutils iri-commit [options] <repo>
ontutils deadlinks [options] <file> ...
ontutils scigraph-stress [options]
ontutils spell [options] <file> ...
ontutils version-iri [options] <file>...
ontutils uri-switch [options] <file>...
ontutils backend-refactor [options] <file>...
ontutils todo [options] <repo>
ontutils expand <curie>...
Options:
-a --scigraph-api=API SciGraph API endpoint [default: {auth.get('scigraph-api')}]
-o --output-file=FILE output file
-l --git-local=LBASE local git folder [default: {auth.get_path('git-local-base')}]
-u --curies=CURIEFILE curie definition file [default: {auth.get_path('curies')}]
-e --epoch=EPOCH specify the epoch to use for versionIRI
-r --rate=Hz rate in Hz for requests, zero is no limit [default: 20]
-t --timeout=SECONDS timeout in seconds for deadlinks requests [default: 5]
-f --fetch fetch catalog extras from their remote location
-d --debug drop into debugger when finished
-v --verbose verbose output
-w --write write devconfig file
"""
import os
from glob import glob
from time import time, localtime, strftime
from random import shuffle
from pathlib import Path, PurePath
import rdflib
import requests
import augpathlib as aug
from joblib import Parallel, delayed
from git.repo import Repo
from pyontutils.core import makeGraph, createOntology
from pyontutils.utils import noneMembers, anyMembers, Async, deferred, TermColors as tc
from pyontutils.ontload import loadall
from pyontutils.namespaces import getCuries
from pyontutils.namespaces import makePrefixes, definition
from pyontutils.closed_namespaces import rdf, rdfs, owl, skos
try:
import hunspell
except ImportError:
hunspell = None
# common
zoneoffset = strftime('%z', localtime())
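# do_file parses one Turtle file, rewrites its URIs via `swap`, serializes it
# back in place and returns the replacement (owl:sameAs) triples; switchURIs
# applies `swap` to every triple of a graph, registering any newly required
# namespace prefixes as they are encountered.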
def do_file(filename, swap, *args):
print('START', filename)
ng = rdflib.Graph()
ng.parse(filename, format='turtle')
reps = switchURIs(ng, swap, *args)
wg = makeGraph('', graph=ng)
wg.filename = filename
wg.write()
print('END', filename)
return reps
def switchURIs(g, swap, *args):
if len(args) > 1: # FIXME hack!
_, fragment_prefixes = args
reps = []
prefs = {None}
addpg = makeGraph('', graph=g)
for t in g:
nt, ireps, iprefs = tuple(zip(*swap(t, *args)))
if t != nt:
g.remove(t)
g.add(nt)
for rep in ireps:
if rep is not None:
reps.append(rep)
for pref in iprefs:
if pref not in prefs:
prefs.add(pref)
addpg.add_known_namespaces(fragment_prefixes[pref])
return reps
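# ontologySection is a context manager that splits a ttl file at its first
# '###' comment banner: the ontology header before the banner is parsed into a
# graph that can be edited, while everything after it is preserved verbatim and
# re-appended on write.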
class ontologySection:
def __init__(self, filename):
self.filename = filename
with open(self.filename, 'rb') as f:
raw = f.read()
ontraw, self.rest = raw.split(b'###', 1)
self.graph = rdflib.Graph().parse(data=ontraw, format='turtle')
def write(self):
ontraw_comment = self.graph.serialize(format='nifttl', encoding='utf-8')
ontraw, comment = ontraw_comment.split(b'###', 1)
with open(self.filename, 'wb') as f:
f.write(ontraw)
f.write(b'###')
f.write(self.rest)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.write()
#
# utils
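# catalog_extras splices the catalog-extras entries into ttl/catalog-v001.xml
# (skipping the splice when the catalog already ends with them) and, with
# fetch=True, downloads each referenced extra to its local path asynchronously.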
def catalog_extras(fetch=False):
path = Path(auth.get_path('ontology-local-repo'), 'ttl')
cat = (path / 'catalog-v001.xml').as_posix()
with open((path / '../catalog-extras').as_posix(), 'rt') as ce, open(cat, 'rt') as c:
clines = c.readlines()
celines = ce.readlines()
if clines[-2] != celines[-1]:
with open(cat, 'wt') as f:
f.writelines(clines[:-1] + celines + clines[-1:])
else:
print(tc.blue('INFO:'), 'extras already added to catalog doing nothing')
if fetch:
print(tc.blue('INFO:'), 'fetching extras')
def fetch_and_save(url, loc):
resp = requests.get(url)
saveloc = (path / loc).as_posix()
if resp.ok:
with open(saveloc, 'wb') as f:
f.write(resp.content)
print(tc.blue('INFO:'), f'{url:<60} written to {loc}')
else:
print(tc.red('WARNING:'), f'failed to fetch {url}')
Async()(deferred(fetch_and_save)(url, loc) for line in celines
for _, _, _, url, _, loc, _ in (line.split('"'),))
def spell(filenames, debug=False):
if hunspell is None:
raise ImportError('hunspell is not installed on your system. If you want '
'to run `ontutils spell` please run pipenv install --dev --skip-lock. '
'You will need the development libs for hunspell on your system.')
spell_objects = (u for r in Parallel(n_jobs=9)(delayed(get_spells)(f) for f in filenames) for u in r)
hobj = hunspell.HunSpell('/usr/share/hunspell/en_US.dic', '/usr/share/hunspell/en_US.aff')
#nobj = hunspell.HunSpell(os.path.expanduser('~/git/domain_wordlists/neuroscience-en.dic'), '/usr/share/hunspell/en_US.aff') # segfaults without aff :x
collect = set()
for filename, s, p, o in spell_objects:
missed = False
no = []
for line in o.split('\n'):
nline = []
for tok in line.split(' '):
prefix, tok, suffix = tokstrip(tok)
#print((prefix, tok, suffix))
if not hobj.spell(tok):# and not nobj.spell(tok):
missed = True
collect.add(tok)
nline.append(prefix + tc.red(tok) + suffix)
else:
nline.append(prefix + tok + suffix)
line = ' '.join(nline)
no.append(line)
o = '\n'.join(no)
if missed:
#print(filename, s, o)
print('>>>', o)
if debug:
[print(_) for _ in sorted(collect)]
breakpoint()
_bads = (',', ';', ':', '"', "'", '(', ')', '[',']','{','}',
'.', '-', '/', '\\t', '\\n', '\\', '%', '$', '*',
'`', '#', '@', '=', '?', '|', '<', '>', '+', '~')
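# tokstrip recursively strips the leading and trailing characters listed in
# _bads from a token; with side=None it returns (front, token, back), otherwise
# only the pair for the requested side.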
def tokstrip(tok, side=None):
front = ''
back = ''
for bad in _bads:
if side is None or True:
ftok = tok[1:] if tok.startswith(bad) else tok
if ftok != tok:
front = front + bad
f, tok = tokstrip(ftok, True)
front = front + f
if side is None or False:
btok = tok[:-1] if tok.endswith(bad) else tok
if btok != tok:
back = bad + back
tok, b = tokstrip(btok, False)
back = b + back
if side is None:
return front, tok, back
elif side:
return front, tok
else:
return tok, back
def get_spells(filename):
check_spelling = {skos.definition, definition, rdfs.comment}
return [(filename, s, p, o) for s, p, o in rdflib.Graph().parse(filename, format='turtle') if p in check_spelling]
def scigraph_stress(rate, timeout=5, verbose=False, debug=False, scigraph=auth.get('scigraph-api')):
# TODO use the api classes
with open((auth.get_path('resources') / 'chebi-subset-ids.txt').as_posix(), 'rt') as f:
urls = [os.path.join(scigraph, f'vocabulary/id/{curie.strip()}') for curie in f.readlines()]
print(urls)
url_blaster(urls, rate, timeout, verbose, debug)
def deadlinks(filenames, rate, timeout=5, verbose=False, debug=False):
urls = list(set(u for r in Parallel(n_jobs=9)(delayed(furls)(f) for f in filenames) for u in r))
url_blaster(urls, rate, timeout, verbose, debug)
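# url_blaster issues `method` requests (HEAD by default) against every url at
# the requested rate, reports the urls whose responses fail ok_test, raises
# when fail=True, and with debug=True plots per-thread timing diagnostics.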
def url_blaster(urls, rate, timeout=5, verbose=False, debug=False, method='head',
fail=False, negative=False, ok_test=lambda r: r.ok):
shuffle(urls) # try to distribute timeout events evenly across workers
if verbose:
[print(u) for u in sorted(urls)]
class Timedout:
ok = False
def __init__(self, url):
self.url = url
r_method = getattr(requests, method)
def method_timeout(url, _method=r_method):
try:
return _method(url, timeout=timeout)
except (requests.ConnectTimeout, requests.ReadTimeout) as e:
print('Timedout:', url, e)
return Timedout(url)
s = time()
collector = [] if debug else None
all_ = Async(rate=rate, debug=verbose, collector=collector)(deferred(method_timeout)(url) for url in urls)
o = time()
not_ok = [_.url for _ in all_ if not ok_test(_)]
d = o - s
print(f'Actual time: {d} Effective rate: {len(urls) / d}Hz diff: {(len(urls) / d) / rate if rate else 1}')
print('Failed:')
if not_ok:
for nok in not_ok:
print(nok)
ln = len(not_ok)
lt = len(urls)
lo = lt - ln
msg = f'{ln} urls out of {lt} ({ln / lt * 100:2.2f}%) are not ok. D:'
print(msg) # always print to get around joblib issues
if negative and fail:
if len(not_ok) == len(all_):
raise AssertionError('Everything failed!')
elif fail:
raise AssertionError(f'{msg}\n' + '\n'.join(sorted(not_ok)))
else:
print(f'OK. All {len(urls)} urls passed! :D')
if debug:
from matplotlib.pyplot import plot, savefig, figure, show, legend, title
from collections import defaultdict
def asyncVis(collector):
by_thread = defaultdict(lambda: [[], [], [], [], [], [], [], []])
min_ = 0
for thread, job, start, target_stop, stop, time_per_job, p, i, d in sorted(collector):
if not min_:
min_ = stop
by_thread[thread][0].append(job)
#by_thread[thread][1].append(start - min_)
by_thread[thread][2].append(target_stop - stop)
by_thread[thread][3].append(stop - min_)
by_thread[thread][4].append(time_per_job)
by_thread[thread][5].append(p)
by_thread[thread][6].append(i)
by_thread[thread][7].append(d)
for thread, (job, y1, y2, y3, y4, y5, y6, y7) in by_thread.items():
figure()
title(str(thread))
plot(job, [0] * len(job), 'r-')
#plot(job, y1, label=f'stop')
plot(job, y2, label=f'early by')
#plot(job, y3, label=f'stop')
#plot(job, y4, label=f'time per job') # now constant...
plot(job, y5, label='P')
plot(job, y6, label='I')
plot(job, y7, label='D')
legend()
show()
asyncVis(collector)
breakpoint()
def furls(filename):
return set(url for t in rdflib.Graph().parse(filename, format='turtle')
for url in t if isinstance(url, rdflib.URIRef) and not url.startswith('file://'))
def version_iris(*filenames, epoch=None):
# TODO make sure that when we add versionIRIs the files we are adding them to are either unmodified or in the index
if epoch is None:
epoch = int(time())
Parallel(n_jobs=9)(delayed(version_iri)(f, epoch) for f in filenames)
def version_iri(filename, epoch):
with ontologySection(filename) as ont:
add_version_iri(ont.graph, epoch)
def make_version_iri_from_iri(iri, epoch):
head, tail = iri.split('/', 1)
pp = PurePath(tail)
vp = (pp.with_suffix('') / 'version' / str(epoch) / pp.stem).with_suffix(pp.suffix)
viri = head + str(vp)
versionIRI = rdflib.URIRef(viri)
return rdflib.URIRef(versionIRI)
def add_version_iri(graph, epoch):
""" Also remove the previous versionIRI if there was one."""
for ont in graph.subjects(rdf.type, owl.Ontology):
for versionIRI in graph.objects(ont, owl.versionIRI):
graph.remove((ont, owl.versionIRI, versionIRI))
t = ont, owl.versionIRI, make_version_iri_from_iri(ont, epoch)
graph.add(t)
def validate_new_version_iris(diffs): # TODO
for diff in diffs:
diff.diff.split('\n')
def make_git_commit_command(git_local, repo_name):
# TODO also need to get the epochs for all unchanged files and make sure that the max of those is less than commit_epoch...
rp = aug.RepoPath(git_local, repo_name)
repo = rp.repo
diffs = repo.index.diff(None) + repo.index.diff(repo.head.commit) # not staged + staged; cant use create_patch=True...
filenames = [d.a_path for d in diffs if d.change_type == 'M']
print(filenames)
#validate_new_version_iris(something) # TODO
min_epoch = get_epoch(*filenames)
other_filenames = [f for f in repo.git.ls_files().split('\n') if f not in filenames and f.endswith('.ttl')]
# search all other existing ttl files to find the maximum existing versionIRI
max_old_epoch = get_epoch(*other_filenames, min_=False)
print(min_epoch, max_old_epoch)
minimum_time_difference_for_new_version = 2 # seconds
msg = (f'you want a versionIRI less than {minimum_time_difference_for_new_version} '
'seconds newer than an existing versionIRI, slow down there bud')
# timeline ...old-1.|......old-2.|...<min-dt>...|.new-1..|.new-2..
# max_old_epoch min_epoch
assert minimum_time_difference_for_new_version <= min_epoch - max_old_epoch, msg
commit_epoch = min_epoch
# XXX I know I have tested this, but it still seems wrong because the versionIRIs
# are all in epoch which *should* be in utc, but maybe the way git works it works out
# as expected ??
print(f'git commit --date {commit_epoch}{zoneoffset}')
def get_epoch(*filenames, min_=True):
""" get the minimum or maximum epoch from ithe versionIRI triples
of multiple files """
comp_epoch = None
for f in filenames:
graph = ontologySection(f).graph
for ont in graph.subjects(rdf.type, owl.Ontology):
for versionIRI in graph.objects(ont, owl.versionIRI):
base, epoch, filename = versionIRI.rsplit('/', 2)
epoch = int(epoch)
print(epoch)
if comp_epoch is None:
comp_epoch = epoch
elif min_ and epoch < comp_epoch:
comp_epoch = epoch
elif not min_ and epoch > comp_epoch:
comp_epoch = epoch
print('min' if min_ else 'max', comp_epoch)
if comp_epoch is None:
if min_:
return 0
else:
return 0 # XXX this may cause errors down the line
return comp_epoch
#
# refactors
#
# uri switch
NIFSTDBASE = 'http://uri.neuinfo.org/nif/nifstd/'
def uri_switch_values(utility_graph):
fragment_prefixes = {
'NIFRID':'NIFRID',
'NIFSTD':'NIFSTD', # no known collisions, mostly for handling ureps
'birnlex_':'BIRNLEX',
'sao':'SAO',
'sao-':'FIXME_SAO', # FIXME
'nif_organ_':'FIXME_NIFORGAN', # single and seems like a mistake for nlx_organ_
'nifext_':'NIFEXT',
#'nifext_5007_', # not a prefix
'nlx_':'NLX',
#'nlx_0906_MP_', # not a prefix, sourced from mamalian phenotype ontology and prefixed TODO
#'nlx_200905_', # not a prefix
'nlx_anat_':'NLXANAT',
'nlx_cell_':'NLXCELL',
'nlx_chem_':'NLXCHEM',
'nlx_dys_':'NLXDYS',
'nlx_func_':'NLXFUNC',
'nlx_inv_':'NLXINV',
'nlx_mol_':'NLXMOL',
'nlx_neuron_nt_':'NLXNEURNT',
'nlx_organ_':'NLXORG',
'nlx_qual_':'NLXQUAL',
'nlx_res_':'NLXRES',
'nlx_sub_':'FIXME_NLXSUBCELL', # FIXME one off mistake for nlx_subcell?
'nlx_subcell_':'NLXSUB', # NLXSUB??
'nlx_ubo_':'NLXUBO',
'nlx_uncl_':'NLXUNCL',
}
uri_replacements = {
# Classes
'NIFCELL:Class_6':'NIFSTD:Class_6',
'NIFCHEM:CHEBI_18248':'NIFSTD:CHEBI_18248',
'NIFCHEM:CHEBI_26020':'NIFSTD:CHEBI_26020',
'NIFCHEM:CHEBI_27958':'NIFSTD:CHEBI_27958',
'NIFCHEM:CHEBI_35469':'NIFSTD:CHEBI_35469',
'NIFCHEM:CHEBI_35476':'NIFSTD:CHEBI_35476',
'NIFCHEM:CHEBI_3611':'NIFSTD:CHEBI_3611',
'NIFCHEM:CHEBI_49575':'NIFSTD:CHEBI_49575',
'NIFCHEM:DB00813':'NIFSTD:DB00813',
'NIFCHEM:DB01221':'NIFSTD:DB01221',
'NIFCHEM:DB01544':'NIFSTD:DB01544',
'NIFGA:Class_12':'NIFSTD:Class_12',
'NIFGA:Class_2':'NIFSTD:Class_2', # FIXME this record is not in neurolex
'NIFGA:Class_4':'NIFSTD:Class_4',
'NIFGA:FMAID_7191':'NIFSTD:FMA_7191', # FIXME http://neurolex.org/wiki/FMA:7191
'NIFGA:UBERON_0000349':'NIFSTD:UBERON_0000349',
'NIFGA:UBERON_0001833':'NIFSTD:UBERON_0001833',
'NIFGA:UBERON_0001886':'NIFSTD:UBERON_0001886',
'NIFGA:UBERON_0002102':'NIFSTD:UBERON_0002102',
'NIFINV:OBI_0000470':'NIFSTD:OBI_0000470',
'NIFINV:OBI_0000690':'NIFSTD:OBI_0000690',
'NIFINV:OBI_0000716':'NIFSTD:OBI_0000716',
'NIFMOL:137140':'NIFSTD:137140',
'NIFMOL:137160':'NIFSTD:137160',
'NIFMOL:D002394':'NIFSTD:D002394',
'NIFMOL:D008995':'NIFSTD:D008995',
'NIFMOL:DB00668':'NIFSTD:DB00668',
'NIFMOL:GO_0043256':'NIFSTD:GO_0043256', # FIXME http://neurolex.org/wiki/GO:0043256
'NIFMOL:IMR_0000512':'NIFSTD:IMR_0000512',
'NIFRES:Class_2':'NLX:293', # FIXME note that neurolex still thinks Class_2 goes here... not to NIFGA:Class_2
'NIFSUB:FMA_83604':'NIFSTD:FMA_83604', # FIXME http://neurolex.org/wiki/FMA:83604
'NIFSUB:FMA_83605':'NIFSTD:FMA_83605', # FIXME http://neurolex.org/wiki/FMA:83605
'NIFSUB:FMA_83606':'NIFSTD:FMA_83606', # FIXME http://neurolex.org/wiki/FMA:83606
'NIFUNCL:CHEBI_24848':'NIFSTD:CHEBI_24848', # FIXME not in interlex and not in neurolex_full.csv but in neurolex (joy)
'NIFUNCL:GO_0006954':'NIFSTD:GO_0006954', # FIXME http://neurolex.org/wiki/GO:0006954
}
uri_reps_nonstandard = {
# nonstandards XXX none of these collide with any other namespace
# that we might like to use in the future under NIFSTD:namespace/
# therefore they are being placed directly into NIFSTD and we will
# work out the details and redirects later (some intlerlex classes
# may need to be created) maybe when we do the backend refactor.
# Classes (from backend)
'BIRNANN:_birnlex_limbo_class':'NIFRID:birnlexLimboClass',
'BIRNANN:_birnlex_retired_class':'NIFRID:birnlexRetiredClass',
rdflib.URIRef('http://ontology.neuinfo.org/NIF/Backend/DC_Term'):'NIFRID:dctermsClass',
rdflib.URIRef('http://ontology.neuinfo.org/NIF/Backend/SKOS_Entity'):'NIFRID:skosClass',
rdflib.URIRef('http://ontology.neuinfo.org/NIF/Backend/_backend_class'):'NIFRID:BackendClass',
rdflib.URIRef('http://ontology.neuinfo.org/NIF/Backend/oboInOwlClass'):'NIFRID:oboInOwlClass',
# NamedIndividuals
'NIFORG:Infraclass':'NIFRID:Infraclass', # only used in annotaiton but all other similar cases show up as named individuals
'NIFORG:first_trimester':'NIFRID:first_trimester',
'NIFORG:second_trimester':'NIFRID:second_trimester',
'NIFORG:third_trimester':'NIFRID:third_trimester',
# ObjectProperties not in OBOANN or BIRNANN
'NIFGA:has_lacking_of':'NIFRID:has_lacking_of',
'NIFNEURNT:has_molecular_constituent':'NIFRID:has_molecular_constituent',
'NIFNEURNT:has_neurotransmitter':'NIFRID:has_neurotransmitter',
'NIFNEURNT:molecular_constituent_of':'NIFRID:molecular_constituent_of',
'NIFNEURNT:neurotransmitter_of':'NIFRID:neurotransmitter_of',
'NIFNEURNT:soma_located_in':'NIFRID:soma_located_in',
'NIFNEURNT:soma_location_of':'NIFRID:soma_location_of',
# AnnotationProperties not in OBOANN or BIRNANN
'NIFCHEM:hasStreetName':'NIFRID:hasStreetName',
'NIFMOL:hasGenbankAccessionNumber':'NIFRID:hasGenbankAccessionNumber',
'NIFMOL:hasLocusMapPosition':'NIFRID:hasLocusMapPosition',
'NIFMOL:hasSequence':'NIFRID:hasSequence',
'NIFORG:hasCoveringOrganism':'NIFRID:hasCoveringOrganism',
'NIFORG:hasMutationType':'NIFRID:hasMutationType',
'NIFORG:hasTaxonRank':'NIFRID:hasTaxonRank',
}
utility_graph.add_known_namespaces(*(c for c in fragment_prefixes.values() if 'FIXME' not in c))
ureps = {utility_graph.expand(k):utility_graph.expand(v)
for k, v in uri_replacements.items()}
ureps.update({utility_graph.check_thing(k):utility_graph.expand(v)
for k, v in uri_reps_nonstandard.items()})
return fragment_prefixes, ureps
def uri_switch(filenames, get_values):
replacement_graph = createOntology('NIF-NIFSTD-mapping',
'NIF* to NIFSTD equivalents',
makePrefixes(
'BIRNANN', 'BIRNOBI', 'BIRNOBO', 'NIFANN',
'NIFCELL', 'NIFCHEM', 'NIFDYS', 'NIFFUN',
'NIFGA', 'NIFGG', 'NIFINV', 'NIFMOL',
'NIFMOLINF', 'NIFMOLROLE', 'NIFNCBISLIM',
'NIFNEURBR', 'NIFNEURBR2', 'NIFNEURCIR',
'NIFNEURMC', 'NIFNEURMOR', 'NIFNEURNT',
'NIFORG', 'NIFQUAL', 'NIFRES', 'NIFRET',
'NIFSCID', 'NIFSUB', 'NIFUNCL', 'OBOANN',
'SAOCORE')
)
fragment_prefixes, ureps = get_values(replacement_graph)
print('Start writing')
trips_lists = Parallel(n_jobs=9)(delayed(do_file)(f, swapUriSwitch, ureps, fragment_prefixes) for f in filenames)
print('Done writing')
[replacement_graph.g.add(t) for trips in trips_lists for t in trips]
replacement_graph.write()
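# swapUriSwitch is the per-triple rewriter used by uri_switch/do_file: for each
# element of a triple it yields (new_uri, replacement, prefix), where
# replacement is an owl:sameAs triple mapping the new NIFSTD/NIFRID-style URI
# back to the old fragment-based one, or (element, None, None) when the element
# is left untouched.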
def swapUriSwitch(trip, ureps, fragment_prefixes):
for spo in trip:
if not isinstance(spo, rdflib.URIRef):
yield spo, None, None
continue
elif spo in ureps:
new_spo = ureps[spo]
rep = (new_spo, owl.sameAs, spo)
if 'nlx_' in new_spo:
pref = 'nlx_'
elif '/readable/' in new_spo:
pref = 'NIFRID'
else:
pref = 'NIFSTD'
yield new_spo, rep, pref
continue
elif anyMembers(spo, # backend refactor
'BIRNLex_annotation_properties.owl#',
'OBO_annotation_properties.owl#'):
_, suffix = spo.rsplit('#', 1)
new_spo = rdflib.URIRef(os.path.join(NIFSTDBASE, 'readable', suffix))
rep = (new_spo, owl.sameAs, spo)
pref = 'NIFRID'
yield new_spo, rep, pref
continue
try:
uri_pref, fragment = spo.rsplit('#', 1)
if '_' in fragment:
frag_pref, p_suffix = fragment.split('_', 1)
if not p_suffix[0].isdigit():
p, suffix = p_suffix.split('_', 1)
frag_pref = frag_pref + '_' + p
else:
suffix = p_suffix
frag_pref_ = frag_pref + '_'
if frag_pref_ in fragment_prefixes:
if frag_pref_ == 'nlx_sub_': pref = 'nlx_subcell_'
elif frag_pref_ == 'nif_organ_': pref = 'nlx_organ_'
else: pref = frag_pref_ # come on branch predictor you can do it!
elif frag_pref_ == 'nlx_neuron_': # special case
rest = 'nt_'
suffix = suffix[len(rest):]
pref = frag_pref_ + rest
else:
yield spo, None, None
continue
elif 'sao' in fragment:
suffix = fragment[3:].strip('-')
pref = 'sao'
else:
yield spo, None, None
continue
new_spo = rdflib.URIRef(NIFSTDBASE + pref + suffix)
if new_spo != spo:
rep = (new_spo, owl.sameAs, spo)
else:
rep = None
print('Already converted', spo)
yield new_spo, rep, pref
except ValueError: # there was no # so do not split
yield spo, None, None
continue
#
# backend
def backend_refactor_values():
uri_reps_lit = {
# from https://github.com/information-artifact-ontology/IAO/blob/master/docs/BFO%201.1%20to%202.0%20conversion/mapping.txt
'http://www.ifomis.org/bfo/1.1#Entity':'BFO:0000001',
'BFO1SNAP:Continuant':'BFO:0000002',
'BFO1SNAP:Disposition':'BFO:0000016',
'BFO1SNAP:Function':'BFO:0000034',
'BFO1SNAP:GenericallyDependentContinuant':'BFO:0000031',
'BFO1SNAP:IndependentContinuant':'BFO:0000004',
'BFO1SNAP:MaterialEntity':'BFO:0000040',
'BFO1SNAP:Quality':'BFO:0000019',
'BFO1SNAP:RealizableEntity':'BFO:0000017',
'BFO1SNAP:Role':'BFO:0000023',
'BFO1SNAP:Site':'BFO:0000029',
'BFO1SNAP:SpecificallyDependentContinuant':'BFO:0000020',
'BFO1SPAN:Occurrent':'BFO:0000003',
'BFO1SPAN:ProcessualEntity':'BFO:0000015',
'BFO1SPAN:Process':'BFO:0000015',
'BFO1SNAP:ZeroDimensionalRegion':'BFO:0000018',
'BFO1SNAP:OneDimensionalRegion':'BFO:0000026',
'BFO1SNAP:TwoDimensionalRegion':'BFO:0000009',
'BFO1SNAP:ThreeDimensionalRegion':'BFO:0000028',
'http://purl.org/obo/owl/OBO_REL#bearer_of':'RO:0000053',
'http://purl.org/obo/owl/OBO_REL#inheres_in':'RO:0000052',
'ro:has_part':'BFO:0000051',
'ro:part_of':'BFO:0000050',
'ro:has_participant':'RO:0000057',
'ro:participates_in':'RO:0000056',
'http://purl.obolibrary.org/obo/OBI_0000294':'RO:0000059',
'http://purl.obolibrary.org/obo/OBI_0000297':'RO:0000058',
'http://purl.obolibrary.org/obo/OBI_0000300':'BFO:0000054',
'http://purl.obolibrary.org/obo/OBI_0000308':'BFO:0000055',
# more bfo
'BFO1SNAP:SpatialRegion':'BFO:0000006',
'BFO1SNAP:FiatObjectPart':'BFO:0000024',
'BFO1SNAP:ObjectAggregate':'BFO:0000027',
'BFO1SNAP:Object':'BFO:0000030',
#'BFO1SNAP:ObjectBoundary' # no direct replacement, only occurs in unused
#'BFO1SPAN:ProcessAggregate' # was not replaced, could simply be a process itself??
#'BFO1SNAP:DependentContinuant' # was not replaced
# other
#'ro:participates_in' # above
#'ro:has_participant' # above
#'ro:has_part', # above
#'ro:part_of', # above
#'ro:precedes' # unused and only in inferred
#'ro:preceded_by' # unused and only in inferred
#'ro:transformation_of' # unused and only in inferred
#'ro:transformed_into' # unused and only in inferred
'http://purl.org/obo/owl/obo#inheres_in':'RO:0000052',
'http://purl.obolibrary.org/obo/obo#towards':'RO:0002503',
'http://purl.org/obo/owl/pato#towards':'RO:0002503',
'http://purl.obolibrary.org/obo/pato#inheres_in':'RO:0000052',
'BIRNLEX:17':'RO:0000053', # is_bearer_of
'http://purl.obolibrary.org/obo/pato#towards':'RO:0002503',
'ro:adjacent_to':'RO:0002220',
'ro:derives_from':'RO:0001000',
'ro:derives_into':'RO:0001001',
'ro:agent_in':'RO:0002217',
'ro:has_agent':'RO:0002218',
'ro:contained_in':'RO:0001018',
'ro:contains':'RO:0001019',
'ro:located_in':'RO:0001025',
'ro:location_of':'RO:0001015',
'ro:has_proper_part':'NIFRID:has_proper_part',
    'ro:proper_part_of':'NIFRID:proper_part_of', # part of where things are not part of themselves; needs review
}
ug = makeGraph('', prefixes=makePrefixes('ro', 'RO', 'BIRNLEX', 'NIFRID',
'BFO', 'BFO1SNAP', 'BFO1SPAN'))
ureps = {ug.check_thing(k):ug.check_thing(v)
for k, v in uri_reps_lit.items()}
return ureps
def swapBackend(trip, ureps):
print(ureps)
for spo in trip:
if spo in ureps:
new_spo = ureps[spo]
rep = (new_spo, owl.sameAs, spo)
yield new_spo, rep, None
else:
yield spo, None, None
def backend_refactor(filenames, get_values):
ureps = get_values()
print('Start writing')
if len(filenames) == 1:
trips_lists = [do_file(f, swapBackend, ureps) for f in filenames]
else:
trips_lists = Parallel(n_jobs=9)(delayed(do_file)(f, swapBackend, ureps) for f in filenames)
print('Done writing')
breakpoint()
#
# graph todo
def graph_todo(graph, curie_prefixes, get_values):
ug = makeGraph('big-graph', graph=graph)
ug.add_known_namespaces('NIFRID')
fragment_prefixes, ureps = get_values(ug)
#all_uris = sorted(set(_ for t in graph for _ in t if type(_) == rdflib.URIRef)) # this snags a bunch of other URIs
#all_uris = sorted(set(_ for _ in graph.subjects() if type(_) != rdflib.BNode))
#all_uris = set(spo for t in graph.subject_predicates() for spo in t if isinstance(spo, rdflib.URIRef))
all_uris = set(spo for t in graph for spo in t if isinstance(spo, rdflib.URIRef))
prefs = set(_.rsplit('#', 1)[0] + '#' if '#' in _
else (_.rsplit('_',1)[0] + '_' if '_' in _
else _.rsplit('/',1)[0] + '/') for _ in all_uris)
nots = set(_ for _ in prefs if _ not in curie_prefixes) # TODO
sos = set(prefs) - set(nots)
all_uris = [u if u not in ureps
else ureps[u]
for u in all_uris]
#to_rep = set(_.rsplit('#', 1)[-1].split('_', 1)[0] for _ in all_uris if 'ontology.neuinfo.org' in _)
#to_rep = set(_.rsplit('#', 1)[-1] for _ in all_uris if 'ontology.neuinfo.org' in _)
ignore = (
# deprecated and only in as annotations
'NIFGA:birnAnatomy_011',
'NIFGA:birnAnatomy_249',
'NIFORG:birnOrganismTaxon_19',
'NIFORG:birnOrganismTaxon_20',
'NIFORG:birnOrganismTaxon_21',
'NIFORG:birnOrganismTaxon_390',
'NIFORG:birnOrganismTaxon_391',
'NIFORG:birnOrganismTaxon_56',
'NIFORG:birnOrganismTaxon_68',
'NIFINV:birnlexInvestigation_174',
'NIFINV:birnlexInvestigation_199',
'NIFINV:birnlexInvestigation_202',
'NIFINV:birnlexInvestigation_204',
)
ignore = tuple(ug.expand(i) for i in ignore)
non_normal_identifiers = sorted(u for u in all_uris
if 'ontology.neuinfo.org' in u
and noneMembers(u, *fragment_prefixes)
and not u.endswith('.ttl')
and not u.endswith('.owl')
and u not in ignore)
print(len(prefs))
embed()
def main():
from docopt import docopt, parse_defaults
args = docopt(__doc__, version='ontutils 0.0.1')
defaults = {o.name:o.value if o.argcount else None
for o in parse_defaults(__doc__)}
verbose = args['--verbose']
debug = args['--debug']
repo_name = args['<repo>']
git_local = os.path.expanduser(args['--git-local'])
epoch = args['--epoch']
curies_location = args['--curies']
curies = getCuries(curies_location)
curie_prefixes = set(curies.values())
filenames = args['<file>']
filenames.sort(key=lambda f: os.path.getsize(f), reverse=True) # make sure the big boys go first
refactor_skip = ('nif.ttl',
'resources.ttl',
'generated/chebislim.ttl',
'unused/ro_bfo_bridge.ttl',
'generated/ncbigeneslim.ttl',
'generated/NIF-NIFSTD-mapping.ttl')
rfilenames = [f for f in filenames if f not in refactor_skip]
if args['set']:
from pyontutils.config import auth
uc = auth.user_config
def set_uc(var, value):
with open(uc._path, 'rt') as f:
text = f.read()
if '#' in text:
msg = f'Comments detected! Not writing config! {uc._path}'
raise ValueError(msg)
blob = uc.load()
            # XXX NEVER DUMP A CONFIG LIKE THIS, YOU _WILL_ CLOBBER IT
# BY ACCIDENT AT SOME POINT AND WILL ERASE ANY/ALL COMMENTS
# THERE IS NO SAFETY WITH THIS IMPLEMENTATION
# USERS SHOULD EDIT THEIR CONFIGS DIRECTLY
# except that it makes giving instructions for
# setting values a bit more complicated
blob['auth-variables'][var] = value
uc.dump(blob)
if args['ontology-local-repo']:
var = 'ontology-local-repo'
olr = Path(args['<path>']).expanduser().resolve()
olr_string = olr.as_posix()
set_uc(var, olr_string)
value2 = auth.get_path(var)
if not value2.exists():
msg = f'{var} path does not exist! {value2}'
print(tc.red('WARNING'), msg)
msg = f'{var} path {value2} written to {uc._path}'
print(msg)
assert olr == value2
elif args['scigraph-api-key']:
            # FIXME this is a hack on top of orthauth, which will not
            # be implementing programmatic modification of user config
            # files any time soon, though it might make sense to have a
            # "machine config path" in addition to auth and user config
            path = ['scigraph', 'api', 'key']
            # check the secrets path first to make sure it is ok
            spath = auth._pathit(uc.get_blob('auth-stores', 'secrets')['path'])
if not spath.parent.exists():
spath.parent.mkdir(parents=True)
spath.parent.chmod(0o0700)
if spath.suffix != '.yaml':
msg = f"Can't write secrets file of type {spath.suffix}"
args = None
raise NotImplementedError(msg)
v = None
try:
s = uc.secrets
v = s(*path)
except:
pass
if v is not None:
v = None
raise ValueError(f'Path already in secrets! {path} in {spath}')
# safely append to the secrets file
key = args['<key>']
path_key = f'\nscigraph:\n api:\n key: {key}'
if not spath.exists():
spath.touch()
spath.chmod(0o0600)
with open(spath, 'a+') as f:
f.write(path_key)
# set the config var
var = 'scigraph-api-key'
value = {'path': ' '.join(path)}
set_uc(var, value) # set the path
# XXX NOTE yes, it is correct to do this only after secrets succeeds
# otherwise it is possible to get into a state where secrets does
            # not exist but there is a path pointing to it, so loading this
            # ontutils file would fail at import time
# test that we got the value we expected
value2 = auth.get(var)
msg = (f'Key written to secrets. {spath} and path to '
f'key was written to config {uc._path}')
print(msg)
assert key == value2, 'Key retrieved does not match key set!'
elif args['devconfig']:
if args['--write']:
file = devconfig.write(args['--output-file'])
print(f'config written to {file}')
elif args['<field>']:
for f in args['<field>']:
print(getattr(devconfig, f, ''))
else:
print(devconfig)
elif args['catalog-extras']:
catalog_extras(args['--fetch'])
elif args['version-iri']:
version_iris(*filenames, epoch=epoch)
elif args['scigraph-stress']:
scigraph_stress(int(args['--rate']), int(args['--timeout']), verbose, debug)
elif args['deadlinks']:
deadlinks(filenames, int(args['--rate']), int(args['--timeout']), verbose, debug)
elif args['spell']:
spell(filenames, debug)
elif args['iri-commit']:
make_git_commit_command(git_local, repo_name)
elif args['uri-switch']:
uri_switch(rfilenames, uri_switch_values)
elif args['backend-refactor']:
backend_refactor(rfilenames, backend_refactor_values)
elif args['todo']:
graph = loadall(git_local, repo_name, local=True)
graph_todo(graph, curie_prefixes, uri_switch_values)
breakpoint()
elif args['expand']:
curies['NLXWIKI'] = 'http://legacy.neurolex.org/wiki/'
for curie in args['<curie>']:
prefix, suffix = curie.split(':')
print(curies[prefix] + suffix)
if __name__ == '__main__':
main()
| mit |
boland1992/seissuite_iran | build/lib/seissuite/spectrum/heatinterpolate.py | 8 | 3048 | #!/usr/bin/env python
# combining density estimation and delaunay interpolation for confidence-weighted value mapping
# Dan Stowell, April 2013
import numpy as np
from numpy import random
#from math import exp, log
from scipy import stats, mgrid, c_, reshape, rot90
import matplotlib.delaunay
#import matplotlib.tri as tri
import matplotlib.delaunay.interpolate
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import matplotlib.cm as cm
#from colorsys import hls_to_rgb
import pickle
pickle_file = '/storage/ANT/spectral_density/station_pds_maxima/\
S Network 2014/noise_info0_SNetwork2014.pickle'
f = open(name=pickle_file, mode='rb')
data = pickle.load(f)
f.close()
#############################
# user settings
n = 100
gridsize = 100
fontsize = 'xx-small'
#############################
# the [x,y,z] data was loaded from the pickle above; add some correlation between
# the z-values and the x/y coordinates
data[:,2] += data[:,1]
data[:,2] += data[:,0]
# record the data ranges (used below for the KDE grid, colour normalisation and plot limits)
zmin = np.min(data[:,2])
zmax = np.max(data[:,2])
xmin = np.min(data[:,0])
xmax = np.max(data[:,0])
ymin = np.min(data[:,1])
ymax = np.max(data[:,1])
zmin = np.min(data[:,2])
zmax = np.max(data[:,2])
##################################################
# plot it simply
plt.figure()
##################################################
# now make a KDE of it and plot that
kdeX, kdeY = mgrid[xmin:xmax:gridsize*1j, ymin:ymax:gridsize*1j]
positions = c_[kdeX.ravel(), kdeY.ravel()]
values = c_[data[:,0], data[:,1]]
kernel = stats.kde.gaussian_kde(values.T)
kdeZ = reshape(kernel(positions.T).T, kdeX.T.shape)
##################################################
# now make a delaunay triangulation of it and plot that
tt = matplotlib.delaunay.triangulate.Triangulation(data[:,0], data[:,1])
print xmin, xmax, ymin, ymax
print gridsize
extrap = tt.nn_extrapolator(data[:,2])
interped = extrap[xmin:xmax:gridsize*1j, ymin:ymax:gridsize*1j]
##################################################
# now combine delaunay with KDE
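# The KDE density, rescaled to 0-1, acts as a per-cell confidence weight: interpolated
# values in poorly sampled regions are faded towards white, so only well-sampled areas
# keep their full colour.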
colours = np.zeros((gridsize, gridsize, 4))
kdeZmin = np.min(kdeZ)
kdeZmax = np.max(kdeZ)
confdepth = 0.45
for x in range(gridsize):
for y in range(gridsize):
conf = (kdeZ[x,y] - kdeZmin) / (kdeZmax - kdeZmin)
val = min(1., max(0., interped[x,y]))
colour = list(cm.rainbow(val))
# now fade it out to white according to conf
for index in [0,1,2]:
colour[index] = (colour[index] * conf) + (1.0 * (1. -conf))
colours[x,y,:] = colour
#colours[x,y,:] = np.hstack((hls_to_rgb(val, 0.5 + confdepth - (confdepth * conf), 1.0), 1.0))
#colours[x,y,:] = [conf, conf, 1.0-conf, val]
print colours
plt.imshow(rot90(colours), cmap=cm.rainbow, norm=LogNorm(\
vmin=zmin, vmax=zmax))
plt.title("interpolated & confidence-shaded")
plt.ylim([ymin,ymax])
plt.xlim([xmin,xmax])
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
############################################
plt.savefig("plot_heati_simple.svg", format='SVG')
| gpl-3.0 |
reinvantveer/Topology-Learning | model/building_lstm.py | 1 | 7119 | """
This script executes the task of estimating the building type, based solely on the geometry for that building.
The data for this script can be found at http://hdl.handle.net/10411/GYPPBR.
"""
import os
import socket
import sys
from datetime import datetime, timedelta
from pathlib import Path
from time import time
from urllib.request import urlretrieve
import numpy as np
from keras import Input
from keras.callbacks import TensorBoard
from keras.engine import Model
from keras.layers import LSTM, Dense, Bidirectional
from keras.optimizers import Adam
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from prep.ProgressBar import ProgressBar
from topoml_util import geom_scaler
from topoml_util.slack_send import notify
SCRIPT_VERSION = '2.0.3'
SCRIPT_NAME = os.path.basename(__file__)
TIMESTAMP = str(datetime.now()).replace(':', '.')
SIGNATURE = SCRIPT_NAME + ' ' + SCRIPT_VERSION + ' ' + TIMESTAMP
DATA_FOLDER = '../files/buildings/'
TRAIN_DATA_FILE = 'buildings_train_v7.npz'
TEST_DATA_FILE = 'buildings_test_v7.npz'
TRAIN_DATA_URL = 'https://dataverse.nl/api/access/datafile/11381'
TEST_DATA_URL = 'https://dataverse.nl/api/access/datafile/11380'
SCRIPT_START = time()
# Hyperparameters
hp = {
'BATCH_SIZE': int(os.getenv('BATCH_SIZE', 512)),
'TRAIN_VALIDATE_SPLIT': float(os.getenv('TRAIN_VALIDATE_SPLIT', 0.1)),
'REPEAT_DEEP_ARCH': int(os.getenv('REPEAT_DEEP_ARCH', 0)),
'LSTM_SIZE': int(os.getenv('LSTM_SIZE', 32)),
'DENSE_SIZE': int(os.getenv('DENSE_SIZE', 32)),
'EPOCHS': int(os.getenv('EPOCHS', 200)),
'LEARNING_RATE': float(os.getenv('LEARNING_RATE', 1e-3)),
'RECURRENT_DROPOUT': float(os.getenv('RECURRENT_DROPOUT', 0.0)),
'GEOM_SCALE': float(os.getenv("GEOM_SCALE", 0)), # If no default or 0: overridden when data is known
}
OPTIMIZER = Adam(lr=hp['LEARNING_RATE'], clipnorm=1.)
# Load training data
path = Path(DATA_FOLDER + TRAIN_DATA_FILE)
if not path.exists():
print("Retrieving training data from web...")
urlretrieve(TRAIN_DATA_URL, DATA_FOLDER + TRAIN_DATA_FILE)
train_loaded = np.load(DATA_FOLDER + TRAIN_DATA_FILE)
train_geoms = train_loaded['geoms']
train_labels = train_loaded['building_type']
# Determine final test mode or standard
if len(sys.argv) > 1 and sys.argv[1] in ['-t', '--test']:
print('Training in final test mode')
path = Path(DATA_FOLDER + TEST_DATA_FILE)
if not path.exists():
print("Retrieving test data from web...")
urlretrieve(TEST_DATA_URL, DATA_FOLDER + TEST_DATA_FILE)
test_loaded = np.load(DATA_FOLDER + TEST_DATA_FILE)
test_geoms = test_loaded['geoms']
test_labels = test_loaded['building_type']
else:
print('Training in standard training mode')
# Split the training data in random seen/unseen sets
train_geoms, test_geoms, train_labels, test_labels = train_test_split(train_geoms, train_labels, test_size=0.1)
# Normalize
geom_scale = hp['GEOM_SCALE'] or geom_scaler.scale(train_geoms)
train_geoms = geom_scaler.transform(train_geoms, geom_scale)
test_geoms = geom_scaler.transform(test_geoms, geom_scale) # re-use variance from training
# Sort data according to sequence length
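# Geometries are variable-length sequences, so they are processed in order of decreasing
# length and grouped into buckets keyed by sequence length. While the shortest existing
# bucket holds fewer than BATCH_SIZE entries, shorter geometries are padded up to that
# bucket's length and appended to it; otherwise a new bucket is started. Labels are
# one-hot encoded alongside their geometries.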
zipped = zip(train_geoms, train_labels)
train_input_sorted = {}
train_labels_sorted = {}
for geom, label in sorted(zipped, key=lambda x: len(x[0]), reverse=True):
# Map types to one-hot vectors
# noinspection PyUnresolvedReferences
one_hot_label = np.zeros((np.array(train_labels).max() + 1))
one_hot_label[label] = 1
sequence_len = geom.shape[0]
smallest_size_subset = sorted(train_input_sorted.keys())[0] if train_input_sorted else None
if not smallest_size_subset: # This is the first data point
train_input_sorted[sequence_len] = [geom]
train_labels_sorted[sequence_len] = [one_hot_label]
continue
if sequence_len in train_input_sorted: # the entry exists, append
train_input_sorted[sequence_len].append(geom)
train_labels_sorted[sequence_len].append(one_hot_label)
continue
# the size subset does not exist yet
# append the data to the smallest size subset if it isn't batch-sized yet
if len(train_input_sorted[smallest_size_subset]) < hp['BATCH_SIZE']:
geom = pad_sequences([geom], smallest_size_subset)[0] # make it the same size as the rest in the subset
train_input_sorted[smallest_size_subset].append(geom)
train_labels_sorted[smallest_size_subset].append(one_hot_label)
else:
train_input_sorted[sequence_len] = [geom]
train_labels_sorted[sequence_len] = [one_hot_label]
# Shape determination
geom_vector_len = train_geoms[0].shape[1]
output_size = np.array(train_labels).max() + 1
# Build model
inputs = Input(shape=(None, geom_vector_len))
model = Bidirectional(LSTM(hp['LSTM_SIZE'],
return_sequences=(hp['REPEAT_DEEP_ARCH'] > 0),
recurrent_dropout=hp['RECURRENT_DROPOUT']))(inputs)
for layer in range(hp['REPEAT_DEEP_ARCH']):
is_last_layer = (layer + 1 == hp['REPEAT_DEEP_ARCH'])
model = Bidirectional(LSTM(hp['LSTM_SIZE'],
return_sequences=(not is_last_layer),
recurrent_dropout=hp['RECURRENT_DROPOUT']))(model)
model = Dense(output_size, activation='softmax')(model)
model = Model(inputs=inputs, outputs=model)
model.compile(
loss='categorical_crossentropy',
metrics=['accuracy'],
    optimizer=OPTIMIZER)
model.summary()
# Callbacks
pgb = ProgressBar()
for epoch in range(hp['EPOCHS']):
for sequence_len in sorted(train_input_sorted.keys()):
message = 'Epoch {} of {}, sequence length {}'.format(epoch + 1, hp['EPOCHS'], sequence_len)
pgb.update_progress(epoch/hp['EPOCHS'], message)
inputs = np.array(train_input_sorted[sequence_len])
labels = np.array(train_labels_sorted[sequence_len])
model.fit(
x=inputs,
y=labels,
verbose=0,
epochs=epoch + 1,
initial_epoch=epoch,
batch_size=hp['BATCH_SIZE'],
validation_split=hp['TRAIN_VALIDATE_SPLIT'],
callbacks=[TensorBoard(log_dir='./tensorboard_log/' + SIGNATURE, write_graph=False)])
# Run on unseen test data
print('\n\nRun on test data...')
test_preds = [model.predict(np.array([test])) for test in test_geoms]
test_preds = [np.argmax(pred) for pred in test_preds]
accuracy = accuracy_score(test_labels, test_preds)
runtime = time() - SCRIPT_START
message = 'on {} completed with accuracy of \n{:f} \nin {} in {} epochs\n'.format(
socket.gethostname(), accuracy, timedelta(seconds=runtime), hp['EPOCHS'])
for key, value in sorted(hp.items()):
message += '{}: {}\t'.format(key, value)
notify(SIGNATURE, message)
print(SCRIPT_NAME, 'finished successfully with', message)
| mit |
astroJeff/dart_board | paper/scripts/HMXB_plots.py | 1 | 5798 | import numpy as np
import pickle
import matplotlib
matplotlib.use('Agg')
import corner
import matplotlib.pyplot as plt
from matplotlib import font_manager
from dart_board import posterior
# Fraction of data to ignore
frac = 0.99
# Load chains
chains = np.load("../data/HMXB_chain.npy")
if chains.ndim == 4: chains = chains[0]
n_chains, length, n_var = chains.shape
chains = chains[:,int(length*frac):,:]
n_chains, length, n_var = chains.shape
chains = chains.reshape((n_chains*length, n_var))
print(chains.shape)
# Load derived
MCMC_derived = np.load("../data/HMXB_derived.npy")
if MCMC_derived.ndim == 4: MCMC_derived = MCMC_derived[0]
n_chains, length, n_var = MCMC_derived.shape
MCMC_derived = MCMC_derived[:,int(length*frac):,:]
n_chains, length, n_var = MCMC_derived.shape
MCMC_derived = MCMC_derived.reshape((n_chains*length, n_var))
print(MCMC_derived.shape)
# Get indices of derived data for plots
idx_M1 = 0
idx_M2 = 1
idx_a = 2
idx_e = 3
idx_mdot1 = 5
if n_var == 9:
idx_k1 = 7
elif n_var == 17:
idx_k1 = 15
else:
    raise ValueError("Unexpected number of derived variables: {}".format(n_var))
# Move from ln parameters to parameters in chains
chains[:,0] = np.exp(chains[:,0])
chains[:,1] = np.exp(chains[:,1])
chains[:,2] = np.exp(chains[:,2])
chains[:,7] = np.exp(chains[:,7])
# Create a corner plot to show the posterior distribution
# fontProperties = {'family':'serif', 'serif':['Times New Roman'], 'weight':'normal', 'size':12}
# ticks_font = font_manager.FontProperties(family='Times New Roman', style='normal', \
# weight='normal', stretch='normal', size=10)
# plt.rc('font', **fontProperties)
# Corner plot
labels = [r"$M_{\rm 1, i}\ (M_{\odot})$",
r"$M_{\rm 2, i}\ (M_{\odot})$",
r"log $a_{\rm i}\ (R_{\odot})$",
r"$e_{\rm i}$",
r"$v_{\rm k, i}\ ({\rm km}\ {\rm s}^{-1})$",
r"$\theta_{\rm k}\ ({\rm rad.})$",
r"$\phi_{\rm k}\ ({\rm rad.})$",
r"$t_{\rm i}\ ({\rm Myr})$"]
# plt_range = ([13.9,13.95], [12,13], [0,4000], [0.7,1], [0,750], [0.0,np.pi], [0.0,np.pi], [15,22])
# plt_range = ([0,25], [0,20], [0,4000], [0.0,1], [0,750], [0.0,np.pi], [0.0,np.pi], [10,60])
plt_range = ([0,40], [0,30], [1,4], [0.0,1], [0,750], [0.0,np.pi], [0.0,np.pi], [0,60])
# Load traditional population synthesis results
trad_x_i = np.load("../data/HMXB_trad_chain.npy")
length, ndim = trad_x_i.shape
trad_likelihood = np.load("../data/HMXB_trad_lnprobability.npy")
trad_derived = np.load("../data/HMXB_trad_derived.npy")
length, ndim = trad_x_i.shape
trad_x_i = trad_x_i[int(length*frac):,:]
trad_likelihood = trad_likelihood[int(length*frac):]
trad_derived = trad_derived[int(length*frac):,:]
# trad_x_i = trad_x_i.reshape((len(trad_likelihood), 14))
# trad_derived = trad_derived.reshape((len(trad_likelihood), 9))
print(trad_x_i.shape)
print(trad_derived.shape)
# Make the orbital separation distribution in log-scale
chains.T[2] = np.log10(chains.T[2])
trad_x_i.T[2] = posterior.P_to_A(trad_x_i.T[0], trad_x_i.T[1], trad_x_i.T[2])
trad_x_i.T[2] = np.log10(trad_x_i.T[2])
# Plot distribution of initial binary parameters
fig, ax = plt.subplots(2, 4, figsize=(8,4.5))
for k in range(2):
for j in range(4):
i = 4*k+j
trad_i = i
if i == 7: trad_i = 12
ax[k,j].hist(trad_x_i.T[trad_i], range=plt_range[i], bins=30, normed=True, histtype='step', color='C0', label="Traditional")
# ax[k,j].hist(trad_x_i.T[trad_i], range=plt_range[i], bins=20, normed=True, weights=trad_likelihood, color='C0', label="Traditional", alpha=0.3)
ax[k,j].hist(chains.T[i], range=plt_range[i], bins=30, normed=True, color='k', label='MCMC', alpha=0.3)
ax[k,j].set_xlabel(labels[i])
ax[k,j].set_yticklabels([])
ax[0,0].legend(loc=1,prop={'size':6})
ax[1,1].set_xticks([0.0, np.pi/2., np.pi])
ax[1,2].set_xticks([0.0, np.pi/2., np.pi])
ax[1,1].set_xticklabels(["0", r"$\pi$/2", r"$\pi$"])
ax[1,2].set_xticklabels(["0", r"$\pi$/2", r"$\pi$"])
plt.tight_layout()
plt.savefig("../figures/HMXB_compare_x_i.pdf")
# Plot distribution of final binary parameters
fig, ax = plt.subplots(1, 3, figsize=(8,3))
labels = [r"$M_{\rm 1}\ (M_{\odot})$",
r"$M_{\rm 2}\ (M_{\odot})$",
r"$P_{\rm orb}$"]
from dart_board import posterior
from scipy import stats
trad_Porb = posterior.A_to_P(trad_derived.T[idx_M1], trad_derived.T[idx_M2], trad_derived.T[idx_a])
plt_range = ([1.0,2.5], [0.0,60.0], [0.0,500.0])
# ax[0].hist(trad_derived.T[0], range=plt_range[0], bins=20, normed=True, weights=trad_likelihood, color='C0', alpha=0.3, label='Traditional')
# ax[1].hist(trad_derived.T[1], range=plt_range[1], bins=20, normed=True, weights=trad_likelihood, color='C0', alpha=0.3)
# ax[2].hist(trad_Porb, range=plt_range[2], bins=20, normed=True, weights=trad_likelihood, color='C0', alpha=0.3)
ax[0].hist(trad_derived.T[idx_M1], range=plt_range[0], bins=40, normed=True, histtype='step', color='C0', label='Traditional')
ax[1].hist(trad_derived.T[idx_M2], range=plt_range[1], bins=40, normed=True, histtype='step', color='C0')
ax[2].hist(trad_Porb, range=plt_range[2], bins=40, normed=True, histtype='step', color='C0')
# Plot results from MCMC
MCMC_Porb = posterior.A_to_P(MCMC_derived.T[idx_M1], MCMC_derived.T[idx_M2], MCMC_derived.T[idx_a])
ax[0].hist(MCMC_derived.T[idx_M1], range=plt_range[0], bins=40, normed=True, color='k', label='MCMC', alpha=0.3)
ax[1].hist(MCMC_derived.T[idx_M2], range=plt_range[1], bins=40, normed=True, color='k', alpha=0.3)
ax[2].hist(MCMC_Porb, range=plt_range[2], bins=40, normed=True, color='k', alpha=0.3)
for i in range(3):
ax[i].set_xlabel(labels[i])
ax[i].set_xlim(plt_range[i])
ax[i].set_yticklabels([])
ax[0].legend(prop={'size':8})
plt.tight_layout()
plt.savefig("../figures/HMXB_compare_derived.pdf")
| mit |
Jimmy-Morzaria/scikit-learn | examples/classification/plot_lda.py | 164 | 2224 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
| bsd-3-clause |
AmberJBlue/aima-python | submissions/Blue/myNN.py | 3 | 3055 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Blue import music
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
musicATRB = DataFrame()
musicATRB.data = []
targetData = []
'''
Extract data from the CORGIS Music Library.
Most 'hit' songs average 48-52 bars and no more than ~3 minutes (180 seconds)...
'''
allSongs = music.get_songs()
for song in allSongs:
try:
length = float(song['song']["duration"])
targetData.append(length)
genre = song['artist']['terms'] #String
title = song['song']['title'] #String
# release = float(song['song']['Release'])
musicATRB.data.append([genre, title])
except:
traceback.print_exc()
musicATRB.feature_names = [
'Genre',
'Title',
'Release',
'Length',
]
musicATRB.target = []
def musicTarget(length):
    # 1 if the song is at most 3.5 minutes (210 seconds) long, else 0
    if length <= 210:
        return 1
    return 0
for i in targetData:
tt = musicTarget(i)
musicATRB.target.append(tt)
musicATRB.target_names = [
'Not a hit song',
'Could be a hit song',
]
Examples = {
'Music': musicATRB,
}
'''
Make a customn classifier,
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (5000,),
activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
shuffle = False,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
momentum = 0.4,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
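# Min-max scaling: setupScales records per-column minima and maxima over the grid, and
# scaleGrid maps each cell to (value - min) / (max - min); cells that cannot be scaled
# (e.g. non-numeric columns) are silently skipped.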
musicScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(musicATRB.data)
musicScaled.data = scaleGrid(musicATRB.data)
musicScaled.feature_names = musicATRB.feature_names
musicScaled.target = musicATRB.target
musicScaled.target_names = musicATRB.target_names
Examples = {
'musicDefault': {
'frame': musicATRB,
},
'MusicSGD': {
'frame': musicATRB,
'mlpc': mlpc
},
'MusicScaled': {
'frame': musicScaled,
},
} | mit |
Barmaley-exe/scikit-learn | sklearn/datasets/svmlight_format.py | 39 | 15319 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
Parsing a text based source can be expensive. When working on
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
line_pattern = u("%d")
else:
line_pattern = u("%.16g")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if query_id is not None:
feat = (y[i], query_id[i], s)
else:
feat = (y[i], s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, one_based, comment, query_id)
| bsd-3-clause |
kate-v-stepanova/scilifelab | tests/full/test_production.py | 4 | 21220 | import shutil
import os
import logbook
import re
import yaml
import unittest
import pandas as pd
import numpy as np
try:
import drmaa
except:
pass
from ..classes import SciLifeTest
from classes import PmFullTest
from cement.core import handler
from scilifelab.pm.core.production import ProductionController
from scilifelab.utils.misc import filtered_walk, opt_to_dict
from scilifelab.bcbio.run import find_samples, setup_sample, run_bcbb_command, setup_merged_samples, sample_table, get_vcf_files, validate_sample_directories, _group_samples
from scilifelab.bcbio import merge_sample_config
LOG = logbook.Logger(__name__)
filedir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
j_doe_00_01 = os.path.abspath(os.path.join(filedir, "data", "production", "J.Doe_00_01"))
j_doe_00_04 = os.path.abspath(os.path.join(filedir, "data", "production", "J.Doe_00_04"))
j_doe_00_05 = os.path.abspath(os.path.join(filedir, "data", "production", "J.Doe_00_05"))
ANALYSIS_TYPE = 'Align_standard_seqcap'
GALAXY_CONFIG = os.path.abspath(os.path.join(filedir, "data", "config"))
SAMPLES = ['P001_101_index3', 'P001_102_index6']
FLOWCELL = '120924_AC003CCCXX'
FINISHED = {
'J.Doe_00_01': {'P001_101_index3': os.path.join(filedir, "data", "production", "J.Doe_00_01", SAMPLES[0], "FINISHED_AND_DELIVERED"),
'P001_102_index6': os.path.join(filedir, "data", "production", "J.Doe_00_01", SAMPLES[1], "FINISHED_AND_DELIVERED")},
'J.Doe_00_04': {'P001_101_index3': os.path.join(filedir, "data", "production", "J.Doe_00_04", SAMPLES[0], "FINISHED_AND_DELIVERED"),
'P001_102_index6': os.path.join(filedir, "data", "production", "J.Doe_00_04", SAMPLES[1], "FINISHED_AND_DELIVERED")}
}
REMOVED = {
'J.Doe_00_01': {'P001_101_index3': os.path.join(filedir, "data", "production", "J.Doe_00_01", SAMPLES[0], "FINISHED_AND_REMOVED"),
'P001_102_index6': os.path.join(filedir, "data", "production", "J.Doe_00_01", SAMPLES[1], "FINISHED_AND_REMOVED")},
'J.Doe_00_04': {'P001_101_index3': os.path.join(filedir, "data", "production", "J.Doe_00_04", SAMPLES[0], "FINISHED_AND_REMOVED"),
'P001_102_index6': os.path.join(filedir, "data", "production", "J.Doe_00_04", SAMPLES[1], "FINISHED_AND_REMOVED")}
}
class ProductionConsoleTest(PmFullTest):
"""Class for testing functions without drmaa"""
def setUp(self):
pass
def test_compression_suite(self):
"""Test various combinations of compression, decompression, cleaning"""
self.app = self.make_app(argv = ['production', 'decompress', 'J.Doe_00_01', '--debug', '--force', '--fastq', '-n'])
handler.register(ProductionController)
self._run_app()
l1 = self.app._output_data["stderr"].getvalue()
self.app = self.make_app(argv = ['production', 'decompress', 'J.Doe_00_01', '-f', FLOWCELL, '--debug', '--force', '--fastq', '-n'])
handler.register(ProductionController)
self._run_app()
l2 = self.app._output_data["stderr"].getvalue()
self.assertTrue(len(l1) > len(l2))
os.chdir(filedir)
def test_run(self):
"""Test various combinations of compression, decompression, cleaning"""
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_01', '--debug', '--force', '--fastq', '-n'])
handler.register(ProductionController)
self._run_app()
l1 = self.app._output_data["stderr"].getvalue()
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_01', '-f', FLOWCELL, '--debug', '--force', '--fastq', '-n'])
handler.register(ProductionController)
self._run_app()
l2 = self.app._output_data["stderr"].getvalue()
self.assertTrue(len(l1) > len(l2))
os.chdir(filedir)
@unittest.skipIf(not os.getenv("MAILTO"), "not running production test: set $MAILTO environment variable to your mail address to test mailsend")
@unittest.skipIf(not os.getenv("DRMAA_LIBRARY_PATH"), "not running production test: no $DRMAA_LIBRARY_PATH")
class ProductionTest(PmFullTest):
@classmethod
def setUpClass(cls):
if not os.getcwd() == filedir:
os.chdir(filedir)
LOG.info("Copy tree {} to {}".format(j_doe_00_01, j_doe_00_04))
if not os.path.exists(j_doe_00_04):
shutil.copytree(j_doe_00_01, j_doe_00_04)
## Set P001_102_index6 to use devel partition and require mailto environment variable for test
pp = os.path.join(j_doe_00_04, SAMPLES[1], FLOWCELL, "{}-post_process.yaml".format(SAMPLES[1]))
with open(pp) as fh:
config = yaml.load(fh)
platform_args = config["distributed"]["platform_args"].split()
platform_args[platform_args.index("-t") + 1] = "00:10:00"
if not "--mail-user={}".format(os.getenv("MAILTO")) in platform_args:
platform_args.extend(["--mail-user={}".format(os.getenv("MAILTO"))])
if not "--mail-type=ALL" in platform_args:
platform_args.extend(["--mail-type=ALL"])
config["distributed"]["platform_args"] = " ".join(platform_args)
with open(pp, "w") as fh:
fh.write(yaml.safe_dump(config, default_flow_style=False, allow_unicode=True, width=1000))
for k in FINISHED.keys():
for v in FINISHED[k].values():
if os.path.exists(v):
os.unlink(v)
for v in REMOVED[k].values():
if os.path.exists(v):
os.unlink(v)
## FIXME: since we're submitting jobs to drmaa, data will be
## removed before the pipeline has finished. One solution would be
## to run on one of the module production datasets
# @classmethod
# def tearDownClass(cls):
# LOG.info("Removing directory tree {}".format(j_doe_00_04))
# os.chdir(filedir)
# shutil.rmtree(j_doe_00_04)
def test_production_setup(self):
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_04', '--debug', '--force', '--only_setup', '--restart', '--drmaa'], extensions = ['scilifelab.pm.ext.ext_distributed'])
handler.register(ProductionController)
self._run_app()
os.chdir(filedir)
def test_production(self):
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_04', '--debug', '--force', '--amplicon', '--restart'])
handler.register(ProductionController)
self._run_app()
os.chdir(filedir)
def test_platform_args(self):
"""Test the platform arguments for a run"""
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_04', '--debug', '--force', '--amplicon', '--restart', '--sample', SAMPLES[1], '--drmaa'], extensions=['scilifelab.pm.ext.ext_distributed'])
handler.register(ProductionController)
self._run_app()
os.chdir(filedir)
def test_batch_submission(self):
"""Test that adding --batch groups commands into a batch submission"""
pp = os.path.join(j_doe_00_04, SAMPLES[1], FLOWCELL, "{}-post_process.yaml".format(SAMPLES[1]))
with open(pp) as fh:
config = yaml.load(fh)
platform_args = config["distributed"]["platform_args"].split()
account = platform_args[platform_args.index("-A")+1]
self.app = self.make_app(argv = ['production', 'compress', 'J.Doe_00_04', '--debug', '--force', '--jobname', 'batchsubmission', '--drmaa', '--batch', '--partition', 'devel', '--time', '01:00:00', '-A', account], extensions=['scilifelab.pm.ext.ext_distributed'])
handler.register(ProductionController)
self._run_app()
def test_change_platform_args(self):
"""Test that passing --time actually changes platform
arguments. These arguments should have precedence over
whatever is written in the config file."""
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_04', '--debug', '--force', '--amplicon', '--restart', '--sample', SAMPLES[1], '--drmaa', '--time', '00:01:00', '-n'], extensions=['scilifelab.pm.ext.ext_distributed'])
handler.register(ProductionController)
self._run_app()
os.chdir(filedir)
def test_casava_transfer(self):
"""Test transfer of casava data from production to project"""
self.app = self.make_app(argv = ['production', 'transfer', 'J.Doe_00_03', '--debug', '--force', '--quiet'], extensions=[])
handler.register(ProductionController)
self._run_app()
os.chdir(filedir)
j_doe_00_03 = os.path.abspath(os.path.join(filedir, "data", "projects", "j_doe_00_03"))
pattern = ".fastq(.gz)?$"
def fastq_filter(f):
return re.search(pattern, f) != None
fastq_files = filtered_walk(j_doe_00_03, fastq_filter)
self.assertEqual(len(fastq_files), 2)
def test_touch_finished(self):
"""Test touching finished files"""
self.app = self.make_app(argv = ['production', 'touch-finished', 'J.Doe_00_01', '--debug', '--force', '--sample', SAMPLES[0]], extensions=[])
handler.register(ProductionController)
self._run_app()
self.assertTrue(os.path.exists(FINISHED['J.Doe_00_01'][SAMPLES[0]]))
samplefile = os.path.join(filedir, "data", "production", "J.Doe_00_01", "finished_sample.txt")
with open(samplefile, "w") as fh:
fh.write(SAMPLES[0] + "\n")
fh.write(SAMPLES[1] + "\n")
self.app = self.make_app(argv = ['production', 'touch-finished', 'J.Doe_00_01', '--debug', '--force', '--sample', samplefile], extensions=[])
handler.register(ProductionController)
self._run_app()
self.assertTrue(os.path.exists(FINISHED['J.Doe_00_01'][SAMPLES[1]]))
## Make sure rsync fails
self.app = self.make_app(argv = ['production', 'touch-finished', 'J.Doe_00_01', '--debug', '--force', '--sample', samplefile], extensions=[])
handler.register(ProductionController)
try:
self.app.setup()
self.app.config.set("runqc", "root", self.app.config.get("runqc", "root").replace("production", "projects"))
with self.app.log.log_setup.applicationbound():
self.app.run()
self.app.render(self.app._output_data)
finally:
self.app.close()
def test_remove_finished(self):
self.app = self.make_app(argv = ['production', 'touch-finished', 'J.Doe_00_04', '--debug', '--force', '--sample', SAMPLES[1]], extensions=[])
handler.register(ProductionController)
self._run_app()
self.assertTrue(os.path.exists(FINISHED['J.Doe_00_04'][SAMPLES[1]]))
## Remove file, dry
self.app = self.make_app(argv = ['production', 'remove-finished', 'J.Doe_00_04', '--debug', '--force', '-n'], extensions=[])
handler.register(ProductionController)
self._run_app()
class UtilsTest(SciLifeTest):
@classmethod
def setUpClass(cls):
if not os.getcwd() == filedir:
os.chdir(filedir)
LOG.info("Copy tree {} to {}".format(j_doe_00_01, j_doe_00_05))
if not os.path.exists(j_doe_00_05):
shutil.copytree(j_doe_00_01, j_doe_00_05)
with open(os.path.join(j_doe_00_05, "samples.txt"), "w") as fh:
fh.write("\n\nP001_101_index3\nP001_104_index3")
with open(os.path.join(j_doe_00_05, "samples2.txt"), "w") as fh:
fh.write("\n\nP001_101_index3-bcbb-config.yaml")
@classmethod
def tearDownClass(cls):
LOG.info("Removing directory tree {}".format(j_doe_00_05))
os.chdir(filedir)
shutil.rmtree(j_doe_00_05)
def test_find_samples(self):
"""Test finding samples"""
flist = find_samples(j_doe_00_05)
self.assertIn(len(flist), [3,4])
flist = find_samples(j_doe_00_05, **{'only_failed':True})
self.assertIn(len(flist), [0,1])
def test_find_samples_from_file(self):
"""Find samples defined in file with empty lines and erroneous names"""
with open(os.path.join(j_doe_00_05, "P001_101_index3-bcbb-config.yaml"), "w") as fh:
fh.write("\n")
flist = find_samples(j_doe_00_05, sample=os.path.join(j_doe_00_05, "samples.txt"))
validate_sample_directories(flist, j_doe_00_05)
self.assertEqual(len(flist),2)
os.unlink(os.path.join(j_doe_00_05, "P001_101_index3-bcbb-config.yaml"))
def test_find_samples_from_file_with_yaml(self):
"""Find samples defined in file with empty lines and a bcbb-config.yaml file lying directly under root directory"""
flist = find_samples(j_doe_00_05, sample=os.path.join(j_doe_00_05, "samples2.txt"))
args = [flist, j_doe_00_05]
self.assertRaises(Exception, validate_sample_directories, *args)
def test_setup_merged_samples(self):
"""Test setting up merged samples"""
flist = find_samples(j_doe_00_05)
setup_merged_samples(flist, **{'dry_run':False})
with open(os.path.join(j_doe_00_05, "P001_101_index3", "TOTAL", "P001_101_index3-bcbb-config.yaml")) as fh:
conf = yaml.load(fh)
self.assertEqual(conf["details"][0]["files"][0], os.path.join(j_doe_00_05, "P001_101_index3", "TOTAL", "P001_101_index3_B002BBBXX_TGACCA_L001_R1_001.fastq.gz"))
def test_merge_sample_config(self):
"""Test merging sample configuration files"""
flist = find_samples(j_doe_00_05)
fdict = _group_samples(flist)
out_d = os.path.join(j_doe_00_05, "P001_101_index3", "TOTAL")
if not os.path.exists(out_d):
os.makedirs(out_d)
newconf = merge_sample_config(fdict["P001_101_index3"].values(), "P001_101_index3", out_d=out_d, dry_run=False)
self.assertTrue(os.path.exists(os.path.join(j_doe_00_05, "P001_101_index3", "TOTAL", "P001_101_index3_B002BBBXX_TGACCA_L001_R1_001.fastq.gz" )))
self.assertTrue(os.path.exists(os.path.join(j_doe_00_05, "P001_101_index3", "TOTAL", "P001_101_index3_C003CCCXX_TGACCA_L001_R1_001.fastq.gz" )))
def test_setup_samples(self):
"""Test setting up samples, changing genome to rn4"""
flist = find_samples(j_doe_00_05)
for f in flist:
setup_sample(f, **{'analysis':'Align_standard_seqcap', 'genome_build':'rn4', 'dry_run':False, 'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'num_cores':8, 'distributed':False})
for f in flist:
with open(f, "r") as fh:
config = yaml.load(fh)
if config["details"][0].get("multiplex", None):
self.assertEqual(config["details"][0]["multiplex"][0]["genome_build"], "rn4")
else:
self.assertEqual(config["details"][0]["genome_build"], "rn4")
with open(f.replace("-bcbb-config.yaml", "-post_process.yaml")) as fh:
config = yaml.load(fh)
self.assertEqual(config["custom_algorithms"][ANALYSIS_TYPE]["hybrid_bait"], 'rat_baits.interval_list')
self.assertEqual(config["custom_algorithms"][ANALYSIS_TYPE]["hybrid_target"], 'rat_targets.interval_list')
self.assertEqual(config["algorithm"]["num_cores"], 8)
for f in flist:
            setup_sample(f, **{'analysis':ANALYSIS_TYPE, 'genome_build':'rn4', 'dry_run':False,
                               'no_only_run':True, 'google_report':True,
                               'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'amplicon':True, 'num_cores':8, 'distributed':False})
with open(f, "r") as fh:
config = yaml.load(fh)
if config["details"][0].get("multiplex", None):
self.assertEqual(config["details"][0]["multiplex"][0]["genome_build"], "rn4")
else:
self.assertEqual(config["details"][0]["genome_build"], "rn4")
with open(f.replace("-bcbb-config.yaml", "-post_process.yaml")) as fh:
config = yaml.load(fh)
self.assertEqual(config["algorithm"]["mark_duplicates"], False)
self.assertEqual(config["custom_algorithms"][ANALYSIS_TYPE]["mark_duplicates"], False)
def test_remove_files(self):
"""Test removing files"""
keep_files = ["-post_process.yaml$", "-post_process.yaml.bak$", "-bcbb-config.yaml$", "-bcbb-config.yaml.bak$", "-bcbb-command.txt$", "-bcbb-command.txt.bak$", "_[0-9]+.fastq$", "_[0-9]+.fastq.gz$", "^[0-9][0-9]_.*.txt$"]
pattern = "|".join(keep_files)
def remove_filter_fn(f):
return re.search(pattern, f) == None
flist = find_samples(j_doe_00_05)
for f in flist:
workdir = os.path.dirname(f)
remove_files = filtered_walk(workdir, remove_filter_fn)
self.assertNotIn("01_analysis_start.txt", [os.path.basename(x) for x in remove_files])
def test_remove_dirs(self):
"""Test removing directories before rerunning pipeline"""
keep_files = ["-post_process.yaml$", "-post_process.yaml.bak$", "-bcbb-config.yaml$", "-bcbb-config.yaml.bak$", "-bcbb-command.txt$", "-bcbb-command.txt.bak$", "_[0-9]+.fastq$", "_[0-9]+.fastq.gz$"]
pattern = "|".join(keep_files)
def remove_filter_fn(f):
return re.search(pattern, f) == None
flist = find_samples(j_doe_00_05)
for f in flist:
workdir = os.path.dirname(f)
remove_dirs = filtered_walk(workdir, remove_filter_fn, get_dirs=True)
self.assertIn("fastqc", [os.path.basename(x) for x in remove_dirs])
def test_bcbb_command(self):
"""Test output from command, changing analysis to amplicon and
setting targets and baits"""
flist = find_samples(j_doe_00_05)
for f in flist:
            setup_sample(f, **{'analysis':ANALYSIS_TYPE, 'genome_build':'rn4', 'dry_run':False,
                               'no_only_run':False, 'google_report':False,
                               'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'amplicon':True, 'num_cores':8, 'distributed':False})
with open(f.replace("-bcbb-config.yaml", "-bcbb-command.txt")) as fh:
cl = fh.read().split()
(cl, platform_args) = run_bcbb_command(f)
self.assertIn("automated_initial_analysis.py",cl)
            setup_sample(f, **{'analysis':ANALYSIS_TYPE, 'genome_build':'rn4', 'dry_run':False,
                               'no_only_run':False, 'google_report':False,
                               'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'amplicon':True, 'num_cores':8, 'distributed':True})
with open(f.replace("-bcbb-config.yaml", "-bcbb-command.txt")) as fh:
cl = fh.read().split()
(cl, platform_args) = run_bcbb_command(f)
self.assertIn("distributed_nextgen_pipeline.py",cl)
def test_global_post_process(self):
"""Test that when using a "global" post_process, jobname,
output, error and output directory are updated.
"""
flist = find_samples(j_doe_00_05)
pp = os.path.join(j_doe_00_01, SAMPLES[1], FLOWCELL, "{}-post_process.yaml".format(SAMPLES[1]))
with open(pp) as fh:
postprocess = yaml.load(fh)
for f in flist:
(cl, platform_args) = run_bcbb_command(f, pp)
self.assertIn("--error", platform_args)
self.assertEqual(platform_args[platform_args.index("--error") + 1], f.replace("-bcbb-config.yaml", "-bcbb.err"))
@unittest.skipIf(not os.getenv("DRMAA_LIBRARY_PATH"), "not running UtilsTest.test_platform: no $DRMAA_LIBRARY_PATH")
def test_platform_args(self):
"""Test making platform args and changing them on the fly. """
from scilifelab.pm.ext.ext_distributed import make_job_template_args
pp = os.path.join(j_doe_00_05, SAMPLES[1], FLOWCELL, "{}-post_process.yaml".format(SAMPLES[1]))
with open(pp) as fh:
config = yaml.load(fh)
platform_args = config["distributed"]["platform_args"].split()
self.assertIn("core", platform_args)
pargs = opt_to_dict(platform_args)
self.assertEqual("P001_102_index6-bcbb.log", pargs['-o'])
kw = {'time':'00:01:00', 'jobname':'test', 'partition':'devel'}
pargs = make_job_template_args(pargs, **kw)
self.assertEqual("devel", pargs['partition'])
nativeSpec = "-t {time} -p {partition} -A {account}".format(**pargs)
self.assertEqual("00:01:00", nativeSpec[3:11])
def test_sample_table(self):
"""Test making a sample table"""
flist = find_samples(j_doe_00_01)
samples = sample_table(flist)
grouped = samples.groupby("sample")
self.assertEqual(len(grouped.groups["P001_101_index3"]), 2)
self.assertEqual(len(grouped.groups["P001_102_index6"]), 1)
def test_summarize_variants(self):
"""Test summarizing variants"""
flist = find_samples(j_doe_00_01)
vcf_d = get_vcf_files(flist)
| mit |
research-team/NEUCOGAR | NEST/visual/cortical_column/scripts/func.py | 1 | 8975 | import os
import sys
import nest
import logging
import datetime
import numpy as np
import nest.topology as tp
import matplotlib.pyplot as plt
from data import *
from time import clock
from parameters import *
times = []
spikegenerators = {} # dict name_part : spikegenerator
spikedetectors = {} # dict name_part : spikedetector
dictPosition_NeuronID = {}
txtResultPath = ""
startsimulate = 0
endsimulate = 0
SAVE_PATH = ""
SYNAPSES = 0
NEURONS = 0
FORMAT = '%(name)s.%(levelname)s: %(message)s.'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
logger = logging.getLogger('function')
def build_model():
global NEURONS
nest.SetDefaults('iaf_psc_exp', iaf_neuronparams)
layerNumberZ = 6
neuronID = 2
for layer in Cortex:
columns = layer[area][X_area]
rows = layer[area][Y_area]
print rows
NEURONS += rows * columns
for y in range(rows):
for x in range(columns):
dictPosition_NeuronID[(float(x), float(y), float(layerNumberZ))] = neuronID
neuronID += 1
layerNumberZ -= 1
logger.debug("{0} {1} neurons".format(layer[Glu][k_name][:2], rows * columns))
logger.debug("X: {0} ({1}neu x {2}col) \n".format(layer[area][X_area], sum(layer[step]),
layer[area][X_area] / sum(layer[step])) +
" " * 16 +
"Y: {0} ({1}neu x {2}col)".format(layer[area][Y_area], 2, layer[area][Y_area] / 2))
model_3D = tp.CreateLayer({'positions': dictPosition_NeuronID.keys(),
'elements': 'iaf_psc_exp',
'extent': [1000.0, 1000.0, 100.0],
'edge_wrap': False})
# Build another parts
for part in Thalamus:
part[k_model] = 'iaf_psc_exp'
part[k_IDs] = nest.Create(part[k_model], part[k_NN])
NEURONS += part[k_NN]
logger.debug("{0} [{1}, {2}] {3} neurons".format(part[k_name], part[k_IDs][0],
part[k_IDs][0] + part[k_NN] - 1, part[k_NN]))
def marking_column():
layerNumberZ = 6
realLayer = 2
for layer in Cortex:
logger.debug("Marking Layer # {0}".format(layerNumberZ))
for Y_border in range(0, layer[area][Y_area], 2):
for X_border in range(0, layer[area][X_area], sum(layer[step])):
FirstPartNeuronsPositions = list()
SecondPartNeuronsPositions = list()
ThirdPartNeuronsPositions = list()
for X in range(X_border, X_border + layer[step][0], 1):
FirstPartNeuronsPositions.append(
dictPosition_NeuronID[(float(X), float(Y_border), float(layerNumberZ))])
FirstPartNeuronsPositions.append(
dictPosition_NeuronID[(float(X), float(Y_border + 1), float(layerNumberZ))])
if layer[step][1] != 0:
for X in range(X_border + layer[step][0], X_border + layer[step][0] + layer[step][1], 1):
SecondPartNeuronsPositions.append(
dictPosition_NeuronID[(float(X), float(Y_border), float(layerNumberZ))])
SecondPartNeuronsPositions.append(
dictPosition_NeuronID[(float(X), float(Y_border + 1), float(layerNumberZ))])
if layer[step][2] != 0:
for X in range(X_border + layer[step][0] + layer[step][1], X_border + sum(layer[step]), 1):
ThirdPartNeuronsPositions.append(
dictPosition_NeuronID[(float(X), float(Y_border), float(layerNumberZ))])
ThirdPartNeuronsPositions.append(
dictPosition_NeuronID[(float(X), float(Y_border + 1), float(layerNumberZ))])
layer[0][0] = FirstPartNeuronsPositions
layer[1][0] = SecondPartNeuronsPositions
layer[2][0] = ThirdPartNeuronsPositions
layerNumberZ -= 1
realLayer += 1
def connect(pre, post, syn_type=GABA, weight_coef=1):
global SYNAPSES
types[syn_type][0]['weight'] = weight_coef * types[syn_type][1]
conn_dict = {'rule': 'all_to_all',
'multapses': True}
nest.Connect(pre, post,
conn_spec=conn_dict,
syn_spec=types[syn_type][0])
SYNAPSES += len(pre) * len(post)
def connect_generator(part, startTime=1, stopTime=T, rate=250, coef_part=1):
name = part[k_name]
spikegenerators[name] = nest.Create('poisson_generator', 1, {'rate': float(rate),
'start': float(startTime),
'stop': float(stopTime)})
nest.Connect(spikegenerators[name], part[k_IDs],
syn_spec=static_syn,
conn_spec={'rule': 'fixed_outdegree',
'outdegree': int(part[k_NN] * coef_part)})
logger.debug("Generator => {0}. Element #{1}".format(name, spikegenerators[name][0]))
def connect_detector(part):
neurons = part[0]
name = part[k_name]
number = len(neurons)
spikedetectors[name] = nest.Create('spike_detector', params=detector_param)
nest.Connect(neurons, spikedetectors[name])
logger.debug("Detector => {0}. Tracing {1} neurons".format(name, number))
def connect_detector_Thalamus(part):
name = part[k_name]
number = part[k_NN] if part[k_NN] < N_detect else N_detect
spikedetectors[name] = nest.Create('spike_detector', params=detector_param)
nest.Connect(part[k_IDs][:number], spikedetectors[name])
logger.debug("Detector => {0}. Tracing {1} neurons".format(name, number))
'''Generates string full name of an image'''
def f_name_gen(path, name):
return "{0}{1}_dopamine_{2}.png".format(path, name, 'noise' if generator_flag else 'static')
def simulate():
import psutil
import time
global startsimulate, endsimulate, SAVE_PATH
SAVE_PATH = "results/output-{0}/".format(NEURONS)
if not os.path.exists(SAVE_PATH):
os.mkdir(SAVE_PATH)
begin = 0
nest.PrintNetwork()
logger.debug('* * * Simulating')
startsimulate = datetime.datetime.now()
for t in np.arange(0, T, dt):
print "SIMULATING [{0}, {1}]".format(t, t + dt)
nest.Simulate(dt)
end = clock()
times.append("{0:10.1f} {1:8.1f} {2:10.1f} {3:4.1f} {4}\n".format(begin, end - begin, end,
t, datetime.datetime.now().time()))
begin = end
print "COMPLETED {0}%\n".format(t/dt)
endsimulate = datetime.datetime.now()
logger.debug('* * * Simulation completed successfully')
def get_log(startbuild, endbuild):
logger.info("Number of neurons : {}".format(NEURONS))
logger.info("Number of synapses : {}".format(SYNAPSES))
logger.info("Building time : {}".format(endbuild - startbuild))
logger.info("Simulation time : {}".format(endsimulate - startsimulate))
logger.info("Noise : {}".format('YES' if generator_flag else 'NO'))
def save(GUI):
global txtResultPath
if GUI:
import pylab as pl
import nest.raster_plot
import nest.voltage_trace
logger.debug("Saving IMAGES into {0}".format(SAVE_PATH))
for key in spikedetectors:
try:
nest.raster_plot.from_device(spikedetectors[key], hist=True)
pl.savefig(f_name_gen(SAVE_PATH, "spikes_" + key.lower()), dpi=dpi_n, format='png')
pl.close()
except Exception:
print(" * * * from {0} is NOTHING".format(key))
txtResultPath = SAVE_PATH + 'txt/'
logger.debug("Saving TEXT into {0}".format(txtResultPath))
if not os.path.exists(txtResultPath):
os.mkdir(txtResultPath)
for key in spikedetectors:
save_spikes(spikedetectors[key], name=key)
with open(txtResultPath + 'timeSimulation.txt', 'w') as f:
for item in times:
f.write(item)
from collections import defaultdict
GlobalDICT = {}
def save_spikes(detec, name, hist=False):
title = "Raster plot from device '%i'" % detec[0]
ev = nest.GetStatus(detec, "events")[0]
ts = ev["times"]
gids = ev["senders"]
data = defaultdict(list)
if len(ts):
with open("{0}@spikes_{1}.txt".format(txtResultPath, name), 'w') as f:
f.write("Name: {0}, Title: {1}, Hist: {2}\n".format(name, title, "True" if hist else "False"))
for num in range(0, len(ev["times"])):
data[round(ts[num], 1)].append(gids[num])
for key in sorted(data.iterkeys()):
f.write("{0:>5} : {1:>4} : {2}\n".format(key, len(data[key]), sorted(data[key])))
else:
print "Spikes in {0} is NULL".format(name) | gpl-2.0 |
janusnic/21v-python | unit_20/matplotlib/axes_zoom_effect.py | 3 | 3303 | from matplotlib.transforms import Bbox, TransformedBbox, \
blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector,\
BboxConnectorPatch
def connect_bbox(bbox1, bbox2,
loc1a, loc2a, loc1b, loc2b,
prop_lines, prop_patches=None):
if prop_patches is None:
prop_patches = prop_lines.copy()
prop_patches["alpha"] = prop_patches.get("alpha", 1)*0.2
c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)
c1.set_clip_on(False)
c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)
c2.set_clip_on(False)
bbox_patch1 = BboxPatch(bbox1, **prop_patches)
bbox_patch2 = BboxPatch(bbox2, **prop_patches)
p = BboxConnectorPatch(bbox1, bbox2,
# loc1a=3, loc2a=2, loc1b=4, loc2b=1,
loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b,
**prop_patches)
p.set_clip_on(False)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect01(ax1, ax2, xmin, xmax, **kwargs):
"""
ax1 : the main axes
    ax2 : the zoomed axes
(xmin,xmax) : the limits of the colored area in both plot axes.
connect ax1 & ax2. The x-range of (xmin, xmax) in both axes will
    be marked. The keyword parameters will be used to create
patches.
"""
trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)
trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)
bbox = Bbox.from_extents(xmin, 0, xmax, 1)
mybbox1 = TransformedBbox(bbox, trans1)
mybbox2 = TransformedBbox(bbox, trans2)
prop_patches = kwargs.copy()
prop_patches["ec"] = "none"
prop_patches["alpha"] = 0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect02(ax1, ax2, **kwargs):
"""
ax1 : the main axes
    ax2 : the zoomed axes
Similar to zoom_effect01. The xmin & xmax will be taken from the
ax1.viewLim.
"""
tt = ax1.transScale + (ax1.transLimits + ax2.transAxes)
trans = blended_transform_factory(ax2.transData, tt)
mybbox1 = ax1.bbox
mybbox2 = TransformedBbox(ax1.viewLim, trans)
prop_patches = kwargs.copy()
prop_patches["ec"] = "none"
prop_patches["alpha"] = 0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
import matplotlib.pyplot as plt
plt.figure(1, figsize=(5, 5))
ax1 = plt.subplot(221)
ax2 = plt.subplot(212)
ax1.set_xlim(0, 1)
ax2.set_xlim(0, 5)
zoom_effect01(ax1, ax2, 0.2, 0.8)
ax1 = plt.subplot(222)
ax1.set_xlim(2, 3)
ax2.set_xlim(0, 5)
zoom_effect02(ax1, ax2)
plt.show()
| mit |
ClimbsRocks/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 131 | 5705 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| bsd-3-clause |
LevinJ/CodeSamples | python/images/transformation/tranformation.py | 1 | 1917 | import cv2
import numpy as np
from matplotlib import pyplot as plt
print(cv2.__version__)
class Transformation:
def __init__(self):
return
def scaling(self):
self.img_trans = cv2.resize(self.img,None,fx=2, fy=2)
# self.img_trans = cv2.resize(self.img,(500,500))
return
def translation(self):
rows,cols, _= self.img.shape
M = np.float32([[1,0,50],[0,1,50]])
self.img_trans = cv2.warpAffine(self.img, M,(cols,rows))
return
def rotation(self):
rows,cols, _ = self.img.shape
M = cv2.getRotationMatrix2D((cols/2,rows/2),45, 2)
self.img_trans = cv2.warpAffine(self.img,M,(cols,rows))
return
def affine_trans(self):
        rows, cols = self.img.shape[:2]  # use only height and width; the loaded image may carry a channel axis
pts1 = np.float32([[50,50],[200,50],[50,200]])
pts2 = np.float32([[10,100],[200,50],[100,250]])
M = cv2.getAffineTransform(pts1,pts2)
self.img_trans = cv2.warpAffine(self.img,M,(cols,rows))
return
def perspective_trans(self):
        rows, cols = self.img.shape[:2]  # use only height and width; the loaded image may carry a channel axis
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv2.getPerspectiveTransform(pts1,pts2)
self.img_trans = cv2.warpPerspective(self.img,M,(300,300))
return
def run(self):
img = cv2.imread('../temp/sift_basic_0.jpg', cv2.IMREAD_COLOR)
cv2.imshow('image',img)
self.img = img
# self.scaling()
# self.translation()
self.rotation()
# self.affine_trans()
# self.perspective_trans()
cv2.imshow('image_scaled',self.img_trans)
cv2.waitKey(0)
cv2.destroyAllWindows()
return
if __name__ == "__main__":
obj= Transformation()
obj.run() | gpl-2.0 |
kimasx/smapp-toolkit | examples/plot_user_per_day_histogram.py | 2 | 3653 | """
Script makes users-per-day histogram going N days back.
Usage:
    python plot_user_per_day_histogram.py -s smapp.politics.fas.nyu.edu -p 27011 -u smapp_readOnly -w SECRETPASSWORD -d USElection2016Hillary --days 10 --output-file hillary.png
@jonathanronen 2015/4
"""
import pytz
import argparse
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from collections import defaultdict
from datetime import datetime, timedelta
from smapp_toolkit.twitter import MongoTweetCollection
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--server', default='smapp.politics.fas.nyu.edu', help="Mongodb server address [smapp.politics.fas.nyu.edu]")
parser.add_argument('-p', '--port', type=int, default=27011, help='Mongodb server port [27011]')
parser.add_argument('-u', '--user', help='Mongodb username [None]')
    parser.add_argument('-w', '--password', help='Mongodb password [None]')
parser.add_argument('-d', '--database', help='Mongodb database name [None]')
    parser.add_argument('--days', type=int, default=7, help='How many days to go back [7]')
parser.add_argument('--timezone', default='America/New_York', help='Time zone to consider [America/New_York]')
parser.add_argument('--output-file', default='histogram.png', help='Output file [histogram.png]')
args = parser.parse_args()
print("Generating avg tweets/user/day histogram for {}".format(args.database))
TIMEZONE = pytz.timezone(args.timezone)
print("Days will be split according to time zone {}".format(args.timezone))
today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=TIMEZONE)
n_days_ago = today - timedelta(days=args.days)
print("The period being considered is {} to {}".format(
n_days_ago.strftime('%Y-%m-%d'),
today.strftime('%Y-%m-%d')))
print("Connecting to database")
collection = MongoTweetCollection(args.server, args.port, args.user, args.password, args.database)
ntweets = collection.since(n_days_ago).until(today).count()
print("Considering {} tweets".format(ntweets))
userids = set()
counts = dict()
for i in range(args.days):
day_counts = defaultdict(lambda: 0)
day_start = n_days_ago + i*timedelta(days=1)
day_end = n_days_ago + (i+1)*timedelta(days=1)
print("Counting for {}".format(day_start.strftime('%Y-%m-%d')))
for tweet in collection.since(day_start).until(day_end):
day_counts[tweet['user']['id']] += 1
userids.add(tweet['user']['id'])
counts[day_start] = day_counts
print("Done getting data from database.")
#### AVERAGE TWEETS PER DAY COUNTS (how many users tweeted x times per day on average)
user_avg_daily_tweets = { user: np.mean([counts[day][user] for day in counts]) for user in userids }
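    # For instance, a user who tweeted 3, 0 and 6 times over a three-day window
    # averages (3 + 0 + 6) / 3 = 3.0 tweets per day; days without tweets count
    # as zero because each day_counts is a defaultdict.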
fig = plt.figure(figsize=(10,8))
plt.subplot(212)
counts = np.log(user_avg_daily_tweets.values())
bins = np.linspace(0, max(counts), max(counts)*10+1)
plt.hist(counts, bins, color='r', alpha=.6)
plt.ylabel('Num users')
plt.xlabel('log(avg tweets per day)')
plt.subplot(211)
plt.title('Average number of tweets per day for users\n{}\n {} to {}'.format(
args.database,
n_days_ago.strftime('%Y-%m-%d'),
today.strftime('%Y-%m-%d')))
counts = np.array(user_avg_daily_tweets.values())
bins = np.linspace(0, max(counts), max(counts)+1)
plt.hist(counts, bins, color='r', alpha=.6)
plt.ylabel('Num users')
plt.xlabel('avg tweets per day')
plt.tight_layout()
plt.savefig(args.output_file)
print("Done.")
| gpl-2.0 |
bigdataelephants/scikit-learn | sklearn/feature_extraction/image.py | 32 | 17167 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
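# Illustrative example (not executed here): for a 2x2 grid with the default
# n_z=1, the four vertices are numbered 0..3 row-major and the function pairs
# them column-wise as
#
#     >>> _make_edges_3d(2, 2)
#     array([[0, 2, 0, 1],
#            [1, 3, 2, 3]])
#
# i.e. edges 0-1 and 2-3 along one grid axis and 0-2, 1-3 along the other;
# the z direction contributes no edges because the grid is a single slice.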
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Parameters
===========
img: ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
=====
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
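# Illustrative example (not executed here): a 2x2 grayscale image yields a 4x4
# sparse adjacency matrix whose off-diagonal weights are the absolute
# differences between neighbouring pixel values and whose diagonal stores the
# pixel values themselves:
#
#     >>> A = img_to_graph(np.array([[0., 1.], [2., 3.]]))
#     >>> A.shape
#     (4, 4)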
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
===========
n_x: int
Dimension in x axis
n_y: int
Dimension in y axis
n_z: int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
=====
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
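# Illustrative example (not executed here): grid_to_graph(2, 2) connects each
# of the four pixels to itself and to its orthogonal neighbours with weight 1:
#
#     >>> grid_to_graph(2, 2).toarray()
#     array([[1, 1, 1, 0],
#            [1, 1, 0, 1],
#            [1, 0, 1, 1],
#            [0, 1, 1, 1]])
#
# Such connectivity matrices are commonly passed to structured (e.g. Ward)
# agglomerative clustering.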
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Parameters
===========
i_h: int
The image height
i_w: int
        The image width
p_h: int
The height of a patch
p_w: int
The width of a patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
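# Illustrative example (not executed here): a 4x4 image with 2x2 patches has
# (4 - 2 + 1) * (4 - 2 + 1) = 9 possible patches, so
# _compute_n_patches(4, 4, 2, 2) returns 9; with max_patches=0.5 the count is
# capped at int(0.5 * 9) = 4, and an integer max_patches below 9 is returned
# unchanged.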
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Parameters
----------
arr: ndarray
n-dimensional array of which patches are to be extracted
patch_shape: integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step: integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches: strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
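# Illustrative example (not executed here): non-overlapping 2x2 patches of a
# 4x4 array can be viewed without copying any data:
#
#     >>> arr = np.arange(16).reshape(4, 4)
#     >>> extract_patches(arr, patch_shape=2, extraction_step=2).shape
#     (2, 2, 2, 2)
#
# i.e. a 2x2 grid of patch positions, each holding a 2x2 patch that shares
# memory with ``arr``.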
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Parameters
----------
image: array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Parameters
----------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size: tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image: array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
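# Illustrative example (not executed here): extracting every overlapping patch
# and reconstructing from the unmodified patches reproduces the original image
# exactly, because the overlap averaging is then lossless:
#
#     >>> img = np.arange(16).reshape(4, 4).astype(float)
#     >>> patches = extract_patches_2d(img, (2, 2))
#     >>> np.allclose(reconstruct_from_patches_2d(patches, img.shape), img)
#     True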
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Parameters
----------
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
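# Illustrative example (not executed here): two 4x4 grayscale images with
# max_patches=2 yield two random patches per image, stacked together:
#
#     >>> X = np.arange(32).reshape(2, 4, 4)
#     >>> pe = PatchExtractor(patch_size=(2, 2), max_patches=2, random_state=0)
#     >>> pe.transform(X).shape
#     (4, 2, 2)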
| bsd-3-clause |
sharkdata/sharkdata | app_ctdprofiles/ctd_profile_plot.py | 1 | 14823 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 21 13:19:50 2018
@author: a002028
"""
import time
import numpy as np
import pandas as pd
import zipfile
# from pprint import pprint
from bokeh.models import (
ColumnDataSource,
CustomJS,
WidgetBox,
LinearAxis,
Spacer,
) # , LabelSet, Slider
from bokeh.layouts import row, column # , layout, widgetbox
from bokeh.models.widgets import (
Select,
RangeSlider,
DataTable,
TableColumn,
Panel,
Tabs,
)
from bokeh.plotting import figure, show, output_file
# from bokeh.embed import components, file_html
# from bokeh.resources import CDN
# from bokeh.sampledata.periodic_table import elements
# from bokeh.resources import INLINE
class ReadZipFile(object):
""""""
def __init__(self, zipfile_path, filename):
self.archive = zipfile.ZipFile(zipfile_path, "r")
try:
file_path = self.get_filepath(self.archive.namelist(), filename)
row_list = self.open_file(file_path)
except IOError:
raise IOError("{} not found in {}".format(filename, zipfile_path))
self.setup_dataframe(row_list)
def setup_dataframe(self, row_list, sep="\t"):
"""
Creates pd.Series with row_list as input
:param row_list: list of rows
:param sep: str, splits each row
:return: pd.DataFrame
"""
serie = pd.Series(data=row_list)
columns = serie.iloc[0].split(sep)
bool_array = serie.index > 0
self._df = pd.DataFrame(
serie.iloc[bool_array].str.split(sep).tolist(), columns=columns
)
self._df.replace("", np.nan, inplace=True)
def get_dataframe(self):
"""
:return: pd.DataFrame
"""
return self._df
def get_data(self, parameters, astype=np.float):
"""
:param parameters: list, all parameters to include in return
:param astype: set data as this type eg. float, int, str
:return: dict or pd.DataFrame
"""
return self._df.loc[:, parameters].astype(astype)
def open_file(self, file_path, comment="//"):
"""
:param file_path: str, path to file
:param comment: str, exclude rows that starts with this comment
:return: list, selected rows
"""
file = self.archive.open(file_path)
row_list = [
l.decode("cp1252")
for l in file
if not l.decode("cp1252").startswith(comment)
]
return row_list
@staticmethod
def get_filepath(path_list, pattern):
"""
:param path_list:
:param pattern:
:return:
"""
for path in path_list:
if pattern in path:
return path
class BaseAxes(object):
""""""
def __init__(self):
self._xaxis = None
self._yaxis = None
def _convert_yaxis_values(self, y):
"""
Requires derived classes to override this method
"""
raise NotImplementedError
@property
def xaxis(self):
"""
:return: xaxis
"""
return self._xaxis
@xaxis.setter
def xaxis(self, xlabel):
"""
Setter of xaxis
:param xlabel: str
"""
self._xaxis = LinearAxis(axis_label=xlabel)
@property
def yaxis(self):
"""
:return: yaxis
"""
return self._yaxis
@yaxis.setter
def yaxis(self, ylabel):
"""
Setter of yaxis.
If ylabel equals depth or pressure (as defined below): execute self._convert_yaxis_values(ylabel)
:param ylabel: str
"""
self._yaxis = LinearAxis(axis_label=ylabel)
if "PRES" in ylabel or "DEP" in ylabel:
self._convert_yaxis_values(ylabel)
class ProfilePlot(BaseAxes):
"""
Utilizes interactive plotting tools of Bokeh
https://bokeh.pydata.org/en/latest/
"""
def __init__(self, dataframe, parameters=None, tabs=None):
self.df = dataframe
self.parameters = parameters
self.tabs = tabs
self.TOOLS = "reset,hover,pan,wheel_zoom,box_zoom,lasso_select,save"
self.TOOLTIPS = [("index", "$index"), ("x-value", "$x"), ("y-value", "$y")]
def _set_output_file(self, name):
"""
Sets plot title and creates html output file
:param name: str, name of profile
:return: HTML, output file
"""
self.TITLE = "Profile Plot - " + name
if name == "":
name = "profile_plot.html"
elif not name.endswith(".html"):
name = name + ".html"
# online, faster
output_file(name)
# offline, slower
# output_file(name, mode='inline')
def _convert_yaxis_values(self, ylabel):
"""
Is activated when y-axis label equals pressure or depth.
Why? Because we want value 0 (m or dbar) to start on top of the
plot with an increase downwards instead of upwards.
Turning all y-axis values negative and than override axis labels,
hence values are negative while labels are positive
:param ylabel: str, y label and dataframe key
"""
self._turn_values_negative(ylabel)
y_labels = {v: str(abs(v)) for v in range(0, int(min(self.df[ylabel])) * 2, -1)}
self.yaxis.major_label_overrides = y_labels
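    # For example, a sample plotted at y = -35 dbar is labelled "35", so depth
    # still appears to increase downwards while the underlying values stay
    # negative for plotting purposes.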
def _turn_values_negative(self, ylabel):
"""
Is activated when y-axis label equals pressure or depth.
Turning all y-axis values negative
:param ylabel: str, y label and dataframe key
"""
if "PRES" in ylabel or "DEP" in ylabel:
if not any(self.df[ylabel] < 0):
self.df[ylabel] = -self.df[ylabel]
def _get_source_selecters(self, x=None, y=None, source=None):
"""
:param x: str, x-axis parameter
:param y: str, y-axis parameter
:param source: bokeh.models.ColumnDataSource
:return: x- and y-axis selecter
"""
callback = CustomJS(
args={"source": source, "xaxis": self.xaxis, "yaxis": self.yaxis},
code="""
//console.log(' changed selected option', cb_obj.value);
var data = source.data;
data['x'] = data[x_param.value];
data['y'] = data[y_param.value];
xaxis.attributes.axis_label = x_param.value;
yaxis.attributes.axis_label = y_param.value;
xaxis.change.emit();
yaxis.change.emit();
source.change.emit();
""",
)
xaxis_selecter = Select(
title="x-axis parameter",
value=x,
options=self.parameters,
callback=callback,
width=200,
)
callback.args["x_param"] = xaxis_selecter
yaxis_selecter = Select(
title="y-axis parameter",
value=y,
options=self.parameters,
callback=callback,
width=200,
)
callback.args["y_param"] = yaxis_selecter
return xaxis_selecter, yaxis_selecter
def _get_xaxis_slider(self, plot_obj):
"""
Creates an "x-axis slider" that allows the user to interactively change the boundaries of the axes
:param plot_obj: Bokeh plot object
:return: x-axis slider
"""
callback = CustomJS(
args={"plot": plot_obj},
code="""
var a = cb_obj.value;
plot.x_range.start = a[0];
plot.x_range.end = a[1];
""",
)
slider = RangeSlider(
start=0,
end=50,
value=(self.df["x"].min(), self.df["x"].max()),
step=1,
title="x-axis range slider",
width=200,
)
slider.js_on_change("value", callback)
return slider
def _get_data_table(self, source=None):
"""
Create a data table associated to the plot object
:param source: bokeh.models.ColumnDataSource
:return: bokeh.models.widgets.DataTable
"""
columns = [
TableColumn(field="y", title="y-axis data"),
TableColumn(field="x", title="x-axis data"),
]
return DataTable(source=source, columns=columns, width=200)
def circle_plot(self, x=None, y=None, source=None):
"""
https://bokeh.pydata.org/en/latest/docs/reference/plotting.html#bokeh.plotting.figure.Figure.circle
Plot profile using bokeh circle plot
:param x: str, dataframe key
:param y: str, dataframe key
:param source: bokeh.models.sources.ColumnDataSource
:return: bokeh.plotting.figure.Figure
"""
plot = figure( # plot_width=800,
# x_axis_location="above",
# y_axis_location="right",
tools=self.TOOLS,
tooltips=self.TOOLTIPS,
title=self.TITLE,
)
plot.background_fill_color = "#dddddd"
plot.grid.grid_line_color = "white"
plot.outline_line_width = 1
plot.outline_line_color = "black"
plot.axis.visible = False
plot.circle("x", "y", source=source, size=12, line_color="lightblue", alpha=0.6)
# For setting definitions see @xaxis.setter, class BaseAxis
self.xaxis = x
# For setting definitions see @yaxis.setter, class BaseAxis
self.yaxis = y
plot.add_layout(self.xaxis, "below")
plot.add_layout(self.yaxis, "left")
return plot
# p.line(x, y, source=source, color='#A6CEE3')
# labels = LabelSet(x=x, y=y,
# text=z,
# y_offset=8,
# text_font_size="8pt", text_color="#555555",
# source=source, text_align='center')
# p.add_layout(labels)
def line_plot(self, x=None, y=None, source=None):
"""
https://bokeh.pydata.org/en/latest/docs/reference/plotting.html#bokeh.plotting.figure.Figure.line
Plot profile using bokeh line plot
:param x: str, dataframe key
:param y: str, dataframe key
:param source: bokeh.models.sources.ColumnDataSource
:return: bokeh.plotting.figure.Figure
"""
plot = figure(
plot_width=800,
x_axis_location="above",
tools=self.TOOLS,
tooltips=self.TOOLTIPS,
title=self.TITLE,
)
plot.background_fill_color = "#dddddd"
# plot.xaxis.axis_label = x
# plot.yaxis.axis_label = y
plot.grid.grid_line_color = "white"
plot.line("x", "y", source=source, color="black")
# plot.yaxis.major_label_overrides = self.y_labels
return plot
def plot(self, x=None, y=None, z=None, name=""):
"""
:param x: str, dataframe key
:param y: str, dataframe key
:param z: str, dataframe key
:param name: str, name of plot
:return: Interactive HTML plot
"""
self._set_output_file(name)
self._turn_values_negative(y)
self.df["x"] = self.df[x]
self.df["y"] = self.df[y]
source = ColumnDataSource(self.df)
circle_plot = self.circle_plot(x=x, y=y, source=source)
line_plot = self.line_plot(x=x, y=y, source=source)
xrange_slider = self._get_xaxis_slider(circle_plot)
xaxis_selecter, yaxis_selecter = self._get_source_selecters(
x=x, y=y, source=source
)
data_table = self._get_data_table(source=source)
# show(widgetbox(data_table))
# spacer = Spacer(width=100, height=100)
widget_list = [yaxis_selecter, xaxis_selecter, xrange_slider, data_table]
widgets = WidgetBox(*widget_list)
col_1 = column(circle_plot, sizing_mode="scale_width")
col_2 = column(widgets, sizing_mode="scale_width")
row_1 = row([col_1, col_2], sizing_mode="scale_width")
return row_1
# # tab1 = Panel(child=row_1, title="circle")#, sizing_mode='scale_width')
# # tab2 = Panel(child=line_plot, title="line")#, sizing_mode='scale_width')
# # tabs = Tabs(tabs=[tab1, tab2])#, sizing_mode='scale_width')
# # #
# # show(tabs)
# show(row_1)
#
# # row_3 = column(spacer, sizing_mode='scale_both')
# # show(row([col_1, col_2], sizing_mode='scale_width'))
# # show(layout(row([col_1, col_2])))
# # script, div = components((col_1, col_2))
# # show(components([col_1, col_2]))
# # show(row(circle_plot, widgets)) #, sizing_mode='stretch_both'))
# if __name__ == '__main__':
# # # path_zipfile = 'D:\\Utveckling\\Github\\ctdpy\\ctdpy\\exports\\SHARK_CTD_2018_IBT_SMHI.zip'
# # path_zipfile = 'D:\\Utveckling\\Github\\ctdpy\\ctdpy\\tests\\etc\\SHARK_CTD_2018_BAS_SMHI.zip'
# #
# # # profile_name = 'ctd_profile_SBE09_0827_20180120_0910_26_01_0126'
# # profile_name = 'ctd_profile_SBE09_1044_20181205_1536_34_01_0154'
#
#
#
# path_zipfile = 'D:/arnold/4_sharkdata/sharkdata_ftp/datasets/SHARK_CTDprofile_2018_BAS_SMHI_version_2019-01-15.zip'
# profile_name = 'ctd_profile_SBE09_1044_20181205_1536_34_01_0154'
#
#
#
# start_time = time.time()
# rzip = ReadZipFile(path_zipfile, profile_name)
# print("Zipfile loaded--%.3f sec" % (time.time() - start_time))
# # print(rzip._df)
#
# parameter_list = ['PRES_CTD [dbar]', 'CNDC_CTD [S/m]', 'CNDC2_CTD [S/m]', 'SALT_CTD [psu (PSS-78)]',
# 'SALT2_CTD [psu (PSS-78)]', 'TEMP_CTD [°C (ITS-90)]', 'TEMP2_CTD [°C (ITS-90)]',
# 'DOXY_CTD [ml/l]', 'DOXY2_CTD [ml/l]', 'PAR_CTD [µE/(cm2 ·sec)]', 'CHLFLUO_CTD [mg/m3]',
# 'TURB_CTD [NTU]', 'PHYC_CTD [ppb]']
# # parameter_list = ['PRES_CTD [dbar]','CNDC_CTD [mS/m]','CNDC2_CTD [mS/m]','SALT_CTD [psu]','SALT2_CTD [psu]',
# # 'TEMP_CTD [°C]','TEMP2_CTD [°C]','DOXY_CTD [ml/l]','DOXY2_CTD [ml/l]',
# # 'PAR_CTD [µE/(cm2 ·sec)]','CHLFLUO_CTD [mg/m3]','TURB_CTD [NTU]','PHYC_CTD [ppb]']
#
# start_time = time.time()
# data = rzip.get_data(parameter_list)
#
# print("Data retrieved--%.3f sec" % (time.time() - start_time))
# # data = rzip.get_dataframe()
# # print(data)
#
# start_time = time.time()
# profile = ProfilePlot(data, parameters=parameter_list)
# profile.plot(x='TEMP_CTD [°C (ITS-90)]',
# y='PRES_CTD [dbar]',
# z='SALT_CTD [psu (PSS-78)]',
# name=profile_name)
# print("Data ploted--%.3f sec" % (time.time() - start_time))
| mit |
rlabbe/filterpy | filterpy/kalman/tests/test_ekf.py | 2 | 2765 | # -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
from math import sqrt
import numpy as np
from filterpy.kalman import ExtendedKalmanFilter
from numpy import array, eye, asarray
from filterpy.common import Saver
from filterpy.examples import RadarSim
from pytest import approx
from scipy.spatial.distance import mahalanobis as scipy_mahalanobis
DO_PLOT = False
def test_ekf():
def H_of(x):
""" compute Jacobian of H matrix for state x """
horiz_dist = x[0]
altitude = x[2]
denom = sqrt(horiz_dist**2 + altitude**2)
return array([[horiz_dist/denom, 0., altitude/denom]])
def hx(x):
""" takes a state variable and returns the measurement that would
correspond to that state.
"""
return sqrt(x[0]**2 + x[2]**2)
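    # The radar measures slant range r = sqrt(horiz_dist**2 + altitude**2), so
    # the Jacobian in H_of is dr/dx = [horiz_dist/r, 0, altitude/r] with
    # respect to the state [position, velocity, altitude]; velocity does not
    # affect the range and therefore gets a zero entry.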
dt = 0.05
proccess_error = 0.05
rk = ExtendedKalmanFilter(dim_x=3, dim_z=1)
rk.F = eye(3) + array ([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])*dt
def fx(x, dt):
return np.dot(rk.F, x)
rk.x = array([-10., 90., 1100.])
rk.R *= 10
rk.Q = array([[0, 0, 0],
[0, 1, 0],
[0, 0, 1]]) * 0.001
rk.P *= 50
rs = []
xs = []
radar = RadarSim(dt)
ps = []
pos = []
s = Saver(rk)
for i in range(int(20/dt)):
z = radar.get_range(proccess_error)
pos.append(radar.pos)
rk.update(asarray([z]), H_of, hx, R=hx(rk.x)*proccess_error)
ps.append(rk.P)
rk.predict()
xs.append(rk.x)
rs.append(z)
s.save()
# test mahalanobis
a = np.zeros(rk.y.shape)
maha = scipy_mahalanobis(a, rk.y, rk.SI)
assert rk.mahalanobis == approx(maha)
s.to_array()
xs = asarray(xs)
ps = asarray(ps)
rs = asarray(rs)
p_pos = ps[:, 0, 0]
p_vel = ps[:, 1, 1]
p_alt = ps[:, 2, 2]
pos = asarray(pos)
if DO_PLOT:
plt.subplot(311)
plt.plot(xs[:, 0])
plt.ylabel('position')
plt.subplot(312)
plt.plot(xs[:, 1])
plt.ylabel('velocity')
plt.subplot(313)
#plt.plot(xs[:,2])
#plt.ylabel('altitude')
plt.plot(p_pos)
plt.plot(-p_pos)
plt.plot(xs[:, 0] - pos)
if __name__ == '__main__':
test_ekf() | mit |
simonvh/gimmemotifs | gimmemotifs/db/__init__.py | 1 | 18937 | """Motif databases."""
import glob
import os
from urllib.request import urlopen, urlretrieve
import re
import time
import tarfile
import shutil
from tempfile import mkdtemp
import zipfile
import pandas as pd
from gimmemotifs.motif import read_motifs
from gimmemotifs.utils import get_jaspar_motif_info
DEFAULT_OUT = "data/motif_databases/"
class MotifDb(object):
"""MotifDb base class.
Use to get a list of available databases:
>>> MotifDb.list_databases()
[]
"""
_dbs = {}
name = None
date = time.strftime("%Y-%m-%d")
@classmethod
def create(cls, name):
"""Create a motif database object based on the db name.
Parameters
----------
name : str
Name of the provider (eg. JASPAR, HOMER, ...)
Returns
-------
db : MotifDb instance
MotifDb instance.
"""
try:
return cls._dbs[name.lower()]()
except KeyError:
raise Exception("Unknown motif database")
@classmethod
def register_db(cls, dbname):
"""Register method to keep list of dbs."""
def decorator(subclass):
"""Register as decorator function."""
cls._dbs[dbname] = subclass
subclass.name = dbname
return subclass
return decorator
@classmethod
def list_databases(self):
"""List available databases."""
return self._dbs.keys()
def __hash__(self):
return hash(str(self.__class__))
def create_annotation(self, name, anno):
base = os.path.splitext(name)[0]
fname = base + ".motif2factors.txt"
with open(fname, "w") as f:
print("Motif\tFactor\tEvidence\tCurated", file=f)
for motif, factors in anno.items():
for factor, status, curated in factors:
print("{}\t{}\t{}\t{}".format(
motif, factor, status, curated),
file=f)
register_db = MotifDb.register_db
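# Usage sketch (illustrative, based only on the classes defined in this
# module): once the provider subclasses below have registered themselves, a
# concrete database object is obtained through the factory and its motifs are
# downloaded together with the motif2factors annotation, e.g.
#
#     db = MotifDb.create("jaspar")
#     db.download(outdir="data/motif_databases/")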
@register_db('jaspar')
class JasparMotifDb(MotifDb):
"""
JASPAR motif database
"""
URL = "http://jaspar.genereg.net/download/CORE/JASPAR2018_CORE{}_non-redundant_pfms_jaspar.txt"
NAME = "JASPAR2018{}.pfm"
GROUPS = ["", "vertebrates", "plants", "insects", "nematodes", "fungi", "urochordates"]
def download(self, outdir=DEFAULT_OUT):
### JASPAR ###
for group in self.GROUPS:
if group != "":
group = "_" + group
outfile = os.path.join(outdir, self.NAME.format(group))
url = self.URL.format(group)
with open(outfile, "w") as f:
with urlopen(url) as response:
for line in response:
line = line.decode().strip()
if line.startswith(">"):
line = "_".join(line.split("\t")[:2])
print(line, file=f)
motifs = read_motifs(outfile, fmt="jaspar")
with open(outfile, "w") as f:
print("# JASPAR2018{} motif database".format(group), file=f)
print("# Retrieved from: {}".format(url), file=f)
print("# Date: {}".format(self.date), file=f)
for motif in motifs:
print(motif.to_pwm(), file=f)
#if group == "_vertebrates":
anno = self.annotate_factors(motifs)
self.create_annotation(os.path.join(outdir, self.NAME.format(group)), anno)
def annotate_factors(self, motifs):
anno = {}
for motif in motifs:
info = get_jaspar_motif_info(motif.id.split("_")[0])
try:
mtype = info["type"]
if mtype == "universal protein binding microarray (PBM)":
mtype = "PBM"
except:
mtype = "Unknown"
factors = re.sub(r'\([^)]+\)', '', motif.id.split("_")[1]).split('::')
anno[motif.id] = [[f, mtype, "Y"] for f in factors]
return anno
@register_db('homer')
class HomerMotifDb(MotifDb):
"""
HOMER motif database
"""
NAME = "HOMER.pfm"
URL = "http://homer.ucsd.edu/homer/custom.motifs"
def download(self, outdir=DEFAULT_OUT):
### Homer ###
pfm_out = os.path.join(outdir, self.NAME)
with open(pfm_out, "w") as f:
print("# Homer motif database (v4.10)", file=f)
print("# Retrieved from: {}".format(self.URL), file=f)
print("# Date: {}".format(self.date), file=f)
with urlopen(self.URL) as response:
for line in response:
line = line.decode().strip()
if line.startswith(">"):
line = "_".join(line.split("\t")[:2])
print(line, file=f)
motifs = read_motifs(pfm_out)
anno = self.annotate_factors(motifs)
self.create_annotation(os.path.join(outdir, self.NAME), anno)
def annotate_factors(self, motifs):
anno = {}
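# Editor's note: reading the pattern below literally, group(1) captures the
# factor name sitting between an underscore and the opening parenthesis of a
# HOMER-style motif name; the parenthesised part is kept in group(2).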
p = re.compile(r'\w+_([\w.-]+)\(([\w,-]+\))')
for motif in motifs:
name, source, _ = motif.id.split('/')
try:
m = p.search(name)
name_factor = m.group(1).lower()
source_factor = source.split("-")[1]
for tag in ["gfp", "v5", "biotin", "myc"]:
source_factor = source_factor.replace("." + tag, "")
if name_factor.replace("-", "") == source_factor.lower():
anno[motif.id] = [[source_factor, "ChIP-seq", "Y"]]
else:
pass
except:
pass
#anno[motif.id] = factors
return anno
@register_db('hocomoco')
class HocomocoMotifDb(MotifDb):
"""
HOCOMOCO motif database
"""
#BASE_URL = "http://hocomoco11.autosome.ru/final_bundle/hocomoco11/core/{0}/mono/"
#ANNO_URL = BASE_URL + "HOCOMOCOv11_core_annotation_{0}_mono.tsv"
#URL = BASE_URL + "HOCOMOCOv11_core_pcms_{0}_mono.txt"
#NAME = "HOCOMOCOv11_{}.pfm"
BASE_URL = "http://hocomoco10.autosome.ru/final_bundle/{0}/mono/"
ANNO_URL = BASE_URL + "HOCOMOCOv10_annotation_{0}_mono.tsv"
URL = BASE_URL + "HOCOMOCOv10_pcms_{0}_mono.txt"
NAME = "HOCOMOCOv10_{}.pfm"
def download(self, outdir=DEFAULT_OUT):
for group in ["HUMAN", "MOUSE"]:
outfile = os.path.join(outdir, self.NAME.format(group))
url = self.URL.format(group)
with open(outfile, "w") as f:
print("# HOCOMOCOv10_{} motif database".format(group), file=f)
print("# Retrieved from: {}".format(url), file=f)
print("# Date: {}".format(self.date), file=f)
with urlopen(url) as response:
for line in response:
line = line.decode().strip()
if line.startswith(">"):
line = "_".join(line.split("\t")[:2])
print(line, file=f)
motifs = read_motifs(outfile)
anno = self.annotate_factors(motifs, self.ANNO_URL.format(group))
self.create_annotation(os.path.join(outdir, self.NAME.format(group)), anno)
def annotate_factors(self, motifs, url):
anno = {}
with urlopen(url) as response:
for line in response:
vals = line.decode().strip().split("\t")
anno[vals[0]] = vals[1]
anno = {motif.id:[[anno[motif.id], "ChIP-seq", "Y"]] for motif in motifs}
return anno
@register_db('encode')
class EncodeMotifDb(MotifDb):
"""
ENCODE motif database
Kheradpour and Kellis, 2013, doi:10.1093/nar/gkt1249
"""
URL = "http://compbio.mit.edu/encode-motifs/motifs.txt"
NAME = "ENCODE.pfm"
def download(self, outdir=DEFAULT_OUT):
outfile = os.path.join(outdir, self.NAME)
with open(outfile, "w") as f:
print("# ENCODE motif database", file=f)
print("# Retrieved from: {}".format(self.URL), file=f)
print("# Date: Dec. 2013", file=f)
with urlopen(self.URL) as response:
for line in response:
line = line.decode().strip()
if line.startswith(">"):
line = line.replace("\t", " ")
print(line, file=f)
motifs = read_motifs(outfile)
anno = self.annotate_factors(motifs)
self.create_annotation(os.path.join(outdir, self.NAME), anno)
def annotate_factors(self, motifs):
anno = {}
for motif in motifs:
if "disc" in motif.id:
source = motif.id.split(" ")[-1]
factor = source.split("_")[0]
anno[motif.id] = [[factor, "ChIP-seq", "N"]]
elif "jolma" in motif.id:
vals = motif.id.split(" ")
factor = vals[-1].split("_")[0]
anno[motif.id] = [[factor, "HT-SELEX", "N"]]
elif "transfac" in motif.id:
vals = motif.id.split(" ")
factor = vals[-2].split("_")[0]
anno[motif.id] = [[f, "TRANSFAC", "Y"] for f in factor.split("::")]
elif "jaspar" in motif.id:
vals = motif.id.split(" ")
jaspar_factor = vals[-1].split("_")[0]
factor = vals[-2].split("_")[0]
if len(jaspar_factor) > 1 and jaspar_factor[1] == jaspar_factor[1].lower():
factor = factor.capitalize()
anno[motif.id] = [[f, "JASPAR", "Y"] for f in factor.split("::")]
elif "bulyk" in motif.id:
vals = motif.id.split(" ")
factor = vals[-2].split("_")[0].capitalize()
bulyk_factor = vals[-1].split("_")[0].capitalize()
if factor != bulyk_factor and factor.startswith("Znf"):
factor = bulyk_factor
anno[motif.id] = [[factor, "PBM", "N"]]
else:
raise ValueError("Don't recognize motif {}".format(motif.id))
return anno
@register_db('factorbook')
class FactorbookMotifDb(MotifDb):
"""
Factorbook
"""
ANNO_URL = "https://genome.cshlp.org/content/suppl/2012/08/22/22.9.1798.DC1/TableS1.xls"
NAME = "factorbook.pfm"
def download(self, outdir=DEFAULT_OUT):
# Factorbook is only supplied in non-redundant form as a supplemental pdf
# For now, use the non-redundant version included with GimmeMotifs
infile = "data/motif_databases/factorbook.pfm"
outfile = os.path.join(outdir, self.NAME)
motifs = read_motifs(infile)
with open(outfile, "w") as f:
for motif in motifs:
print(motif.to_pwm(), file=f)
anno = self.annotate_factors(motifs)
self.create_annotation(os.path.join(outdir, self.NAME), anno)
def annotate_factors(self, motifs):
anno = {}
df = pd.read_excel("https://genome.cshlp.org/content/suppl/2012/08/22/22.9.1798.DC1/TableS1.xls")
t = {}
for factor,motif_names in df[["HGNC ID", "canonical motif"]].dropna().drop_duplicates().values:
for m in motif_names.split(";"):
t[m] = t.get(m, []) + [factor]
for motif in motifs:
name = motif.id.split(".")[0]
if name in t:
for factor in t[name]:
anno[motif.id] = anno.get(motif.id, []) + [[factor, "ChIP-seq", "N"]]
return anno
@register_db('swissregulon')
class SwissregulonMotifDb(MotifDb):
"""
SwissRegulon
"""
URL = "http://swissregulon.unibas.ch/data/hg19_f5/hg19_weight_matrices_v2"
ANNO_URL = "http://swissregulon.unibas.ch/data/hg19_f5/hg19_mat_TF_associations.txt"
#URL = "http://swissregulon.unibas.ch/data/hg19/weight_matrices"
#ANNO_URL = "http://swissregulon.unibas.ch/data/hg19/mat_TF_associations.hg"
NAME = "SwissRegulon.pfm"
def download(self, outdir=DEFAULT_OUT):
outfile = os.path.join(outdir, self.NAME)
with open(outfile, "w") as f:
with urlopen(self.URL) as response:
for line in response:
line = line.decode().strip()
print(line, file=f)
motifs = read_motifs(outfile, fmt="transfac")
with open(outfile, "w") as f:
print("# SwissRegulon motif database (hg19:FANTOM5)", file=f)
print("# Retrieved from: {}".format(self.URL), file=f)
print("# Date: {}".format(self.date), file=f)
for motif in motifs:
if len(motif) > 0:
print(motif.to_pwm(), file=f)
motifs = read_motifs(outfile)
anno = self.annotate_factors(motifs)
self.create_annotation(os.path.join(outdir, self.NAME), anno)
def annotate_factors(self, motifs):
anno = {}
with urlopen(self.ANNO_URL) as response:
for line in response:
line = line.decode().strip()
#print(line)
motif, *factors = line.split("\t")
factors = [f.split(":")[2] for f in factors[1:]]
for factor in factors:
anno[motif] = anno.get(motif, []) + [[factor, "Unknown", "N"]]
return anno
@register_db('image')
class ImageMotifDb(MotifDb):
"""
IMAGE
"""
URL = "http://bioinformatik.sdu.dk/solexa/webshare/IMAGE/IMAGE_v1.1.tar.gz"
NAME = "IMAGE.pfm"
def download(self, outdir=DEFAULT_OUT):
tmpdir = mkdtemp()
file_tmp = urlretrieve(self.URL, filename=None)[0]
tar = tarfile.open(file_tmp)
fname = "IMAGE/utils/Collection.motif"
members = [tar.getmember(fname)]
tar.extractall(tmpdir, members=members)
outfile = os.path.join(outdir, self.NAME)
motifs = read_motifs(os.path.join(tmpdir,fname))
with open(outfile, "w") as f:
print("# IMAGE motif database (v1.1)", file=f)
print("# Retrieved from: {}".format(self.URL), file=f)
print("# Date: {}".format(self.date), file=f)
for motif in motifs:
print(motif.to_pwm(), file=f)
shutil.rmtree(tmpdir)
motifs = read_motifs(outfile)
anno = self.annotate_factors(motifs)
self.create_annotation(os.path.join(outdir, self.NAME), anno)
def annotate_factors(self, motifs):
anno = {}
tmpdir = mkdtemp()
file_tmp = urlretrieve(self.URL, filename=None)[0]
tar = tarfile.open(file_tmp)
fname = "IMAGE/utils/Genename_Motif.txt"
members = [tar.getmember(fname)]
tar.extractall(tmpdir, members=members)
with open(os.path.join(tmpdir, fname)) as f:
for line in f:
vals = line.strip().split("\t")
if len(vals) == 3:
factor, motif, status = vals
anno[motif] = anno.get(motif, []) + [[factor, status, "N"]]
shutil.rmtree(tmpdir)
return anno
@register_db('cis-bp')
class CisbpMotifDb(MotifDb):
"""
CIS-BP 1.02
"""
VERSION = "1.02"
BASE = "http://cisbp.ccbr.utoronto.ca/data/{}/DataFiles/Bulk_downloads/EntireDataset/".format(VERSION)
ANNO_URL = BASE + "/TF_Information_all_motifs.txt.zip"
URL = BASE + "/PWMs.zip"
NAME = "CIS-BP.pfm"
def download(self, outdir=DEFAULT_OUT):
tmpdir = mkdtemp()
file_tmp = urlretrieve(self.URL, filename=None)[0]
with zipfile.ZipFile(file_tmp,"r") as zip_ref:
zip_ref.extractall(tmpdir)
motifs = []
for fname in glob.glob(os.path.join(tmpdir, "pwms/*")):
m_id = os.path.splitext(os.path.basename(fname))[0]
for m in read_motifs(fname, fmt="transfac"):
if len(m) > 0:
m.id = m_id
motifs.append(m)
outfile = os.path.join(outdir, self.NAME)
with open(outfile, "w") as f:
print("# CIS-BP motif database (v{})".format(self.VERSION), file=f)
print("# Retrieved from: {}".format(self.URL), file=f)
print("# Date: {}".format(self.date), file=f)
for motif in motifs:
print(motif.to_pwm(), file=f)
shutil.rmtree(tmpdir)
motifs = read_motifs(outfile)
anno = self.annotate_factors(motifs)
self.create_annotation(os.path.join(outdir, self.NAME), anno)
def annotate_factors(self, motifs):
anno = {}
df = pd.read_table(self.ANNO_URL)
df = df.loc[
df["TF_Species"].isin(["Homo_sapiens", "Mus_musculus"]) & (df["TF_Status"] != "N"),
["Motif_ID", "TF_Name", "MSource_Type", "TF_Status"]
]
df["TF_Status"] = df["TF_Status"].str.replace("D", "Y").str.replace("I", "N")
df["MSource_Type"] = df["MSource_Type"].str.replace("HocoMoco", "ChIP-seq")
df = df.drop_duplicates()
df = df.set_index("Motif_ID")
df.columns = ["Factor", "Evidence", "Curated"]
df = df.loc[df.index.intersection([m.id for m in motifs])].dropna()
for m_id,row in df.iterrows():
anno[m_id] = anno.get(m_id, []) + [row]
return anno
@register_db('rsat')
class RsatMotifDb(MotifDb):
"""
RSAT clustered motifs
"""
URL= "http://pedagogix-tagc.univ-mrs.fr/rsat/data/published_data/Castro_2016_matrix-clustering/Application_4/{}/cor0.8_Ncor0.65/All_{}_motifs_cluster_root_motifs.tf"
NAME = "RSAT_{}.pfm"
def download(self, outdir=DEFAULT_OUT):
for tax in ["insects", "plants", "vertebrates"]:
tax_ = tax
if not tax.endswith("es"):
tax_ = tax[:-1]
url = self.URL.format(tax.capitalize(), tax_)
print(url)
name = self.NAME.format(tax)
file_tmp = urlretrieve(url, filename=None)[0]
motifs = read_motifs(file_tmp, fmt="transfac")
outfile = os.path.join(outdir, name)
with open(outfile, "w") as f:
print("# RSAT non-redundant {} motif database".format(tax), file=f)
print("# Retrieved from: {}".format(url), file=f)
print("# Date: {}".format(self.date), file=f)
for motif in motifs:
print(motif.to_pwm(), file=f)
anno = self.annotate_factors(motifs)
self.create_annotation(os.path.join(outdir, self.NAME.format(tax)), anno)
def annotate_factors(self, motifs):
anno = {}
return anno
| mit |
CurryEleison/elblogreader | urltimetaken.py | 1 | 1530 | from AwsElbLogUtil import LogFileList, LogDataFrame, UTC
from datetime import datetime
import boto3
import pandas as pd
def main():
reftime = datetime(2016, 11, 22, 9, 30, 00, 0, UTC())
print(reftime)
s3 = boto3.resource('s3')
# Set up to get recent logfiles
# loglistgetter = LogFileList(s3res = s3, strictreftime = True)
# possible values are: adm, api, mainsites, simplesitecom, userdomains, usermainsites, usersimplesites
# recents = loglistgetter.get_recents("mainsites", refdate = reftime)
# Set up object to read in the logfiles
# framegetter = LogDataFrame(s3res = s3)
# Take filenames, download and make into a dataframe
# df = framegetter.make_dataframe(recents, lambda l: hasattr(l, 'path') and l.path.startswith('/tpltest'))
framegetter = LogDataFrame(s3res = s3)
df = framegetter.make_dataframe_fromfolder("~/junk/", lambda l: hasattr(l, 'path') and l.path.startswith('/tpltest'))
# print out names and timestamps of recents
# printrecentssummary(s3, recents)
meantime = df.sort_values(by = 'utctime', ascending=False).head(10)['servertime'].mean()
totallines = df.shape
print "Mean time of most recent 10 lines was {0} and shape of linelines was {1}".format(meantime, totallines)
def printrecentssummary(s3res, s3items):
for s3objsummary in s3items:
s3obj = s3res.Object(s3objsummary.bucket_name, s3objsummary.key)
print "{0.last_modified} {0.key}".format(s3obj)
if __name__ == "__main__":
main()
| mit |
PatrickChrist/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | 1 | 55286 | # -*- coding: utf-8 -*-
#
# grid_to_gssha.py
# GSSHApy
#
# Created by Alan D Snow, 2016.
# License BSD 3-Clause
from builtins import range
from datetime import datetime
from io import open as io_open
import logging
import numpy as np
from os import mkdir, path, remove, rename
import pangaea as pa
import pandas as pd
from past.builtins import basestring
from pytz import utc
from shutil import copy
import xarray as xr
from gazar.grid import ArrayGrid
from ..lib import db_tools as dbt
log = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# HELPER FUNCTIONS
# ------------------------------------------------------------------------------
def update_hmet_card_file(hmet_card_file_path, new_hmet_data_path):
"""This function updates the paths in the HMET card file to the new
location of the HMET data. This is necessary because the file paths
are absolute and will need to be updated if moved.
Args:
hmet_card_file_path(str): Location of the file used for the HMET_ASCII card.
new_hmet_data_path(str): Location where the HMET ASCII files are currently.
Example::
new_hmet_data_path = "E:\\GSSHA\\new_hmet_directory"
hmet_card_file_path = "E:\\GSSHA\\hmet_card_file.txt"
update_hmet_card_file(hmet_card_file_path, new_hmet_data_path)
"""
hmet_card_file_path_temp = "{0}_tmp".format(hmet_card_file_path)
try:
remove(hmet_card_file_path_temp)
except OSError:
pass
copy(hmet_card_file_path, hmet_card_file_path_temp)
with io_open(hmet_card_file_path_temp, 'w', newline='\r\n') as out_hmet_list_file:
with open(hmet_card_file_path) as old_hmet_list_file:
for date_path in old_hmet_list_file:
out_hmet_list_file.write(u"{0}\n".format(path.join(new_hmet_data_path,
path.basename(date_path))))
try:
remove(hmet_card_file_path)
except OSError:
pass
rename(hmet_card_file_path_temp, hmet_card_file_path)
def esat(temp):
"""
saturation water vapour pressure is expressed with the Tetens formula
http://www.ecmwf.int/sites/default/files/elibrary/2016/16648-part-iv-physical-processes.pdf
7.2.1 (b) eqn. 7.5
esat(T) = a1*exp(a3*((T − T0)/(T − a4)))
a1 = 611.21 Pa, a3 = 17.502 and a4 = 32.19 K for saturation over water
T0 = 273.16 K
note: ignoring saturation over ice & mixed
"""
return 611.21 * np.exp(17.502 * (temp - 273.16) / (temp - 32.19))
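# Editor's note (worked check, not in the original source): at 20 degC
# (T = 293.15 K) the expression above gives
#     esat(293.15) = 611.21 * exp(17.502 * 19.99 / 260.96) ~ 2.34e3 Pa,
# i.e. about 2.34 kPa, consistent with tabulated saturation vapour pressure
# over water at that temperature.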
def array_binary_percent(in_array):
"""Makes a dataset with values greater than zero 100 percent"""
in_array[in_array > 0] = 100
return in_array
def array_binary_percent_10(in_array):
"""Makes a dataset with values greater than zero 100/10 percent"""
in_array[in_array > 0] = 10
return in_array
# ------------------------------------------------------------------------------
# MAIN CLASS
# ------------------------------------------------------------------------------
class GRIDtoGSSHA(object):
"""This class converts the LSM output data to GSSHA formatted input.
Attributes:
gssha_project_folder(:obj:`str`): Path to the GSSHA project folder
gssha_project_file_name(:obj:`str`): Name of the GSSHA elevation grid file.
lsm_input_folder_path(:obj:`str`): Path to the input folder for the LSM files.
lsm_search_card(:obj:`str`): Glob search pattern for LSM files. Ex. "*.nc".
lsm_lat_var(Optional[:obj:`str`]): Name of the latitude variable in the LSM netCDF files. Defaults to 'lat'.
lsm_lon_var(Optional[:obj:`str`]): Name of the longitude variable in the LSM netCDF files. Defaults to 'lon'.
lsm_time_var(Optional[:obj:`str`]): Name of the time variable in the LSM netCDF files. Defaults to 'time'.
lsm_lat_dim(Optional[:obj:`str`]): Name of the latitude dimension in the LSM netCDF files. Defaults to 'lat'.
lsm_lon_dim(Optional[:obj:`str`]): Name of the longitude dimension in the LSM netCDF files. Defaults to 'lon'.
lsm_time_dim(Optional[:obj:`str`]): Name of the time dimension in the LSM netCDF files. Defaults to 'time'.
output_timezone(Optional[:obj:`tzinfo`]): This is the timezone to output the dates for the data. Default is the timezone of your GSSHA model. This option does NOT currently work for NetCDF output.
pangaea_loader(Optional[:obj:`str`]): String to define loader used when opening pangaea dataset (Ex. 'hrrr'). Default is None.
Example::
from gsshapy.grid import GRIDtoGSSHA
g2g = GRIDtoGSSHA(gssha_project_folder='E:/GSSHA',
gssha_project_file_name='gssha.prj',
lsm_input_folder_path='E:/GSSHA/lsm-data',
lsm_search_card="*.nc",
)
"""
# DEFAULT GSSHA NetCDF Attributes
netcdf_attributes = {
'precipitation_rate':
# NOTE: LSM INFO
# units = "kg m-2 s-1" ; i.e. mm s-1
{
'units': {
'gage': 'mm hr-1',
'ascii': 'mm hr-1',
'netcdf': 'mm hr-1',
},
'units_netcdf': 'mm hr-1',
'standard_name': 'rainfall_flux',
'long_name': 'Rain precipitation rate',
'gssha_name': 'precipitation',
'hmet_name': 'Prcp',
'conversion_factor': {
'gage': 3600,
'ascii': 3600,
'netcdf': 3600,
},
},
'precipitation_acc':
# NOTE: LSM INFO
# assumes units = "kg m-2" ; i.e. mm
# checks for units: "m"
{
'units': {
'gage': 'mm hr-1',
'ascii': 'mm hr-1',
'netcdf': 'mm hr-1',
},
'units_netcdf': 'mm hr-1',
'standard_name': 'rainfall_flux',
'long_name': 'Rain precipitation rate',
'gssha_name': 'precipitation',
'hmet_name': 'Prcp',
'conversion_factor': {
'gage': 1,
'ascii': 1,
'netcdf': 1,
},
},
'precipitation_inc':
# NOTE: LSM INFO
# assumes units = "kg m-2" ; i.e. mm
# checks for units: "m"
{
'units': {
'gage': 'mm hr-1',
'ascii': 'mm hr-1',
'netcdf': 'mm hr-1',
},
'units_netcdf': 'mm hr-1',
'standard_name': 'rainfall_flux',
'long_name': 'Rain precipitation rate',
'gssha_name': 'precipitation',
'hmet_name': 'Prcp',
'conversion_factor': {
'gage': 1,
'ascii': 1,
'netcdf': 1,
},
},
'pressure':
# NOTE: LSM INFO
# units = "Pa" ;
{
'units': {
'ascii': 'in. Hg',
'netcdf': 'mb',
},
'standard_name': 'surface_air_pressure',
'long_name': 'Pressure',
'gssha_name': 'pressure',
'hmet_name': 'Pres',
'conversion_factor': {
'ascii': 0.000295299830714,
'netcdf': 0.01,
},
},
'pressure_hg':
{
'units': {
'ascii': 'in. Hg',
'netcdf': 'mb',
},
'standard_name': 'surface_air_pressure',
'long_name': 'Pressure',
'gssha_name': 'pressure',
'hmet_name': 'Pres',
'conversion_factor': {
'ascii': 1,
'netcdf': 33.863886667,
},
},
'relative_humidity':
# NOTE: LSM Usually Specific Humidity
# units = "kg kg-1" ;
# standard_name = "specific_humidity" ;
# long_name = "Specific humidity" ;
{
'units': {
'ascii': '%',
'netcdf': '%',
},
'standard_name': 'relative_humidity',
'long_name': 'Relative humidity',
'gssha_name': 'relative_humidity',
'hmet_name': 'RlHm',
'conversion_factor': {
'ascii': 1,
'netcdf': 1,
},
},
'relative_humidity_dew':
# NOTE: ECMWF provides dew point temperature
{
'units': {
'ascii': '%',
'netcdf': '%',
},
'standard_name': 'relative_humidity',
'long_name': 'Relative humidity',
'gssha_name': 'relative_humidity',
'hmet_name': 'RlHm',
'conversion_factor': {
'ascii': 1,
'netcdf': 1,
},
},
'swe':
# Snow Water Equivalent (SWE)
# units = "kg m-2", i.e. "mm"
# NOT IN HMET, USED FOR INITIALIZATION
# INIT_SWE_DEPTH
# http://www.gsshawiki.com/Snow_Card_Inputs_-_Optional
{
'units': {
'grid': 'm',
},
'standard_name': 'snow_water_eqivalent',
'long_name': 'Snow Water Eqivalent',
'gssha_name': 'snow_water_eqivalent',
'conversion_factor': {
'grid': 0.001,
},
},
'wind_speed':
# NOTE: LSM
# units = "m s-1" ;
{
'units': {
'ascii': 'kts',
'netcdf': 'kts',
},
'standard_name': 'wind_speed',
'long_name': 'Wind speed',
'gssha_name': 'wind_speed',
'hmet_name': 'WndS',
'conversion_factor': {
'ascii': 1.94,
'netcdf': 1.94,
},
},
'wind_speed_kmd':
# NOTE: LSM
# units = "km/day" ;
{
'units': {
'ascii': 'kts',
'netcdf': 'kts',
},
'standard_name': 'wind_speed',
'long_name': 'Wind speed',
'gssha_name': 'wind_speed',
'hmet_name': 'WndS',
'conversion_factor': {
'ascii': 0.0224537037,
'netcdf': 0.0224537037,
},
},
'wind_speed_kts':
{
'units': {
'ascii': 'kts',
'netcdf': 'kts',
},
'standard_name': 'wind_speed',
'long_name': 'Wind speed',
'gssha_name': 'wind_speed',
'hmet_name': 'WndS',
'conversion_factor': {
'ascii': 1,
'netcdf': 1,
},
},
'temperature':
# NOTE: LSM
# units = "K" ;
{
'units': {
'ascii': 'F',
'netcdf': 'C',
},
'standard_name': 'air_temperature',
'long_name': 'Temperature',
'gssha_name': 'temperature',
'hmet_name': 'Temp',
'conversion_factor': {
'ascii': 1,
'netcdf': 1,
},
'conversion_function': {
'ascii': lambda temp_kelvin: temp_kelvin * 9.0 / 5.0 - 459.67,
'netcdf': lambda temp_celcius: temp_celcius - 273.15,
},
},
'temperature_f':
{
'units': {
'ascii': 'F',
'netcdf': 'C',
},
'standard_name': 'air_temperature',
'long_name': 'Temperature',
'gssha_name': 'temperature',
'hmet_name': 'Temp',
'conversion_factor': {
'ascii': 1,
'netcdf': 1,
},
'conversion_function': {
'ascii': lambda temp_farenheight: temp_farenheight,
'netcdf': lambda temp_farenheight: (temp_farenheight - 32) * 5.0 / 9.0,
},
},
'direct_radiation':
# DIRECT/BEAM/SOLAR RADIATION
# NOTE: LSM
# WRF: global_radiation * (1-DIFFUSIVE_FRACTION)
# units = "W m-2" ;
{
'units': {
'ascii': 'W hr m-2',
'netcdf': 'W hr m-2',
},
'standard_name': 'surface_direct_downward_shortwave_flux',
'long_name': 'Direct short wave radiation flux',
'gssha_name': 'direct_radiation',
'hmet_name': 'Drad',
'conversion_factor': {
'ascii': 1,
'netcdf': 1,
},
},
'direct_radiation_j':
# DIRECT/BEAM/SOLAR RADIATION
# NOTE: LSM
# units = "J m-2" ;
{
'units': {
'ascii': 'W hr m-2',
'netcdf': 'W hr m-2',
},
'standard_name': 'surface_direct_downward_shortwave_flux',
'long_name': 'Direct short wave radiation flux',
'gssha_name': 'direct_radiation',
'hmet_name': 'Drad',
'conversion_factor': {
'ascii': 1 / 3600.0,
'netcdf': 1 / 3600.0,
},
},
'diffusive_radiation':
# DIFFUSIVE RADIATION
# NOTE: LSM
# WRF: global_radiation * DIFFUSIVE_FRACTION
# units = "W m-2" ;
{
'units': {
'ascii': 'W hr m-2',
'netcdf': 'W hr m-2',
},
'standard_name': 'surface_diffusive_downward_shortwave_flux',
'long_name': 'Diffusive short wave radiation flux',
'gssha_name': 'diffusive_radiation',
'hmet_name': 'Grad', # 6.1 GSSHA CODE INCORRECTLY SAYS IT IS GRAD
'conversion_factor': {
'ascii': 1,
'netcdf': 1,
},
},
'diffusive_radiation_j':
# DIFFUSIVE RADIATION
# NOTE: LSM
# ERA5: global_radiation - diffusive_radiation
# units = "J m-2" ;
{
'units': {
'ascii': 'W hr m-2',
'netcdf': 'W hr m-2',
},
'standard_name': 'surface_diffusive_downward_shortwave_flux',
'long_name': 'Diffusive short wave radiation flux',
'gssha_name': 'diffusive_radiation',
'hmet_name': 'Grad', # 6.1 GSSHA CODE INCORRECTLY SAYS IT IS GRAD
'conversion_factor': {
'ascii': 1 / 3600.0,
'netcdf': 1 / 3600.0,
},
},
'direct_radiation_cc':
# DIRECT/BEAM/SOLAR RADIATION
# NOTE: LSM
# DIFFUSIVE_FRACTION = cloud_cover_pc/100
# WRF: global_radiation * (1-DIFFUSIVE_FRACTION)
# units = "W m-2" ;
{
'units': {
'ascii': 'W hr m-2',
'netcdf': 'W hr m-2',
},
'standard_name': 'surface_direct_downward_shortwave_flux',
'long_name': 'Direct short wave radiation flux',
'gssha_name': 'direct_radiation',
'hmet_name': 'Drad',
'conversion_factor': {
'ascii': 1,
'netcdf': 1,
},
},
'diffusive_radiation_cc':
# DIFFUSIVE RADIATION
# NOTE: LSM
# DIFFUSIVE_FRACTION = cloud_cover_pc/100
# WRF: global_radiation * DIFFUSIVE_FRACTION
# units = "W m-2" ;
{
'units': {
'ascii': 'W hr m-2',
'netcdf': 'W hr m-2',
},
'standard_name': 'surface_diffusive_downward_shortwave_flux',
'long_name': 'Diffusive short wave radiation flux',
'gssha_name': 'diffusive_radiation',
'hmet_name': 'Grad', # 6.1 GSSHA CODE INCORRECTLY SAYS IT IS GRAD
'conversion_factor': {
'ascii': 1,
'netcdf': 1,
},
},
'cloud_cover':
# NOTE: LSM
# Between 0-1 (0=No Clouds; 1=Clouds) ;
{
'units': {
'ascii': '%',
'netcdf': '%/10',
},
'standard_name': 'cloud_cover_fraction',
'long_name': 'Cloud cover fraction',
'gssha_name': 'cloud_cover',
'hmet_name': 'Clod',
'conversion_factor': {
'ascii': 100,
'netcdf': 10,
},
'calc_4d_method': 'max',
'calc_4d_dim': 'bottom_top',
},
'cloud_cover_pc':
# NOTE: LSM
# Between 0-100 (0=No Clouds; 100=Full Clouds) ;
{
'units': {
'ascii': '%',
'netcdf': '%/10',
},
'standard_name': 'cloud_cover_fraction',
'long_name': 'Cloud cover fraction',
'gssha_name': 'cloud_cover',
'hmet_name': 'Clod',
'conversion_factor': {
'ascii': 1,
'netcdf': 0.1,
},
'calc_4d_method': 'max',
'calc_4d_dim': 'bottom_top',
},
'cloud_cover_bin':
# NOTE: LSM
# (0=No Clouds; 0>Clouds) ;
{
'units': {
'ascii': '%',
'netcdf': '%/10',
},
'standard_name': 'cloud_cover_fraction',
'long_name': 'Cloud cover fraction',
'gssha_name': 'cloud_cover',
'hmet_name': 'Clod',
'conversion_factor': {
'ascii': 1,
'netcdf': 1,
},
'conversion_function': {
'ascii': array_binary_percent,
'netcdf': array_binary_percent_10,
},
'calc_4d_method': 'max',
'calc_4d_dim': 'bottom_top',
},
}
def __init__(self,
gssha_project_folder,
gssha_project_file_name,
lsm_input_folder_path,
lsm_search_card,
lsm_lat_var='lat',
lsm_lon_var='lon',
lsm_time_var='time',
lsm_lat_dim='lat',
lsm_lon_dim='lon',
lsm_time_dim='time',
output_timezone=None,
pangaea_loader=None,
):
"""
Initializer function for the GRIDtoGSSHA class
"""
self.gssha_project_folder = gssha_project_folder
self.gssha_project_file_name = gssha_project_file_name
self.lsm_input_folder_path = lsm_input_folder_path
self.lsm_search_card = lsm_search_card
self.lsm_lat_var = lsm_lat_var
self.lsm_lon_var = lsm_lon_var
self.lsm_time_var = lsm_time_var
self.lsm_lat_dim = lsm_lat_dim
self.lsm_lon_dim = lsm_lon_dim
self.lsm_time_dim = lsm_time_dim
self.output_timezone = output_timezone
self.pangaea_loader = pangaea_loader
self._xd = None
# load in GSSHA model files
project_manager, db_sessionmaker = \
dbt.get_project_session(path.splitext(self.gssha_project_file_name)[0],
self.gssha_project_folder)
db_session = db_sessionmaker()
project_manager.read(directory=self.gssha_project_folder,
filename=self.gssha_project_file_name,
session=db_session)
self.gssha_grid = project_manager.getGrid()
if self.output_timezone is None:
self.output_timezone = project_manager.timezone
db_session.close()
# load in modeling extent
self._load_modeling_extent()
@property
def xd(self):
"""get xarray dataset file handle to LSM files"""
if self._xd is None:
path_to_lsm_files = path.join(self.lsm_input_folder_path,
self.lsm_search_card)
self._xd = pa.open_mfdataset(path_to_lsm_files,
lat_var=self.lsm_lat_var,
lon_var=self.lsm_lon_var,
time_var=self.lsm_time_var,
lat_dim=self.lsm_lat_dim,
lon_dim=self.lsm_lon_dim,
time_dim=self.lsm_time_dim,
loader=self.pangaea_loader)
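# Editor's note: the dataset opened above exposes its time coordinate as
# 'time', so the instance attributes are updated below to match.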
self.lsm_time_dim = 'time'
self.lsm_time_var = 'time'
return self._xd
def _set_subset_indices(self, y_min, y_max, x_min, x_max):
"""
load subset based on extent
"""
y_coords, x_coords = self.xd.lsm.coords
dx = self.xd.lsm.dx
dy = self.xd.lsm.dy
lsm_y_indices_from_y, lsm_x_indices_from_y = \
np.where((y_coords >= (y_min - 2*dy)) &
(y_coords <= (y_max + 2*dy)))
lsm_y_indices_from_x, lsm_x_indices_from_x = \
np.where((x_coords >= (x_min - 2*dx)) &
(x_coords <= (x_max + 2*dx)))
lsm_y_indices = np.intersect1d(lsm_y_indices_from_y,
lsm_y_indices_from_x)
lsm_x_indices = np.intersect1d(lsm_x_indices_from_y,
lsm_x_indices_from_x)
self.xslice = slice(np.amin(lsm_x_indices),
np.amax(lsm_x_indices)+1)
self.yslice = slice(np.amin(lsm_y_indices),
np.amax(lsm_y_indices)+1)
def _load_modeling_extent(self):
"""
# Get extent from GSSHA Grid in LSM coordinates
# Determine range within LSM Grid
"""
####
# STEP 1: Get extent from GSSHA Grid in LSM coordinates
####
# reproject GSSHA grid and get bounds
min_x, max_x, min_y, max_y = self.gssha_grid.bounds(as_projection=self.xd.lsm.projection)
# set subset indices
self._set_subset_indices(min_y,
max_y,
min_x,
max_x)
def _time_to_string(self, dt, conversion_string="%Y %m %d %H %M"):
"""
This converts a UTC time integer to a string
"""
if self.output_timezone is not None:
dt = dt.replace(tzinfo=utc) \
.astimezone(self.output_timezone)
return dt.strftime(conversion_string)
def _load_lsm_data(self, data_var,
conversion_factor=1,
calc_4d_method=None,
calc_4d_dim=None,
time_step=None):
"""
This extracts the LSM data from a folder of netcdf files
"""
data = self.xd.lsm.getvar(data_var,
yslice=self.yslice,
xslice=self.xslice,
calc_4d_method=calc_4d_method,
calc_4d_dim=calc_4d_dim)
if isinstance(time_step, datetime):
data = data.loc[{self.lsm_time_dim: [pd.to_datetime(time_step)]}]
elif time_step is not None:
data = data[{self.lsm_time_dim: [time_step]}]
data = data.fillna(0)
data.values *= conversion_factor
return data
def _load_converted_gssha_data_from_lsm(self, gssha_var, lsm_var, load_type, time_step=None):
"""
This function loads data from LSM and converts to GSSHA format
"""
if 'radiation' in gssha_var:
conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type]
if gssha_var.startswith('direct_radiation') and not isinstance(lsm_var, basestring):
# direct_radiation = (1-DIFFUSIVE_FRACION)*global_radiation
global_radiation_var, diffusive_fraction_var = lsm_var
global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor)
diffusive_fraction = self._load_lsm_data(diffusive_fraction_var)
if gssha_var.endswith("cc"):
diffusive_fraction /= 100.0
self.data = ((1-diffusive_fraction)*global_radiation)
elif gssha_var.startswith('diffusive_radiation') and not isinstance(lsm_var, basestring):
# diffusive_radiation = DIFFUSIVE_FRACION*global_radiation
global_radiation_var, diffusive_fraction_var = lsm_var
global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor)
diffusive_fraction = self._load_lsm_data(diffusive_fraction_var)
if gssha_var.endswith("cc"):
diffusive_fraction /= 100
self.data = (diffusive_fraction*global_radiation)
elif isinstance(lsm_var, basestring):
self.data = self._load_lsm_data(lsm_var, self.netcdf_attributes[gssha_var]['conversion_factor'][load_type])
else:
raise ValueError("Invalid LSM variable ({0}) for GSSHA variable {1}".format(lsm_var, gssha_var))
elif gssha_var == 'relative_humidity' and not isinstance(lsm_var, str):
##CONVERSION ASSUMPTIONS:
##1) These equations are for liquid water and are less accurate below 0 deg C
##2) Not adjusting the pressure for the fact that the temperature
## and moisture measurements are given at 2 m AGL.
##Neither of these should have a significant impact on RH values
##given the uncertainty in the model values themselves.
specific_humidity_var, pressure_var, temperature_var = lsm_var
specific_humidity = self._load_lsm_data(specific_humidity_var)
pressure = self._load_lsm_data(pressure_var)
temperature = self._load_lsm_data(temperature_var)
##To compute the relative humidity at 2m,
##given T, Q (water vapor mixing ratio) at 2 m and PSFC (surface pressure):
##Es(saturation vapor pressure in Pa)
##Qs(saturation mixing ratio)=(0.622*es)/(PSFC-es)
##RH = 100*Q/Qs
es = esat(temperature)
self.data = 100 * specific_humidity/((0.622*es)/(pressure-es))
elif gssha_var == 'relative_humidity_dew':
# https://software.ecmwf.int/wiki/display/CKB/Do+ERA+datasets+contain+parameters+for+near-surface+humidity
# temperature in Kelvin
# RH = 100 * es(Td)/es(T)
dew_point_temp_var, temperature_var = lsm_var
dew_point_temp = self._load_lsm_data(dew_point_temp_var)
temperature = self._load_lsm_data(temperature_var)
self.data = 100 * esat(dew_point_temp)/esat(temperature)
elif gssha_var == 'wind_speed' and not isinstance(lsm_var, str):
# WRF: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables
u_vector_var, v_vector_var = lsm_var
conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type]
u_vector = self._load_lsm_data(u_vector_var, conversion_factor)
v_vector = self._load_lsm_data(v_vector_var, conversion_factor)
self.data = (np.sqrt(u_vector ** 2 + v_vector ** 2))
elif 'precipitation' in gssha_var and not isinstance(lsm_var, str):
# WRF: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables
rain_c_var, rain_nc_var = lsm_var
conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type]
rain_c = self._load_lsm_data(rain_c_var, conversion_factor)
rain_nc = self._load_lsm_data(rain_nc_var, conversion_factor)
self.data = rain_c + rain_nc
else:
self.data = self._load_lsm_data(lsm_var,
self.netcdf_attributes[gssha_var]['conversion_factor'][load_type],
self.netcdf_attributes[gssha_var].get('calc_4d_method'),
self.netcdf_attributes[gssha_var].get('calc_4d_dim'),
time_step=time_step)
conversion_function = self.netcdf_attributes[gssha_var].get('conversion_function')
if conversion_function:
self.data.values = self.netcdf_attributes[gssha_var]['conversion_function'][load_type](self.data.values)
if 'precipitation' in gssha_var:
# NOTE: Precipitation is converted from mm/s to mm/hr
# with the conversion factor when it is a rate.
if 'units' in self.data.attrs:
if self.data.attrs['units'] == 'm':
# convert from m to mm
self.data.values *= 1000
if load_type == 'ascii' or load_type == 'netcdf':
# CONVERT TO INCREMENTAL
if gssha_var == 'precipitation_acc':
self.data.values = np.lib.pad(self.data.diff(self.lsm_time_dim).values,
((1, 0), (0, 0), (0, 0)),
'constant',
constant_values=0)
# CONVERT PRECIP TO RADAR (mm/hr) IN FILE
if gssha_var == 'precipitation_inc' or gssha_var == 'precipitation_acc':
# convert from mm to mm/hr
time_step_hours = np.diff(self.xd[self.lsm_time_var].values)[0]/np.timedelta64(1, 'h')
self.data.values /= time_step_hours
# convert to dataset
gssha_data_var_name = self.netcdf_attributes[gssha_var]['gssha_name']
self.data = self.data.to_dataset(name=gssha_data_var_name)
self.data = self.data.rename(
{
self.lsm_lon_dim: 'x',
self.lsm_lat_dim: 'y',
self.lsm_lon_var: 'lon',
self.lsm_lat_var: 'lat'
}
)
self.data.attrs = {'proj4': self.xd.lsm.projection.ExportToProj4()}
self.data[gssha_data_var_name].attrs = {
'standard_name': self.netcdf_attributes[gssha_var]['standard_name'],
'long_name': self.netcdf_attributes[gssha_var]['long_name'],
'units': self.netcdf_attributes[gssha_var]['units'][load_type],
}
def _check_lsm_input(self, data_var_map_array):
"""
This function checks the input var map array
to ensure the required input variables exist
"""
REQUIRED_HMET_VAR_LIST = ['Prcp', 'Pres', 'Temp', 'Clod',
'RlHm', 'Drad', 'Grad', 'WndS']
# make sure all required variables exist
given_hmet_var_list = []
for gssha_data_var, lsm_data_var in data_var_map_array:
gssha_data_hmet_name = self.netcdf_attributes[gssha_data_var]['hmet_name']
if gssha_data_hmet_name in given_hmet_var_list:
raise ValueError("Duplicate parameter for HMET variable {0}"
.format(gssha_data_hmet_name))
else:
given_hmet_var_list.append(gssha_data_hmet_name)
for REQUIRED_HMET_VAR in REQUIRED_HMET_VAR_LIST:
if REQUIRED_HMET_VAR not in given_hmet_var_list:
raise ValueError("ERROR: HMET param is required to continue "
"{0} ...".format(REQUIRED_HMET_VAR))
def _resample_data(self, gssha_var):
"""
This function resamples the data to match the GSSHA grid
IN TESTING MODE
"""
self.data = self.data.lsm.resample(gssha_var, self.gssha_grid)
@staticmethod
def _get_calc_function(gssha_data_var):
"""
This retrives the calc function to convert
to hourly data for the various HMET parameters
"""
calc_function = 'mean'
if gssha_data_var == 'precipitation_inc' or \
gssha_data_var == 'precipitation_acc':
# acc computed as inc previously
calc_function = 'sum'
return calc_function
def _convert_data_to_hourly(self, gssha_data_var):
"""
This function converts the data to hourly data
and then puts it into the data_np_array
USED WHEN GENERATING HMET DATA ONLY
"""
time_step_hours = np.diff(self.data.time)[0]/np.timedelta64(1, 'h')
calc_function = self._get_calc_function(gssha_data_var)
resampled_data = None
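# Editor's note: sub-hourly input is aggregated up to 1 h below (sum for
# incremental/accumulated precipitation, mean otherwise); input coarser than
# 1 h is resampled to 1 h and the gaps between the original time steps are
# filled by linear interpolation.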
if time_step_hours < 1:
resampled_data = self.data.resample('1H', dim='time',
how=calc_function,
keep_attrs=True)
elif time_step_hours > 1:
resampled_data = self.data.resample('1H', dim='time',
keep_attrs=True)
for time_idx in range(self.data.dims['time']):
if time_idx+1 < self.data.dims['time']:
# interpolate between time steps
start_time = self.data.time[time_idx].values
end_time = self.data.time[time_idx+1].values
slope_timeslice = slice(str(start_time), str(end_time))
slice_size = resampled_data.sel(time=slope_timeslice).dims['time'] - 1
first_timestep = resampled_data.sel(time=str(start_time))[gssha_data_var]
slope = (resampled_data.sel(time=str(end_time))[gssha_data_var]
- first_timestep)/float(slice_size)
data_timeslice = slice(str(start_time+np.timedelta64(1, 'm')),
str(end_time-np.timedelta64(1, 'm')))
data_subset = resampled_data.sel(time=data_timeslice)
for xidx in range(data_subset.dims['time']):
data_subset[gssha_data_var][xidx] = first_timestep + slope * (xidx+1)
else:
# just continue to repeat the timestep
start_time = self.data.time[time_idx].values
end_time = resampled_data.time[-1].values
if end_time > start_time:
first_timestep = resampled_data.sel(time=str(start_time))[gssha_data_var]
data_timeslice = slice(str(start_time), str(end_time))
data_subset = resampled_data.sel(time=data_timeslice)
slice_size = 1
if calc_function == "mean":
slice_size = data_subset.dims['time']
for xidx in range(data_subset.dims['time']):
data_subset[gssha_data_var][xidx] = first_timestep/float(slice_size)
if resampled_data is not None:
# make sure coordinates copied
if self.data.lsm.x_var not in resampled_data.coords:
resampled_data.coords[self.data.lsm.x_var] = self.data.coords[self.data.lsm.x_var]
if self.data.lsm.y_var not in resampled_data.coords:
resampled_data.coords[self.data.lsm.y_var] = self.data.coords[self.data.lsm.y_var]
self.data = resampled_data
def lsm_var_to_grid(self, out_grid_file, lsm_data_var, gssha_convert_var, time_step=0, ascii_format='grass'):
"""This function takes array data and writes out a GSSHA ascii grid.
Parameters:
out_grid_file(str): Location of ASCII file to generate.
lsm_data_var(str or list): This is the variable name for precipitation in the LSM files.
gssha_convert_var(str): This is the name of the variable used in GRIDtoGSSHA to convert data with.
time_step(Optional[int, datetime]): Time step in file to export data from. Default is the initial time step.
ascii_format(Optional[str]): Default is 'grass' for GRASS ASCII. If you want Arc ASCII, use 'arc'.
GRIDtoGSSHA Example:
.. code:: python
from gsshapy.grid import GRIDtoGSSHA
# STEP 1: Initialize class
g2g = GRIDtoGSSHA(gssha_project_folder='/path/to/gssha_project',
gssha_project_file_name='gssha_project.prj',
lsm_input_folder_path='/path/to/wrf-data',
lsm_search_card='*.nc',
lsm_lat_var='XLAT',
lsm_lon_var='XLONG',
lsm_time_var='Times',
lsm_lat_dim='south_north',
lsm_lon_dim='west_east',
lsm_time_dim='Time',
)
# STEP 2: Generate init snow grid (from LSM)
# NOTE: Card is INIT_SWE_DEPTH
g2g.lsm_var_to_grid(out_grid_file="E:/GSSHA/swe_grid.asc",
lsm_data_var='SWE_inst',
gssha_convert_var='swe')
"""
self._load_converted_gssha_data_from_lsm(gssha_convert_var, lsm_data_var, 'grid', time_step)
gssha_data_var_name = self.netcdf_attributes[gssha_convert_var]['gssha_name']
self.data = self.data.lsm.to_projection(gssha_data_var_name,
projection=self.gssha_grid.projection)
self._resample_data(gssha_data_var_name)
arr_grid = ArrayGrid(in_array=self.data[gssha_data_var_name].values,
wkt_projection=self.data.lsm.projection.ExportToWkt(),
geotransform=self.data.lsm.geotransform)
if ascii_format.strip().lower() == 'grass':
arr_grid.to_grass_ascii(out_grid_file)
elif ascii_format.strip().lower() == 'arc':
arr_grid.to_arc_ascii(out_grid_file)
else:
raise ValueError("Invalid argument for 'ascii_format'. Only 'grass' or 'arc' allowed.")
def lsm_precip_to_gssha_precip_gage(self, out_gage_file, lsm_data_var, precip_type="RADAR"):
"""This function takes array data and writes out a GSSHA precip gage file.
See: http://www.gsshawiki.com/Precipitation:Spatially_and_Temporally_Varied_Precipitation
.. note::
GSSHA CARDS:
* PRECIP_FILE card with path to gage file
* RAIN_INV_DISTANCE or RAIN_THIESSEN
Parameters:
out_gage_file(str): Location of gage file to generate.
lsm_data_var(str or list): This is the variable name for precipitation in the LSM files.
If there is a string, it assumes a single variable. If it is a
list, then it assumes the first element is the variable name for
RAINC and the second is for RAINNC
(see: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables).
precip_type(Optional[str]): This tells if the data is the ACCUM, RADAR, or GAGES data type. Default is 'RADAR'.
GRIDtoGSSHA Example:
.. code:: python
from gsshapy.grid import GRIDtoGSSHA
#STEP 1: Initialize class
g2g = GRIDtoGSSHA(gssha_project_folder='/path/to/gssha_project',
gssha_project_file_name='gssha_project.prj',
lsm_input_folder_path='/path/to/wrf-data',
lsm_search_card='*.nc',
lsm_lat_var='XLAT',
lsm_lon_var='XLONG',
lsm_time_var='Times',
lsm_lat_dim='south_north',
lsm_lon_dim='west_east',
lsm_time_dim='Time',
)
#STEP 2: Generate GAGE data (from WRF)
g2g.lsm_precip_to_gssha_precip_gage(out_gage_file="E:/GSSHA/wrf_gage_1.gag",
lsm_data_var=['RAINC', 'RAINNC'],
precip_type='ACCUM')
HRRRtoGSSHA Example:
.. code:: python
from gsshapy.grid import HRRRtoGSSHA
#STEP 1: Initialize class
h2g = HRRRtoGSSHA(
#YOUR INIT PARAMETERS HERE
)
#STEP 2: Generate GAGE data
g2g.lsm_precip_to_gssha_precip_gage(out_gage_file="E:/GSSHA/hrrr_gage_1.gag",
lsm_data_var='prate',
precip_type='RADAR')
"""
VALID_TYPES = ["ACCUM", "RADAR", "GAGES"] #NOTE: "RATES" currently not supported
if precip_type not in VALID_TYPES:
raise ValueError("ERROR: {0} is not a valid type. Valid types include: {1}".format(type, VALID_TYPES))
gssha_precip_type = "precipitation_inc"
if precip_type == "ACCUM":
gssha_precip_type = "precipitation_acc"
elif precip_type == "RADAR":
gssha_precip_type = "precipitation_rate"
self._load_converted_gssha_data_from_lsm(gssha_precip_type, lsm_data_var, 'gage')
gssha_data_var_name = self.netcdf_attributes[gssha_precip_type]['gssha_name']
self.data = self.data.lsm.to_projection(gssha_data_var_name,
projection=self.gssha_grid.projection)
#LOOP THROUGH TIME
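# Editor's note: the block below writes the GSSHA gage format -- an EVENT
# header, NRPDS (number of time periods), NRGAG (number of virtual gages, one
# per LSM cell), one COORD line per cell, then one
# "<precip_type> <datetime> <values...>" line per time step.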
with io_open(out_gage_file, 'w') as gage_file:
if self.data.dims['time']>1:
gage_file.write(u"EVENT \"Event of {0} to {1}\"\n".format(self._time_to_string(self.data.lsm.datetime[0]),
self._time_to_string(self.data.lsm.datetime[-1])))
else:
gage_file.write(u"EVENT \"Event of {0}\"\n".format(self._time_to_string(self.data.lsm.datetime[0])))
gage_file.write(u"NRPDS {0}\n".format(self.data.dims['time']))
gage_file.write(u"NRGAG {0}\n".format(self.data.dims['x']*self.data.dims['y']))
y_coords, x_coords = self.data.lsm.coords
for y_idx in range(self.data.dims['y']):
for x_idx in range(self.data.dims['x']):
coord_idx = y_idx*self.data.dims['x'] + x_idx
gage_file.write(u"COORD {0} {1} \"center of pixel #{2}\"\n".format(x_coords[y_idx, x_idx],
y_coords[y_idx, x_idx],
coord_idx))
for time_idx in range(self.data.dims['time']):
date_str = self._time_to_string(self.data.lsm.datetime[time_idx])
data_str = " ".join(self.data[gssha_data_var_name][time_idx].values.ravel().astype(str))
gage_file.write(u"{0} {1} {2}\n".format(precip_type, date_str, data_str))
def _write_hmet_card_file(self, hmet_card_file_path, main_output_folder):
"""
This function writes the HMET_ASCII card file
with ASCII file list for input to GSSHA
"""
with io_open(hmet_card_file_path, 'w') as out_hmet_list_file:
for hour_time in self.data.lsm.datetime:
date_str = self._time_to_string(hour_time, "%Y%m%d%H")
out_hmet_list_file.write(u"{0}\n".format(path.join(main_output_folder, date_str)))
def lsm_data_to_arc_ascii(self, data_var_map_array,
main_output_folder=""):
"""Writes extracted data to Arc ASCII file format into folder
to be read in by GSSHA. Also generates the HMET_ASCII card file
for GSSHA in the folder named 'hmet_file_list.txt'.
.. warning:: This method is intended for GSSHA 6 versions; for GSSHA 7 or greater, use lsm_data_to_subset_netcdf.
.. note::
GSSHA CARDS:
* HMET_ASCII pointing to the hmet_file_list.txt
* LONG_TERM (see: http://www.gsshawiki.com/Long-term_Simulations:Global_parameters)
Parameters:
data_var_map_array(list): Array to map the variables in the LSM file to the
matching required GSSHA data.
main_output_folder(Optional[str]): This is the path to place the generated ASCII files.
If not included, it defaults to
os.path.join(self.gssha_project_folder, "hmet_ascii_data").
GRIDtoGSSHA Example:
.. code:: python
from gsshapy.grid import GRIDtoGSSHA
#STEP 1: Initialize class
g2g = GRIDtoGSSHA(gssha_project_folder='/path/to/gssha_project',
gssha_project_file_name='gssha_project.prj',
lsm_input_folder_path='/path/to/wrf-data',
lsm_search_card='*.nc',
lsm_lat_var='XLAT',
lsm_lon_var='XLONG',
lsm_time_var='Times',
lsm_lat_dim='south_north',
lsm_lon_dim='west_east',
lsm_time_dim='Time',
)
#STEP 2: Generate ASCII DATA
#SEE: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables
#EXAMPLE DATA ARRAY 1: WRF GRID DATA BASED
data_var_map_array = [
['precipitation_acc', ['RAINC', 'RAINNC']],
['pressure', 'PSFC'],
['relative_humidity', ['Q2', 'PSFC', 'T2']], #MUST BE IN ORDER: ['SPECIFIC HUMIDITY', 'PRESSURE', 'TEMPERATURE']
['wind_speed', ['U10', 'V10']], #['U_VELOCITY', 'V_VELOCITY']
['direct_radiation', ['SWDOWN', 'DIFFUSE_FRAC']], #MUST BE IN ORDER: ['GLOBAL RADIATION', 'DIFFUSIVE FRACTION']
['diffusive_radiation', ['SWDOWN', 'DIFFUSE_FRAC']], #MUST BE IN ORDER: ['GLOBAL RADIATION', 'DIFFUSIVE FRACTION']
['temperature', 'T2'],
['cloud_cover' , 'CLDFRA'], #'CLOUD_FRACTION'
]
g2g.lsm_data_to_arc_ascii(data_var_map_array)
HRRRtoGSSHA Example:
.. code:: python
from gsshapy.grid import HRRRtoGSSHA
#STEP 1: Initialize class
h2g = HRRRtoGSSHA(
#YOUR INIT PARAMETERS HERE
)
#STEP 2: Generate ASCII DATA
#EXAMPLE DATA ARRAY 1: HRRR GRID DATA BASED
data_var_map_array = [
['precipitation_rate', 'prate'],
['pressure', 'sp'],
['relative_humidity', '2r'],
['wind_speed', ['10u', '10v']],
['direct_radiation_cc', ['dswrf', 'tcc']],
['diffusive_radiation_cc', ['dswrf', 'tcc']],
['temperature', 't'],
['cloud_cover_pc' , 'tcc'],
]
h2g.lsm_data_to_arc_ascii(data_var_map_array)
"""
self._check_lsm_input(data_var_map_array)
if not main_output_folder:
main_output_folder = path.join(self.gssha_project_folder, "hmet_ascii_data")
try:
mkdir(main_output_folder)
except OSError:
pass
log.info("Outputting HMET data to {0}".format(main_output_folder))
#PART 2: DATA
for data_var_map in data_var_map_array:
gssha_data_var, lsm_data_var = data_var_map
gssha_data_hmet_name = self.netcdf_attributes[gssha_data_var]['hmet_name']
gssha_data_var_name = self.netcdf_attributes[gssha_data_var]['gssha_name']
self._load_converted_gssha_data_from_lsm(gssha_data_var, lsm_data_var, 'ascii')
self._convert_data_to_hourly(gssha_data_var_name)
self.data = self.data.lsm.to_projection(gssha_data_var_name,
projection=self.gssha_grid.projection)
for time_idx in range(self.data.dims['time']):
arr_grid = ArrayGrid(in_array=self.data[gssha_data_var_name][time_idx].values,
wkt_projection=self.data.lsm.projection.ExportToWkt(),
geotransform=self.data.lsm.geotransform,
nodata_value=-9999)
date_str = self._time_to_string(self.data.lsm.datetime[time_idx], "%Y%m%d%H")
ascii_file_path = path.join(main_output_folder, "{0}_{1}.asc".format(date_str, gssha_data_hmet_name))
arr_grid.to_arc_ascii(ascii_file_path)
#PART 3: HMET_ASCII card input file with ASCII file list
hmet_card_file_path = path.join(main_output_folder, 'hmet_file_list.txt')
self._write_hmet_card_file(hmet_card_file_path, main_output_folder)
def lsm_data_to_subset_netcdf(self, netcdf_file_path,
data_var_map_array,
resample_method=None):
"""Writes extracted data to the NetCDF file format
.. todo:: NetCDF output data time is always in UTC time. Need to convert to local timezone for GSSHA.
.. warning:: The NetCDF GSSHA file is only supported in GSSHA 7 or greater.
.. note::
GSSHA CARDS:
* HMET_NETCDF pointing to the netcdf_file_path
* LONG_TERM (see: http://www.gsshawiki.com/Long-term_Simulations:Global_parameters)
Parameters:
netcdf_file_path(string): Path to output the NetCDF file for GSSHA.
data_var_map_array(list): Array to map the variables in the LSM file to the
matching required GSSHA data.
resample_method(Optional[gdalconst]): Resample input method to match hmet data to GSSHA grid for NetCDF output. Default is None.
GRIDtoGSSHA Example:
.. code:: python
from gsshapy.grid import GRIDtoGSSHA
#STEP 1: Initialize class
g2g = GRIDtoGSSHA(gssha_project_folder='/path/to/gssha_project',
gssha_project_file_name='gssha_project.prj',
lsm_input_folder_path='/path/to/wrf-data',
lsm_search_card='*.nc',
lsm_lat_var='XLAT',
lsm_lon_var='XLONG',
lsm_time_var='Times',
lsm_lat_dim='south_north',
lsm_lon_dim='west_east',
lsm_time_dim='Time',
)
#STEP 2: Generate NetCDF DATA
#EXAMPLE DATA ARRAY 1: WRF GRID DATA BASED
#SEE: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables
data_var_map_array = [
['precipitation_acc', ['RAINC', 'RAINNC']],
['pressure', 'PSFC'],
['relative_humidity', ['Q2', 'PSFC', 'T2']], #MUST BE IN ORDER: ['SPECIFIC HUMIDITY', 'PRESSURE', 'TEMPERATURE']
['wind_speed', ['U10', 'V10']], #['U_VELOCITY', 'V_VELOCITY']
['direct_radiation', ['SWDOWN', 'DIFFUSE_FRAC']], #MUST BE IN ORDER: ['GLOBAL RADIATION', 'DIFFUSIVE FRACTION']
['diffusive_radiation', ['SWDOWN', 'DIFFUSE_FRAC']], #MUST BE IN ORDER: ['GLOBAL RADIATION', 'DIFFUSIVE FRACTION']
['temperature', 'T2'],
['cloud_cover' , 'CLDFRA'], #'CLOUD_FRACTION'
]
g2g.lsm_data_to_subset_netcdf("E/GSSHA/gssha_wrf_data.nc",
data_var_map_array)
HRRRtoGSSHA Example:
.. code:: python
from gsshapy.grid import HRRRtoGSSHA
#STEP 1: Initialize class
h2g = HRRRtoGSSHA(
#YOUR INIT PARAMETERS HERE
)
#STEP 2: Generate NetCDF DATA
#EXAMPLE DATA ARRAY 2: HRRR GRID DATA BASED
data_var_map_array = [
['precipitation_rate', 'prate'],
['pressure', 'sp'],
['relative_humidity', '2r'],
['wind_speed', ['10u', '10v']],
['direct_radiation_cc', ['dswrf', 'tcc']],
['diffusive_radiation_cc', ['dswrf', 'tcc']],
['temperature', 't'],
['cloud_cover_pc' , 'tcc'],
]
h2g.lsm_data_to_subset_netcdf("E:/GSSHA/gssha_wrf_data.nc",
data_var_map_array)
"""
self._check_lsm_input(data_var_map_array)
output_datasets = []
#DATA
for gssha_var, lsm_var in data_var_map_array:
if gssha_var in self.netcdf_attributes:
self._load_converted_gssha_data_from_lsm(gssha_var, lsm_var, 'netcdf')
#previously just added data, but needs to be hourly
gssha_data_var_name = self.netcdf_attributes[gssha_var]['gssha_name']
self._convert_data_to_hourly(gssha_data_var_name)
if resample_method:
self._resample_data(gssha_data_var_name)
else:
self.data = self.data.lsm.to_projection(gssha_data_var_name,
projection=self.gssha_grid.projection)
output_datasets.append(self.data)
else:
raise ValueError("Invalid GSSHA variable name: {0} ...".format(gssha_var))
output_dataset = xr.merge(output_datasets)
#add global attributes
output_dataset.attrs['Convention'] = 'CF-1.6'
output_dataset.attrs['title'] = 'GSSHA LSM Input'
output_dataset.attrs['history'] = 'date_created: {0}'.format(datetime.utcnow())
output_dataset.attrs['proj4'] = self.data.attrs['proj4']
output_dataset.attrs['geotransform'] = self.data.attrs['geotransform']
output_dataset.to_netcdf(netcdf_file_path)
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
ndingwall/scikit-learn | benchmarks/bench_glm.py | 31 | 1478 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
if __name__ == '__main__':
import matplotlib.pyplot as plt
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = (datetime.now() - start).total_seconds()
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = (datetime.now() - start).total_seconds()
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = (datetime.now() - start).total_seconds()
plt.figure('scikit-learn GLM benchmark results')
plt.xlabel('Dimensions')
plt.ylabel('Time (s)')
plt.plot(dimensions, time_ridge, color='r')
plt.plot(dimensions, time_ols, color='g')
plt.plot(dimensions, time_lasso, color='b')
plt.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Berreman4x4/Berreman4x4 | examples/TIR.py | 1 | 1940 | #!/usr/bin/python
# encoding: utf-8
# Berreman4x4 example
# Author: O. Castany
# Total Internal Reflection
# Glass / Air
import numpy, Berreman4x4
from Berreman4x4 import c, pi
from numpy import exp, cos, arcsin, real, sqrt
import matplotlib.pyplot as pyplot
print("\n*** Glass / Air ***\n")
############################################################################
# Structure definition
# Refractive indices
n_f = 1.5
n_b = 1.0
# Materials:
glass = Berreman4x4.IsotropicNonDispersiveMaterial(n_f)
air = Berreman4x4.IsotropicNonDispersiveMaterial(n_b)
# Layer and half-spaces:
front = Berreman4x4.IsotropicHalfSpace(glass)
back = Berreman4x4.IsotropicHalfSpace(air)
# Structure:
s = Berreman4x4.Structure(front, [], back)
# Wavelength and wavenumber:
lbda = 1e-6
k0 = 2*pi/lbda
# Variation of incidence angle
Phi_list = numpy.linspace(0, pi/2*0.999)
Kx = front.get_Kx_from_Phi(Phi_list)
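# Added reference (hedged sketch, not in the original example): total internal
# reflection sets in at the critical angle Phi_c = arcsin(n_b / n_f), about
# 41.8 degrees for glass (n_f = 1.5) against air (n_b = 1.0).
Phi_c = arcsin(n_b / n_f)
print("Critical angle: {:.2f} degrees".format(Phi_c * 180 / pi))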
############################################################################
# Calculation with Berreman4x4
data = Berreman4x4.DataList([s.evaluate(kx,k0) for kx in Kx])
R_p = data.get('R_pp')
R_s = data.get('R_ss')
T_p = data.get('T_pp')
T_s = data.get('T_ss')
############################################################################
# Plotting
fig = pyplot.figure(figsize=(12., 6.))
pyplot.rcParams['axes.prop_cycle'] = pyplot.cycler('color', 'bgrcbg')
ax = fig.add_axes([0.1, 0.1, 0.7, 0.8])
y = numpy.vstack((R_s,R_p)).T
legend = ("R_s","R_p")
# lines = ax.plot(Kx, y)
lines = ax.plot(Phi_list*180/pi, y)
ax.legend(lines, legend,
loc='upper left', bbox_to_anchor=(1.05, 1), borderaxespad=0.)
ax.set_title("FTIR: Glass / Air")
ax.set_xlabel(u"Angle of incidence (°)")
ax.set_ylabel(r"Reflexion coefficients $R$")
ax.set_ylim(top=1.05)
try:
__IPYTHON__ # Are we using ipython?
pyplot.ion() # Turn on interactive mode
except NameError:
pass
# s.drawStructure()
pyplot.show()
| gpl-3.0 |
KT12/keepintouch | accesslinkedin.py | 1 | 2248 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# Credentials stored in .env file in same folder
import linkedin
import requests
import oauth2 as oauth
from urllib.parse import urlparse
from dotenv import load_dotenv
import os
from xml.etree import ElementTree
import xmltodict
import json
from pandas.io.json import json_normalize
print('Remember to use the correct directory for your machine')
os.chdir('repos/keepint')
# Load LinkedIn credentials
load_dotenv('.env')
KEY = os.environ.get('CLIENT_ID')
SECRET = os.environ.get('CLIENT_SECRET')
URI = 'http%3A%2F%2Fwww.honda.com.br'
AUTH_URL = 'https://www.linkedin.com/oauth/v2/authorization?response_type=code&client_id=' + \
KEY + '&redirect_uri=' + URI + '&state=987654321&scope=r_basicprofile'
# Code originally from URI
CODE = os.environ.get('ACCESS_CODE')
URL = 'https://www.linkedin.com/oauth/v2/accessToken?grant_type=authorization_code&code=' + CODE + \
'&redirect_uri=' + URI + '&client_id=' + KEY + '&client_secret=' + SECRET
# Token originally from LinkedIn JSON
TOKEN = os.environ.get('ACCESS_TOKEN')
XML_URL = "https://api.linkedin.com/v1/people/~:(id,first-name,last-name,headline,picture-url,industry,summary,specialties,positions:(id,title,summary,start-date,end-date,is-current,company:(id,name,type,size,industry,ticker)),educations:(id,school-name,field-of-study,start-date,end-date,degree,activities,notes),associations,interests,num-recommenders,date-of-birth,publications:(id,title,publisher:(name),authors:(id,name),date,url,summary),patents:(id,title,summary,number,status:(id,name),office:(name),inventors:(id,name),date,url),languages:(id,language:(name),proficiency:(level,name)),skills:(id,skill:(name)),certifications:(id,name,authority:(name),number,start-date,end-date),courses:(id,name,number),recommendations-received:(id,recommendation-type,recommendation-text,recommender),honors-awards,three-current-positions,three-past-positions,volunteer)?oauth2_access_token=" + TOKEN
XML_response = requests.get(XML_URL)
tree = ElementTree.fromstring(XML_response.content)
XML_data = xmltodict.parse(XML_response.content)
person_df = json_normalize(XML_data)
idx = person_df.T.index.values
person_df.to_csv('person_data.csv')
| mit |
pombredanne/bokeh | examples/charts/server/interactive_excel.py | 6 | 3202 | import xlwings as xw
import pandas as pd
from pandas.util.testing import assert_frame_equal
from bokeh.client import push_session
from bokeh.charts import Line, Bar
from bokeh.charts.operations import blend
from bokeh.models import Paragraph
from bokeh.io import curdoc, hplot, vplot
wb = xw.Workbook() # Creates a connection with a new workbook
# write example data to notebook
xw.Range('A1').value = pd.DataFrame(
{
'Italy':[3016.17,3114.73, 3128.31, 3137.38, 3089.51, 3016.32, 2942.62, 2735.05, 2813.51],
'Japan':[4004.67, 3963.47, 4089.39, 4073.75, 4068.52, 4031.99, 3880.45, 3700.22, 3883.046557],
'Brazil':[1084.48, 1075.76, 1092.31, 1096.13, 1140.61, 1158.39, 1186.27, 1240.22, 1297.91],
'USA':[8056.55, 7825.18, 7838.52, 7788.32, 7875.28, 7840.53, 7691.69, 7749.23, 7481.02],
'year':[2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008],
})
# read back to make sure we have same data format..
data = xw.Range('A1').table.value
energy_per_capita = pd.DataFrame(data[1:], columns=data[0])
countries = ['Brazil', 'Italy', 'USA', 'Japan']
def create_line(data):
""" Convenience function to create a new line chart with the right args """
return Line(data, x='year', y=countries,
legend=True, width=1400, height=300, ylabel='Energy use per capita',
palette=['purple', 'green', 'blue', 'pink'])
def create_bar(data):
op = blend(*countries, labels_name='countries', name='energy')
return Bar(data, label='year', values=op, color='countries', group='countries',
width=1400, height=600, ylabel='Energy use per capita',
palette=['purple', 'green', 'blue', 'pink'],
legend=True)
def data_changed(old):
""" Returns a new dataframe if data has changed on the excel workbook """
data = xw.Range('A1').table.value
df = pd.DataFrame(data[1:], columns=data[0])
try:
assert_frame_equal(df, old)
return None
except AssertionError:
return df
# open a session to keep our local document in sync with server
session = push_session(curdoc())
def update():
global layout
global energy_per_capita
new_df = data_changed(energy_per_capita)
if new_df is not None:
energy_per_capita = new_df
plots_box.children[0] = create_line(energy_per_capita)
plots_box.children[1] = create_bar(energy_per_capita)
line = create_line(energy_per_capita)
bar = create_bar(energy_per_capita)
desc1 = Paragraph(text="""
This example shows live integration between bokeh server and Excel using
XLWings.""")
desc2 = Paragraph(text="""
*** YOU MUST HAVE EXCEL and XLWINGS INSTALLED ON YOUR MACHINE FOR IT TO WORK ***
""")
desc3 = Paragraph(text="""
It opens this plots window and an excel spreadsheet instance with the
values being plotted. When user changes the values on the excel spreadsheet
the plots will be updated accordingly. It's not required to save the spreadsheet for the plots to update.
""")
plots_box = hplot(line, bar)
layout = vplot(desc1, desc2, desc3, plots_box)
curdoc().add_root(layout)
curdoc().add_periodic_callback(update, 500)
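# Note (added): the 500 ms periodic callback polls the spreadsheet through
# data_changed(); when the sheet no longer matches the cached DataFrame, the
# two children of plots_box are rebuilt and the open session pushes the new
# charts to the browser.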
session.show() # open the document in a browser
session.loop_until_closed() # run forever
| bsd-3-clause |
marcocaccin/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
bmcfee/gordon | gordon/io/features.py | 1 | 11661 | # Copyright (C) 2010 Ron Weiss
#
# This file is part of Gordon.
#
# Gordon is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gordon is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gordon. If not, see <http://www.gnu.org/licenses/>.
"""Utility functions for managing gordon features."""
import itertools
import os
import warnings
import matplotlib.pyplot as plt
import numpy as np
import tables
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
class CachedFeatureFile(object):
"""Interface to an HDF5 file containing cached features.
Features are indexed using a gordon FeatureExtractor object and an
optional set of keyword arguments.
"""
def __init__(self, filename, mode='r'):
self.filename = filename
self.open(mode)
def open(self, mode='r'):
self.h5file = tables.openFile(self.filename, mode=mode)
def close(self):
self.h5file.close()
# The contents of the file are organized using a tree structure of
# the form: "/FeatureExtractorID#/kwargs"
#
# This allows a single file to store the output of multiple
# feature extractors (on a single track), using any number of
# different settings. kwargs is a string representing the
# arguments passed in to Track.feature('name', **kwargs), i.e. a
# specific configuration of the feature extractor.
@staticmethod
def _args_to_string(kwargs):
if kwargs:
s = ''.join('%s%s' % (k,v) for k,v in sorted(kwargs.iteritems()))
else:
s = 'None'
return s
@staticmethod
def _get_hdf5_path(feature_extractor, kwargs):
groupname = 'FeatureExtractor%d' % feature_extractor.id
arrayname = CachedFeatureFile._args_to_string(kwargs)
return groupname, arrayname
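    # Illustrative example (added; values hypothetical): for a
    # FeatureExtractor with id 3 and kwargs {'win': 512, 'hop': 128},
    # _args_to_string returns 'hop128win512' (keys sorted), so the features
    # are cached under the HDF5 path /FeatureExtractor3/hop128win512.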
def has_features(self, feature_extractor, kwargs=None):
"""Check if the file contains a set of features.
Return True if the file contains features extracted using the
given FeatureExtractor object with the given kwargs."""
groupname, arrayname = self._get_hdf5_path(feature_extractor, kwargs)
try:
group = self.h5file.getNode(self.h5file.root, groupname)
array = self.h5file.getNode(group, arrayname)
# Sanity check.
assert array.attrs.feature_extractor_id == feature_extractor.id
assert array.attrs.kwargs == kwargs
return True
except tables.NoSuchNodeError:
try:
# Check if the cached feature is a tuple.
group = self.h5file.getNode(self.h5file.root, groupname)
array = self.h5file.getNode(group, '%s[0]' % arrayname)
return True
except tables.NoSuchNodeError:
return False
def get_features(self, feature_extractor, kwargs=None):
"""Read cached features from this file.
Depending on the output of feature_extractor.extract_features,
this will return either a numpy array or a tuple of numpy arrays.
Raises tables.NoSuchNodeError if the file doesn't contain
features corresponding to (feature_extractor, kwargs).
"""
groupname, arrayname = self._get_hdf5_path(feature_extractor, kwargs)
group = self.h5file.getNode(self.h5file.root, groupname)
try:
features = self._read_features_array(group, arrayname,
feature_extractor, kwargs)
except tables.NoSuchNodeError:
# The FeatureExtractor must have returned a tuple instead
# of a single array.
features = self._read_features_tuple(group, arrayname,
feature_extractor, kwargs)
return features
def _read_features_array(self, group, arrayname, feature_extractor,
kwargs):
array = self.h5file.getNode(group, arrayname)
# Sanity check.
assert array.attrs.feature_extractor_id == feature_extractor.id
assert array.attrs.kwargs == kwargs
# Copy the array into memory so we don't have to keep the h5
# file around longer than necessary.
return np.array(array)
def _read_features_tuple(self, group, arrayname, feature_extractor,
kwargs):
arrays = []
for n in itertools.count():
curr_arrayname = '%s[%d]' % (arrayname, n)
try:
arrays.append(self._read_features_array(
group, curr_arrayname, feature_extractor, kwargs))
except tables.NoSuchNodeError:
if arrays:
break
else:
raise
return tuple(arrays)
def list_all_features(self):
"""Return a list of all features contained in this file.
Each entry of the feature list contains a tuple of the form:
('name', FeatureExtractor.name, 'kwarg1', val1, 'kwarg2', val2, ...)
I.e. the keyword arguments passed to Track.features() to
compute the corresponding features.
"""
features_list = []
for group in self.h5file.iterNodes(self.h5file.root):
for array in self.h5file.iterNodes(group):
keylist = ['name', array.attrs.feature_extractor_name]
for k,v in array.attrs.kwargs.iteritems():
keylist.append(k)
keylist.append(v)
features_list.append(tuple(keylist))
return features_list
def get_all_features(self):
"""Return a dictionary of all features contained in this file.
The dictionary is keyed using a tuple of the form:
('name', FeatureExtractor.name, 'kwarg1', val1, 'kwarg2', val2, ...)
I.e. the key corresponds to the keyword arguments passed to
Track.features() to compute the corresponding features.
"""
features_dict = {}
for group in self.h5file.iterNodes(self.h5file.root):
for array in self.h5file.iterNodes(group):
keylist = ['name', array.attrs.feature_extractor_name]
for k,v in array.attrs.kwargs.iteritems():
keylist.append(k)
keylist.append(v)
key = tuple(keylist)
try:
if not key in features_dict:
features_dict[key] = np.copy(array)
else:
# Feature must be a tuple.
if not isinstance(features_dict[key], tuple):
features_dict[key] = (features_dict[key],)
feature_list = list(features_dict[key])
feature_list.append(np.copy(array))
features_dict[key] = tuple(feature_list)
except TypeError:
# This will happen if the cached feature includes
                    # a dict in its keyword arguments, since dicts
# are not hashable.
warnings.warn('Ignoring cached feature, name=%s, kwargs=%s'
% (array.attrs.feature_extractor_name,
                                     array.attrs.kwargs))
return features_dict
def set_features(self, feature_extractor, features, kwargs=None):
"""Write the given features to this file.
features must be a numpy array or tuple of numpy arrays.
"""
groupname, arrayname = self._get_hdf5_path(feature_extractor, kwargs)
try:
group = self.h5file.getNode(self.h5file.root, groupname)
except tables.NoSuchNodeError:
group = self.h5file.createGroup(self.h5file.root, groupname,
str(feature_extractor.name))
if isinstance(features, tuple):
for n,x in enumerate(features):
curr_arrayname = '%s[%d]' % (arrayname, n)
self._write_features_array(group, curr_arrayname, x,
feature_extractor, kwargs,
isTuple=True)
else:
self._write_features_array(group, arrayname, features,
feature_extractor, kwargs)
def _write_features_array(self, group, arrayname, array, feature_extractor,
kwargs, isTuple=False):
# Should we use createCArray for compression?
array = self.h5file.createArray(group, arrayname, np.asarray(array))
array.attrs.feature_extractor_id = feature_extractor.id
array.attrs.feature_extractor_name = str(feature_extractor.name)
array.attrs.kwargs = kwargs
array.attrs.isTuple = isTuple
def del_features(self, feature_extractor, kwargs=None):
"""Delete cached features from this file.
Raises tables.NoSuchNodeError if the file doesn't contain
features corresponding to (feature_extractor, kwargs).
"""
groupname, arrayname = self._get_hdf5_path(feature_extractor, kwargs)
try:
group = self.h5file.getNode(self.h5file.root, groupname)
self.h5file.removeNode(group, arrayname)
except tables.NoSuchNodeError:
# Check if the cached feature is a tuple.
try:
deleted_an_array = False
group = self.h5file.getNode(self.h5file.root, groupname)
for n in itertools.count():
self.h5file.removeNode(group, '%s[%d]' % (arrayname, n))
deleted_an_array = True
            except tables.NoSuchNodeError:
if not deleted_an_array:
raise
def del_all_features(self, feature_extractor=None):
"""Delete all cached features from this file.
If feature_extractor is specified, only delete features
corresponding to that FeatureExtractor.
"""
if feature_extractor:
groupname, arrayname = self._get_hdf5_path(feature_extractor, None)
self.h5file.removeNode(self.h5file.root, groupname, recursive=True)
else:
for group in self.h5file.iterNodes(self.h5file.root):
self.h5file.removeNode(group, recursive=True)
def plot_features(feats):
"""Default feature plotting function."""
if isinstance(feats, tuple):
feats = feats[0]
COLORBAR_WIDTH = 0.035
COLORBAR_PAD = 0.015
if feats.ndim == 2:
plt.imshow(feats.T, origin='lower', interpolation='nearest',
aspect='auto')
plt.colorbar(fraction=COLORBAR_WIDTH, pad=COLORBAR_PAD)
else:
plt.plot(feats)
# Compensate for colorbar axes in case this figure also
# contains some images.
axes = plt.gca()
bounds = axes.get_position().bounds
axes.set_position((bounds[0], bounds[1],
bounds[2] * (1 - COLORBAR_WIDTH - COLORBAR_PAD),
bounds[3]))
plt.gca().set_xlim((0, len(feats)-1))
| gpl-3.0 |
steffengraber/nest-simulator | pynest/examples/glif_cond_neuron.py | 14 | 9655 | # -*- coding: utf-8 -*-
#
# glif_cond_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Conductance-based generalized leaky integrate and fire (GLIF) neuron example
----------------------------------------------------------------------------
Simple example of how to use the ``glif_cond`` neuron model for
five different levels of GLIF neurons.
Four stimulation paradigms are illustrated for the GLIF model
with externally applied current and impinging spikes.
Voltage traces, injected current traces, threshold traces, synaptic
conductance traces and spikes are shown.
KEYWORDS: glif_cond
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
##############################################################################
# We initialize NEST and set the simulation resolution.
nest.ResetKernel()
resolution = 0.05
nest.SetKernelStatus({"resolution": resolution})
###############################################################################
# We create the five levels of GLIF model to be tested, i.e.,
# ``lif``, ``lif_r``, ``lif_asc``, ``lif_r_asc``, ``lif_r_asc_a``.
# For each level of GLIF model, we create a ``glif_cond`` node. The node is
# created by setting relative model mechanism parameters. Other neuron
# parameters are set as default. The five ``glif_cond`` node handles are
# combined as a list. Note that the default number of synaptic ports
# is two for spike inputs. One port is excitation receptor with time
# constant being 0.2 ms and reversal potential being 0.0 mV. The other port is
# inhibition receptor with time constant being 2.0 ms and -85.0 mV.
# Note that users can set as many synaptic ports as needed for ``glif_cond``
# by setting array parameters ``tau_syn`` and ``E_rev`` of the model.
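# As a hedged illustration (not used below), a neuron with three receptor
# ports could be requested by passing array-valued parameters, using the
# parameter names mentioned above:
#
#     n_three_ports = nest.Create("glif_cond",
#                                 params={"tau_syn": [0.2, 2.0, 5.0],
#                                         "E_rev": [0.0, -85.0, 0.0]})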
n_lif = nest.Create("glif_cond",
params={"spike_dependent_threshold": False,
"after_spike_currents": False,
"adapting_threshold": False})
n_lif_r = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": False,
"adapting_threshold": False})
n_lif_asc = nest.Create("glif_cond",
params={"spike_dependent_threshold": False,
"after_spike_currents": True,
"adapting_threshold": False})
n_lif_r_asc = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": False})
n_lif_r_asc_a = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": True})
neurons = n_lif + n_lif_r + n_lif_asc + n_lif_r_asc + n_lif_r_asc_a
###############################################################################
# For the stimulation input to the glif_cond neurons, we create one excitation
# spike generator and one inhibition spike generator, each of which generates
# three spikes; we also create one step current generator and a Poisson
# generator, a parrot neuron(to be paired with the Poisson generator).
# The three different injections are spread to three different time periods,
# i.e., 0 ms ~ 200 ms, 200 ms ~ 500 ms, 600 ms ~ 900 ms.
# Configuration of the current generator includes the definition of the start
# and stop times and the amplitude of the injected current. Configuration of
# the Poisson generator includes the definition of the start and stop times and
# the rate of the injected spike train.
espikes = nest.Create("spike_generator",
params={"spike_times": [10., 100., 150.],
"spike_weights": [20.]*3})
ispikes = nest.Create("spike_generator",
params={"spike_times": [15., 99., 150.],
"spike_weights": [-20.]*3})
cg = nest.Create("step_current_generator",
params={"amplitude_values": [400., ],
"amplitude_times": [200., ],
"start": 200., "stop": 500.})
pg = nest.Create("poisson_generator",
params={"rate": 15000., "start": 600., "stop": 900.})
pn = nest.Create("parrot_neuron")
###############################################################################
# The generators are then connected to the neurons. Specification of
# the ``receptor_type`` uniquely defines the target receptor.
# We connect current generator to receptor 0, the excitation spike generator
# and the Poisson generator (via parrot neuron) to receptor 1, and the
# inhibition spike generator to receptor 2 of the GLIF neurons.
# Note that the Poisson generator is connected to a parrot neuron to transmit
# the spikes to the glif_cond neurons.
nest.Connect(cg, neurons, syn_spec={"delay": resolution})
nest.Connect(espikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 1})
nest.Connect(ispikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 2})
nest.Connect(pg, pn, syn_spec={"delay": resolution})
nest.Connect(pn, neurons, syn_spec={"delay": resolution, "receptor_type": 1})
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create("multimeter",
params={"interval": resolution,
"record_from": ["V_m", "I", "g_1", "g_2",
"threshold",
"threshold_spike",
"threshold_voltage",
"ASCurrents_sum"]})
nest.Connect(mm, neurons)
###############################################################################
# A ``spike_recorder`` is created and connected to the neurons to record the
# spikes generated by the glif_cond neurons.
sr = nest.Create("spike_recorder")
nest.Connect(neurons, sr)
###############################################################################
# Run the simulation for 1000 ms and retrieve recorded data from
# the multimeter and spike recorder.
nest.Simulate(1000.)
data = mm.events
senders = data["senders"]
spike_data = sr.events
spike_senders = spike_data["senders"]
spikes = spike_data["times"]
###############################################################################
# We plot the time traces of the membrane potential (in blue) and
# the overall threshold (in green), and the spikes (as red dots) in one panel;
# the spike component of threshold (in yellow) and the voltage component of
# threshold (in black) in another panel; the injected currents (in strong blue),
# the sum of after spike currents (in cyan) in the third panel; and the synaptic
# conductances of the two receptors (in blue and orange) in responding to the
# spike inputs to the neurons in the fourth panel. We plot all these four
# panels for each level of GLIF model in a separated figure.
glif_models = ["lif", "lif_r", "lif_asc", "lif_r_asc", "lif_r_asc_a"]
for i in range(len(glif_models)):
glif_model = glif_models[i]
node_id = neurons[i].global_id
plt.figure(glif_model)
gs = gridspec.GridSpec(4, 1, height_ratios=[2, 1, 1, 1])
t = data["times"][senders == 1]
ax1 = plt.subplot(gs[0])
plt.plot(t, data["V_m"][senders == node_id], "b")
plt.plot(t, data["threshold"][senders == node_id], "g--")
plt.plot(spikes[spike_senders == node_id],
[max(data["threshold"][senders == node_id]) * 0.95] *
len(spikes[spike_senders == node_id]), "r.")
plt.legend(["V_m", "threshold", "spike"])
plt.ylabel("V (mV)")
plt.title("Simulation of glif_cond neuron of " + glif_model)
ax2 = plt.subplot(gs[1])
plt.plot(t, data["threshold_spike"][senders == node_id], "y")
plt.plot(t, data["threshold_voltage"][senders == node_id], "k--")
plt.legend(["threshold_spike", "threshold_voltage"])
plt.ylabel("V (mV)")
ax3 = plt.subplot(gs[2])
plt.plot(t, data["I"][senders == node_id], "--")
plt.plot(t, data["ASCurrents_sum"][senders == node_id], "c-.")
plt.legend(["I_e", "ASCurrents_sum", "I_syn"])
plt.ylabel("I (pA)")
plt.xlabel("t (ms)")
ax4 = plt.subplot(gs[3])
plt.plot(t, data["g_1"][senders == node_id], "-")
plt.plot(t, data["g_2"][senders == node_id], "--")
plt.legend(["G_1", "G_2"])
plt.ylabel("G (nS)")
plt.xlabel("t (ms)")
plt.show()
| gpl-2.0 |
Nathx/think_stats | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chosen to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevent, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
jreback/pandas | pandas/tests/extension/json/test_json.py | 1 | 10657 | import collections
import operator
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension import base
from .array import JSONArray, JSONDtype, make_data
@pytest.fixture
def dtype():
return JSONDtype()
@pytest.fixture
def data():
"""Length-100 PeriodArray for semantics test."""
data = make_data()
# Why the while loop? NumPy is unable to construct an ndarray from
# equal-length ndarrays. Many of our operations involve coercing the
# EA to an ndarray of objects. To avoid random test failures, we ensure
# that our data is coercible to an ndarray. Several tests deal with only
# the first two elements, so that's what we'll check.
while len(data[0]) == len(data[1]):
data = make_data()
return JSONArray(data)
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return JSONArray([{}, {"a": 10}])
@pytest.fixture
def data_for_sorting():
return JSONArray([{"b": 1}, {"c": 4}, {"a": 2, "c": 3}])
@pytest.fixture
def data_missing_for_sorting():
return JSONArray([{"b": 1}, {}, {"a": 4}])
@pytest.fixture
def na_value(dtype):
return dtype.na_value
@pytest.fixture
def na_cmp():
return operator.eq
@pytest.fixture
def data_for_grouping():
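    # Roughly follows the base-test convention [B, B, NA, NA, A, A, B, C]:
    # {} is the NA value, {"a": 0, "c": 2} plays A, {"b": 1} plays B and
    # {"c": 2} plays C (added comment; ordering consistent with the sorting
    # fixtures above).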
return JSONArray(
[
{"b": 1},
{"b": 1},
{},
{},
{"a": 0, "c": 2},
{"a": 0, "c": 2},
{"b": 1},
{"c": 2},
]
)
class BaseJSON:
# NumPy doesn't handle an array of equal-length UserDicts.
# The default assert_series_equal eventually does a
# Series.values, which raises. We work around it by
# converting the UserDicts to dicts.
@classmethod
def assert_series_equal(cls, left, right, *args, **kwargs):
if left.dtype.name == "json":
assert left.dtype == right.dtype
left = pd.Series(
JSONArray(left.values.astype(object)), index=left.index, name=left.name
)
right = pd.Series(
JSONArray(right.values.astype(object)),
index=right.index,
name=right.name,
)
tm.assert_series_equal(left, right, *args, **kwargs)
@classmethod
def assert_frame_equal(cls, left, right, *args, **kwargs):
obj_type = kwargs.get("obj", "DataFrame")
tm.assert_index_equal(
left.columns,
right.columns,
exact=kwargs.get("check_column_type", "equiv"),
check_names=kwargs.get("check_names", True),
check_exact=kwargs.get("check_exact", False),
check_categorical=kwargs.get("check_categorical", True),
obj=f"{obj_type}.columns",
)
jsons = (left.dtypes == "json").index
for col in jsons:
cls.assert_series_equal(left[col], right[col], *args, **kwargs)
left = left.drop(columns=jsons)
right = right.drop(columns=jsons)
tm.assert_frame_equal(left, right, *args, **kwargs)
class TestDtype(BaseJSON, base.BaseDtypeTests):
pass
class TestInterface(BaseJSON, base.BaseInterfaceTests):
def test_custom_asserts(self):
# This would always trigger the KeyError from trying to put
# an array of equal-length UserDicts inside an ndarray.
data = JSONArray(
[
collections.UserDict({"a": 1}),
collections.UserDict({"b": 2}),
collections.UserDict({"c": 3}),
]
)
a = pd.Series(data)
self.assert_series_equal(a, a)
self.assert_frame_equal(a.to_frame(), a.to_frame())
b = pd.Series(data.take([0, 0, 1]))
msg = r"ExtensionArray are different"
with pytest.raises(AssertionError, match=msg):
self.assert_series_equal(a, b)
with pytest.raises(AssertionError, match=msg):
self.assert_frame_equal(a.to_frame(), b.to_frame())
@pytest.mark.xfail(
reason="comparison method not implemented for JSONArray (GH-37867)"
)
def test_contains(self, data):
# GH-37867
super().test_contains(data)
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
@pytest.mark.skip(reason="not implemented constructor from dtype")
def test_from_dtype(self, data):
# construct from our dtype & string dtype
pass
@pytest.mark.xfail(reason="RecursionError, GH-33900")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
# RecursionError: maximum recursion depth exceeded in comparison
super().test_series_constructor_no_data_with_index(dtype, na_value)
@pytest.mark.xfail(reason="RecursionError, GH-33900")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
# RecursionError: maximum recursion depth exceeded in comparison
super().test_series_constructor_scalar_na_with_index(dtype, na_value)
@pytest.mark.xfail(reason="collection as scalar, GH-33901")
def test_series_constructor_scalar_with_index(self, data, dtype):
# TypeError: All values must be of type <class 'collections.abc.Mapping'>
super().test_series_constructor_scalar_with_index(data, dtype)
class TestReshaping(BaseJSON, base.BaseReshapingTests):
@pytest.mark.skip(reason="Different definitions of NA")
def test_stack(self):
"""
The test does .astype(object).stack(). If we happen to have
any missing values in `data`, then we'll end up with different
rows since we consider `{}` NA, but `.astype(object)` doesn't.
"""
@pytest.mark.xfail(reason="dict for NA")
def test_unstack(self, data, index):
# The base test has NaN for the expected NA value.
# this matches otherwise
return super().test_unstack(data, index)
class TestGetitem(BaseJSON, base.BaseGetitemTests):
pass
class TestMissing(BaseJSON, base.BaseMissingTests):
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_series(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_frame(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
unhashable = pytest.mark.skip(reason="Unhashable")
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(BaseJSON, base.BaseMethodsTests):
@unhashable
def test_value_counts(self, all_data, dropna):
pass
@unhashable
def test_value_counts_with_normalize(self, data):
pass
@unhashable
def test_sort_values_frame(self):
# TODO (EA.factorize): see if _values_for_factorize allows this.
pass
def test_argsort(self, data_for_sorting):
super().test_argsort(data_for_sorting)
def test_argsort_missing(self, data_missing_for_sorting):
super().test_argsort_missing(data_missing_for_sorting)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
super().test_sort_values(data_for_sorting, ascending, sort_by_key)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_missing(
self, data_missing_for_sorting, ascending, sort_by_key
):
super().test_sort_values_missing(
data_missing_for_sorting, ascending, sort_by_key
)
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_le(self, data_repeated):
pass
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_add(self, data_repeated):
pass
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_first(self, data):
pass
@unhashable
def test_hash_pandas_object_works(self, data, kind):
super().test_hash_pandas_object_works(data, kind)
@pytest.mark.skip(reason="broadcasting error")
def test_where_series(self, data, na_value):
# Fails with
# *** ValueError: operands could not be broadcast together
# with shapes (4,) (4,) (0,)
super().test_where_series(data, na_value)
@pytest.mark.skip(reason="Can't compare dicts.")
def test_searchsorted(self, data_for_sorting):
super().test_searchsorted(data_for_sorting)
@pytest.mark.skip(reason="Can't compare dicts.")
def test_equals(self, data, na_value, as_series):
pass
class TestCasting(BaseJSON, base.BaseCastingTests):
@pytest.mark.skip(reason="failing on np.array(self, dtype=str)")
def test_astype_str(self):
"""This currently fails in NumPy on np.array(self, dtype=str) with
*** ValueError: setting an array element with a sequence
"""
# We intentionally don't run base.BaseSetitemTests because pandas'
# internals has trouble setting sequences of values into scalar positions.
class TestGroupby(BaseJSON, base.BaseGroupbyTests):
@unhashable
def test_groupby_extension_transform(self):
"""
This currently fails in Series.name.setter, since the
name must be hashable, but the value is a dictionary.
I think this is what we want, i.e. `.name` should be the original
values, and not the values for factorization.
"""
@unhashable
def test_groupby_extension_apply(self):
"""
This fails in Index._do_unique_check with
> hash(val)
E TypeError: unhashable type: 'UserDict' with
I suspect that once we support Index[ExtensionArray],
we'll be able to dispatch unique.
"""
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
super().test_groupby_extension_agg(as_index, data_for_grouping)
class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):
def test_error(self, data, all_arithmetic_operators):
pass
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
with pytest.raises(TypeError, match="unsupported"):
ser + data
def test_divmod_series_array(self):
# GH 23287
# skipping because it is not implemented
pass
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
return super()._check_divmod_op(s, op, other, exc=TypeError)
class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
pass
class TestPrinting(BaseJSON, base.BasePrintingTests):
pass
| bsd-3-clause |
jreback/pandas | pandas/tests/reductions/test_stat_reductions.py | 2 | 9559 | """
Tests for statistical reductions of 2nd moment or higher: var, skew, kurt, ...
"""
import inspect
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
class TestDatetimeLikeStatReductions:
@pytest.mark.parametrize("box", [Series, pd.Index, DatetimeArray])
def test_dt64_mean(self, tz_naive_fixture, box):
tz = tz_naive_fixture
dti = pd.date_range("2001-01-01", periods=11, tz=tz)
# shuffle so that we are not just working with monotone-increasing
dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6])
dtarr = dti._data
obj = box(dtarr)
assert obj.mean() == pd.Timestamp("2001-01-06", tz=tz)
assert obj.mean(skipna=False) == pd.Timestamp("2001-01-06", tz=tz)
# dtarr[-2] will be the first date 2001-01-1
dtarr[-2] = pd.NaT
obj = box(dtarr)
assert obj.mean() == pd.Timestamp("2001-01-06 07:12:00", tz=tz)
assert obj.mean(skipna=False) is pd.NaT
@pytest.mark.parametrize("box", [Series, pd.Index, PeriodArray])
def test_period_mean(self, box):
# GH#24757
dti = pd.date_range("2001-01-01", periods=11)
# shuffle so that we are not just working with monotone-increasing
dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6])
# use hourly frequency to avoid rounding errors in expected results
# TODO: flesh this out with different frequencies
parr = dti._data.to_period("H")
obj = box(parr)
with pytest.raises(TypeError, match="ambiguous"):
obj.mean()
with pytest.raises(TypeError, match="ambiguous"):
obj.mean(skipna=True)
# parr[-2] will be the first date 2001-01-1
parr[-2] = pd.NaT
with pytest.raises(TypeError, match="ambiguous"):
obj.mean()
with pytest.raises(TypeError, match="ambiguous"):
obj.mean(skipna=True)
@pytest.mark.parametrize("box", [Series, pd.Index, TimedeltaArray])
def test_td64_mean(self, box):
tdi = pd.TimedeltaIndex([0, 3, -2, -7, 1, 2, -1, 3, 5, -2, 4], unit="D")
tdarr = tdi._data
obj = box(tdarr)
result = obj.mean()
expected = np.array(tdarr).mean()
assert result == expected
tdarr[0] = pd.NaT
assert obj.mean(skipna=False) is pd.NaT
result2 = obj.mean(skipna=True)
assert result2 == tdi[1:].mean()
# exact equality fails by 1 nanosecond
assert result2.round("us") == (result * 11.0 / 10).round("us")
class TestSeriesStatReductions:
# Note: the name TestSeriesStatReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
def _check_stat_op(
self, name, alternate, string_series_, check_objects=False, check_allna=False
):
with pd.option_context("use_bottleneck", False):
f = getattr(Series, name)
# add some NaNs
string_series_[5:15] = np.NaN
# mean, idxmax, idxmin, min, and max are valid for dates
if name not in ["max", "min", "mean", "median", "std"]:
ds = Series(pd.date_range("1/1/2001", periods=10))
msg = f"'DatetimeArray' does not implement reduction '{name}'"
with pytest.raises(TypeError, match=msg):
f(ds)
# skipna or no
assert pd.notna(f(string_series_))
assert pd.isna(f(string_series_, skipna=False))
# check the result is correct
nona = string_series_.dropna()
tm.assert_almost_equal(f(nona), alternate(nona.values))
tm.assert_almost_equal(f(string_series_), alternate(nona.values))
allna = string_series_ * np.nan
if check_allna:
assert np.isnan(f(allna))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# GH#2888
items = [0]
items.extend(range(2 ** 40, 2 ** 40 + 1000))
s = Series(items, dtype="int64")
tm.assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
s = Series(pd.bdate_range("1/1/2000", periods=10))
res = f(s)
exp = alternate(s)
assert res == exp
# check on string data
if name not in ["sum", "min", "max"]:
with pytest.raises(TypeError, match=None):
f(Series(list("abc")))
# Invalid axis.
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
f(string_series_, axis=1)
# Unimplemented numeric_only parameter.
if "numeric_only" in inspect.getfullargspec(f).args:
with pytest.raises(NotImplementedError, match=name):
f(string_series_, numeric_only=True)
def test_sum(self):
string_series = tm.makeStringSeries().rename("series")
self._check_stat_op("sum", np.sum, string_series, check_allna=False)
def test_mean(self):
string_series = tm.makeStringSeries().rename("series")
self._check_stat_op("mean", np.mean, string_series)
def test_median(self):
string_series = tm.makeStringSeries().rename("series")
self._check_stat_op("median", np.median, string_series)
# test with integers, test failure
int_ts = Series(np.ones(10, dtype=int), index=range(10))
tm.assert_almost_equal(np.median(int_ts), int_ts.median())
def test_prod(self):
string_series = tm.makeStringSeries().rename("series")
self._check_stat_op("prod", np.prod, string_series)
def test_min(self):
string_series = tm.makeStringSeries().rename("series")
self._check_stat_op("min", np.min, string_series, check_objects=True)
def test_max(self):
string_series = tm.makeStringSeries().rename("series")
self._check_stat_op("max", np.max, string_series, check_objects=True)
def test_var_std(self):
string_series = tm.makeStringSeries().rename("series")
datetime_series = tm.makeTimeSeries().rename("ts")
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op("std", alt, string_series)
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op("var", alt, string_series)
result = datetime_series.std(ddof=4)
expected = np.std(datetime_series.values, ddof=4)
tm.assert_almost_equal(result, expected)
result = datetime_series.var(ddof=4)
expected = np.var(datetime_series.values, ddof=4)
tm.assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = datetime_series.iloc[[0]]
result = s.var(ddof=1)
assert pd.isna(result)
result = s.std(ddof=1)
assert pd.isna(result)
def test_sem(self):
string_series = tm.makeStringSeries().rename("series")
datetime_series = tm.makeTimeSeries().rename("ts")
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op("sem", alt, string_series)
result = datetime_series.sem(ddof=4)
expected = np.std(datetime_series.values, ddof=4) / np.sqrt(
len(datetime_series.values)
)
tm.assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = datetime_series.iloc[[0]]
result = s.sem(ddof=1)
assert pd.isna(result)
@td.skip_if_no_scipy
def test_skew(self):
from scipy.stats import skew
string_series = tm.makeStringSeries().rename("series")
alt = lambda x: skew(x, bias=False)
self._check_stat_op("skew", alt, string_series)
# test corner cases, skew() returns NaN unless there's at least 3
# values
min_N = 3
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
assert np.isnan(s.skew())
assert np.isnan(df.skew()).all()
else:
assert 0 == s.skew()
assert (df.skew() == 0).all()
@td.skip_if_no_scipy
def test_kurt(self):
from scipy.stats import kurtosis
string_series = tm.makeStringSeries().rename("series")
alt = lambda x: kurtosis(x, bias=False)
self._check_stat_op("kurt", alt, string_series)
index = pd.MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
)
s = Series(np.random.randn(6), index=index)
tm.assert_almost_equal(s.kurt(), s.kurt(level=0)["bar"])
# test corner cases, kurt() returns NaN unless there's at least 4
# values
min_N = 4
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
assert np.isnan(s.kurt())
assert np.isnan(df.kurt()).all()
else:
assert 0 == s.kurt()
assert (df.kurt() == 0).all()
| bsd-3-clause |
aydevosotros/tradingMachine | DataManager.py | 1 | 1627 | #
# Copyright (c) 2015 by Antonio Molina García-Retamero. All Rights Reserved.
#
import requests
import datetime
import csv
import numpy as np
import matplotlib.pyplot as plt
class DataManager(object):
"""docstring for DataManager"""
def __init__(self, arg):
super(DataManager, self).__init__()
self.arg = arg
@staticmethod
def getQuotesYahoo(symbol):
url = "http://real-chart.finance.yahoo.com/table.csv?s={0}&a=00&b=1&c=2000&d={1}&e={2}&f={3}&g=d&ignore=.csv".format(symbol, datetime.datetime.now().month-1, datetime.datetime.now().day, datetime.datetime.now().year)
print(url)
r = requests.get(url)
content = r.text
rows = content.split("\n")[1:-1]
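        # The Yahoo historical CSV assumed here looks like (values
        # hypothetical):
        #   Date,Open,High,Low,Close,Volume,Adj Close
        #   2015-07-30,107.16,107.85,106.56,107.70,33223500,107.70
        # so each `data` entry holds the numeric fields and `dates` keeps the
        # leading Date field.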
data = [[float(value) for value in row.split(',')[1:]] for row in rows]
        dates = [row.split(',')[0] for row in rows]
        # Yahoo returns rows newest-first; reverse both lists so dates and
        # quotes stay aligned in chronological order.
        return [dates[::-1], data[::-1]]
@staticmethod
def byWiningDayTagging(quotes):
tags = []
for quote in quotes:
if quote[3] - quote[0] > 0:
tags.append(1)
else:
tags.append(-1)
return tags
if __name__ == '__main__':
# data = DataManager.getQuotesYahoo("FB")
data = DataManager.getQuotesYahoo("MSF")
dates = data[0]
data = data[1]
print(len(data))
fig = plt.figure()
plt.plot([x[5] for x in data])
# plt.show()
print(DataManager.byWiningDayTagging(data))
#Notes
## http://finance.yahoo.com/q/hp?s=FB&a=00&b=1&c=2008&d=06&e=31&f=2015&g=d&z=66&y=792
## http://real-chart.finance.yahoo.com/table.csv?s=FB&a=00&b=1&c=2008&d=06&e=31&f=2015&g=d&ignore=.csv
| gpl-2.0 |
fyffyt/scikit-learn | sklearn/decomposition/nmf.py | 100 | 19059 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
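# For intuition (added comment): _sparseness([1, 0, 0, 0]) == 1.0, a single
# non-zero entry being maximally sparse, while _sparseness([1, 1, 1, 1]) == 0.0
# for a maximally dense, all-equal vector.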
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
n_components : array, [n_components, n_features]
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
Truncate all values less then this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
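# Illustrative sketch (added; not part of the module): NNDSVD provides
# deterministic, non-negative starting factors for the solvers below, e.g.
#
#     rng = np.random.RandomState(0)
#     X = np.abs(rng.randn(6, 5))
#     W0, H0 = _initialize_nmf(X, n_components=2)
#     # W0.shape == (6, 2), H0.shape == (2, 5), all entries >= 0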
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
sonnyhu/scikit-learn | examples/neighbors/plot_species_kde.py | 39 | 4039 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
akhilpm/Masters-Project | autoencoder/mnistencode.py | 1 | 8878 | '''
Sparse Autoencoder
Author: Akhil P M
Courtesy: UFLDL stanford, Siddharth Agarwal
mail: [email protected]
'''
import numpy as np
import time
import math
import scipy
import scipy.io
import matplotlib.pyplot as plt
import scipy.optimize
import sys
from sklearn.datasets.mldata import fetch_mldata
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn import preprocessing
import matplotlib.gridspec as gridspec
no_of_func_calls = 1
verbosity_level = 0
class SparseAutoencoder(object):
def __init__(self, input_size, hidden_size, lambdaa, rho, beta):
""" initialize the parameters of the Autoencoder"""
self.input_size = input_size #no of input units
self.hidden_size = hidden_size #no of hidden units
self.lambdaa = lambdaa #network weight regularization factor
self.rho = rho # desired average activation of hidden units
self.beta = beta # weight of sparsity penalty term
#limits used to unroll theta into weights and biases
self.limit0 = 0
self.limit1 = hidden_size * input_size
self.limit2 = 2 * hidden_size * input_size
self.limit3 = 2 * hidden_size * input_size + hidden_size
self.limit4 = 2 * hidden_size * input_size + hidden_size + input_size
#initialize biase and weights
rand = np.random.RandomState(23455)
r = math.sqrt(6)/math.sqrt(input_size + hidden_size + 1)
W1 = np.asarray(rand.uniform(low=-r, high=r, size=(hidden_size, input_size)))
W2 = np.asarray(rand.uniform(low=-r, high=r, size=(input_size, hidden_size)))
b1 = np.zeros((hidden_size,1))
b2 = np.zeros((input_size,1))
#unroll all parameters into a single vector for optimization
self.theta = np.concatenate((W1.flatten(), W2.flatten(),
b1.flatten(), b2.flatten()))
print('======Autoencoder initialized===========')
def sparse_autoencoder_cost(self,theta,trainX):
'''computes the cost in an iteration'''
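        # The objective assembled below is the standard sparse-autoencoder cost:
        #   J = (1/2n) * ||a3 - x||^2                      (reconstruction error)
        #       + (lambda/2) * (||W1||^2 + ||W2||^2)       (weight decay)
        #       + beta * sum_j KL(rho || rho_hat_j)        (sparsity penalty)
        # where rho_hat_j is the mean activation of hidden unit j over the batch.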
global verbosity_level
#m = no of attributes, n= no of datapoints
m,n = trainX.shape
total_cost=0.0
"""extract weights and biases from theta"""
W1 = theta[self.limit0 : self.limit1].reshape(self.hidden_size, self.input_size)
W2 = theta[self.limit1 : self.limit2].reshape(self.input_size, self.hidden_size)
b1 = theta[self.limit2 : self.limit3].reshape(self.hidden_size,1)
b2 = theta[self.limit3 : self.limit4].reshape(self.input_size,1)
"""perform a forward pass"""
act_hidden_layer = sigmoid(np.dot(W1, trainX) + b1)
act_output_layer = sigmoid(np.dot(W2, act_hidden_layer) + b2)
if verbosity_level == 2:
print('activations of hidden and output layers computed')
"""estimate avg activation of hidden units"""
rho_avg = np.sum(act_hidden_layer, axis=1)/n
diff = act_output_layer-trainX
sum_of_squares_error = 0.5 * np.sum(np.square(diff))/n
        weight_decay = 0.5 * self.lambdaa * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
KL_divergence = self.beta * np.sum(self.rho * np.log(self.rho/rho_avg) +
(1-self.rho) * np.log((1-self.rho)/(1-rho_avg)))
        total_cost = sum_of_squares_error + weight_decay + KL_divergence
"""compute error in hidden layer and output layer"""
delta3 = np.multiply(diff, np.multiply(act_output_layer, 1-act_output_layer))
KL_div_grad = self.beta*(-(self.rho/rho_avg) + ((1-self.rho)/(1-rho_avg)))
delta2 = np.multiply(np.dot(np.transpose(W2),delta3) +
np.transpose(np.matrix(KL_div_grad)), np.multiply(act_hidden_layer,1-act_hidden_layer))
"""compute the gradient"""
W1_grad = np.dot(delta2, np.transpose(trainX))
W2_grad = np.dot(delta3, np.transpose(act_hidden_layer))
b1_grad = np.sum(delta2, axis=1)
b2_grad = np.sum(delta3, axis=1)
W1_grad = W1_grad/n + self.lambdaa*W1
W2_grad = W2_grad/n + self.lambdaa*W2
b1_grad = b1_grad/n
b2_grad = b2_grad/n
W1_grad = np.array(W1_grad)
W2_grad = np.array(W2_grad)
b1_grad = np.array(b1_grad)
b2_grad = np.array(b2_grad)
"""unroll the gradients into a single vector"""
theta_grad = np.concatenate((W1_grad.flatten(), W2_grad.flatten(),
b1_grad.flatten(), b2_grad.flatten()))
if verbosity_level >= 1:
print('grad J(theta) computed')
return [total_cost,theta_grad]
def sigmoid(x):
return (1/(1 + np.exp(-x)))
def normalizeDataset(dataset):
""" Remove mean of dataset """
dataset = dataset - np.mean(dataset)
""" Truncate to +/-3 standard deviations and scale to -1 to 1 """
std_dev = 3 * np.std(dataset)
dataset = np.maximum(np.minimum(dataset, std_dev), -std_dev) / std_dev
""" Rescale from [-1, 1] to [0.1, 0.9] """
dataset = (dataset + 1) * 0.4 + 0.1
return dataset
def load_dataset(num_patches, patch_size):
'''utility function to load data set'''
global verbosity_level
print('======loading dataset=======\n')
mnist = fetch_mldata('MNIST original')
sss = StratifiedShuffleSplit(mnist.target, 1, test_size=0.1, train_size=20000, random_state=0)
for train_index, test_index in sss:
trainX, testX = mnist.data[train_index], mnist.data[test_index]
trainY, testY = mnist.target[train_index], mnist.target[test_index]
no_of_images = trainX.shape[0]
""" the dataset is originally read as dictionary, convert it to an array.
the resulting array is of shape[512,512,10].
no of images=10
image size = 512*512(gray scale)
"""
#dataset is of shape [64*10,000]
dataset = np.zeros((patch_size*patch_size, num_patches))
"""Randomly sample images"""
rand = np.random.RandomState(23455)
image_number = rand.randint(no_of_images, size = num_patches)
for i in xrange(num_patches):
""""get the patch indices """
index3 = image_number[i]
""""extract patch from original image"""
dataset[:,i] = trainX[index3]
if verbosity_level==2:
print('=========patches extracted========\n')
"""normalize the dataset(min max feature scaling is used)"""
#transpose 'dataset' to form attributes as columns of the matrix, since scaling
#is to be done featurewise
if verbosity_level==2:
print('***********scaling features to [0.1, 0.9] range***********\n')
#dataset = normalizeDataset(dataset)
dataset = dataset / 255.0
#dataset = np.transpose(dataset) # newsize = 10,000*64
#min_max_scaler = preprocessing.MinMaxScaler()
#dataset = min_max_scaler.fit_transform(dataset)
#dataset = np.transpose(dataset) #transpose to 64*10,000
print('======loading dataset : completed ========\n')
return dataset
def visualizeW1(opt_W1, input_patch_size, hidden_patch_size):
""" Add the weights as a matrix of images """
figure, axes = plt.subplots(nrows = hidden_patch_size, ncols = hidden_patch_size)
#plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=0.1)
index = 0
for axis in axes.flat:
""" Add row of weights as an image to the plot """
image = axis.imshow(opt_W1[index, :].reshape(input_patch_size, input_patch_size),
cmap = plt.cm.gray, interpolation = 'nearest')
axis.set_frame_on(False)
axis.set_axis_off()
index += 1
""" Show the obtained plot """
plt.show()
def execute_sparse_autoencoder(level):
'''main function'''
"""set values for the parameters of Autoencoder"""
start_time = time.time()
input_patch_size = 28 #size of sampled image patches
hidden_patch_size = 14 #size of representative image patches
rho = .01 # sparsity parameter(desired avg activation of hidden units)
num_patches = 10000 #no of training patches
lambdaa = 0.01 #weight decay parameter
beta = 0.1 # weight of the sparsity penalty term
max_iterations = 400 #maximum iterations for optimization
global verbosity_level #set the verbosity level
verbosity_level = level
error = 0.0
input_size = input_patch_size * input_patch_size
hidden_size = hidden_patch_size * hidden_patch_size
"""load the dataset and preprocess it"""
data_train = load_dataset(num_patches, input_patch_size)
"""initialize the Autoencoder"""
encoder = SparseAutoencoder(input_size, hidden_size, lambdaa, rho, beta)
"""do gradient checking to verify the correctness of implenentation"""
#error = scipy.optimize.check_grad(func, gradient, encoder.theta, encoder, data_train)
#print('error in gradient : %f\n' %(error))
if error < 0.001:
print('++++++++++++++gradient checking passed+++++++++++')
else:
sys.exit('***********gradient checking failed***********')
"""do the optimization using L-BFGS algoritm"""
opt_solution = scipy.optimize.minimize(encoder.sparse_autoencoder_cost,
encoder.theta, args=(data_train,), method='L-BFGS-B',
jac=True, options={'maxiter': max_iterations, 'disp' : True})
print('optimization success : %r\n' %(opt_solution.success))
opt_theta = opt_solution.x
opt_W1 = opt_theta[encoder.limit0 : encoder.limit1].reshape(hidden_size,input_size)
print('execution time : %f' %(time.time() -start_time))
"""visualize W1"""
visualizeW1(opt_W1, input_patch_size, hidden_patch_size)
if __name__ == '__main__':
execute_sparse_autoencoder(0) | mit |
dmires/ThinkStats2 | code/scatter.py | 69 | 4281 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
import numpy as np
import math
import brfss
import thinkplot
import thinkstats2
def GetHeightWeight(df, hjitter=0.0, wjitter=0.0):
"""Get sequences of height and weight.
df: DataFrame with htm3 and wtkg2
hjitter: float magnitude of random noise added to heights
wjitter: float magnitude of random noise added to weights
returns: tuple of sequences (heights, weights)
"""
heights = df.htm3
if hjitter:
heights = thinkstats2.Jitter(heights, hjitter)
weights = df.wtkg2
if wjitter:
weights = thinkstats2.Jitter(weights, wjitter)
return heights, weights
def ScatterPlot(heights, weights, alpha=1.0):
"""Make a scatter plot and save it.
heights: sequence of float
weights: sequence of float
alpha: float
"""
thinkplot.Scatter(heights, weights, alpha=alpha)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def HexBin(heights, weights, bins=None):
"""Make a hexbin plot and save it.
heights: sequence of float
weights: sequence of float
bins: 'log' or None for linear
"""
thinkplot.HexBin(heights, weights, bins=bins)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def MakeFigures(df):
"""Make scatterplots.
"""
sample = thinkstats2.SampleRows(df, 5000)
# simple scatter plot
thinkplot.PrePlot(cols=2)
heights, weights = GetHeightWeight(sample)
ScatterPlot(heights, weights)
# scatter plot with jitter
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)
ScatterPlot(heights, weights)
thinkplot.Save(root='scatter1')
# with jitter and transparency
thinkplot.PrePlot(cols=2)
ScatterPlot(heights, weights, alpha=0.1)
# hexbin plot
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(df, hjitter=1.3, wjitter=0.5)
HexBin(heights, weights)
thinkplot.Save(root='scatter2')
def BinnedPercentiles(df):
"""Bin the data by height and plot percentiles of weight for eachbin.
df: DataFrame
"""
cdf = thinkstats2.Cdf(df.htm3)
print('Fraction between 140 and 200 cm', cdf[200] - cdf[140])
bins = np.arange(135, 210, 5)
indices = np.digitize(df.htm3, bins)
groups = df.groupby(indices)
heights = [group.htm3.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.wtkg2) for i, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
weights = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(heights, weights, label=label)
thinkplot.Save(root='scatter3',
xlabel='height (cm)',
ylabel='weight (kg)')
def Correlations(df):
print('pandas cov', df.htm3.cov(df.wtkg2))
#print('NumPy cov', np.cov(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Cov', thinkstats2.Cov(df.htm3, df.wtkg2))
print()
print('pandas corr', df.htm3.corr(df.wtkg2))
#print('NumPy corrcoef', np.corrcoef(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Corr', thinkstats2.Corr(df.htm3, df.wtkg2))
print()
print('pandas corr spearman', df.htm3.corr(df.wtkg2, method='spearman'))
print('thinkstats2 SpearmanCorr',
thinkstats2.SpearmanCorr(df.htm3, df.wtkg2))
    print('thinkstats2 SpearmanCorr log wtkg2',
thinkstats2.SpearmanCorr(df.htm3, np.log(df.wtkg2)))
print()
    print('thinkstats2 Corr log wtkg2',
thinkstats2.Corr(df.htm3, np.log(df.wtkg2)))
print()
def main(script):
thinkstats2.RandomSeed(17)
df = brfss.ReadBrfss(nrows=None)
df = df.dropna(subset=['htm3', 'wtkg2'])
Correlations(df)
return
MakeFigures(df)
BinnedPercentiles(df)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/tests/test_backend_ps.py | 10 | 2166 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import re
import six
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup, knownfailureif
needs_ghostscript = knownfailureif(
matplotlib.checkdep_ghostscript()[0] is None,
"This test needs a ghostscript installation")
needs_tex = knownfailureif(
not matplotlib.checkdep_tex(),
"This test needs a TeX installation")
def _test_savefig_to_stringio(format='ps'):
buffers = [
six.moves.StringIO(),
io.StringIO(),
io.BytesIO()]
plt.figure()
plt.plot([0, 1], [0, 1])
plt.title("Déjà vu")
for buffer in buffers:
plt.savefig(buffer, format=format)
values = [x.getvalue() for x in buffers]
if six.PY3:
values = [
values[0].encode('ascii'),
values[1].encode('ascii'),
values[2]]
# Remove comments from the output. This includes things that
# could change from run to run, such as the time.
values = [re.sub(b'%%.*?\n', b'', x) for x in values]
assert values[0] == values[1]
assert values[1] == values[2].replace(b'\r\n', b'\n')
for buffer in buffers:
buffer.close()
@cleanup
def test_savefig_to_stringio():
_test_savefig_to_stringio()
@cleanup
@needs_ghostscript
def test_savefig_to_stringio_with_distiller():
matplotlib.rcParams['ps.usedistiller'] = 'ghostscript'
_test_savefig_to_stringio()
@cleanup
@needs_tex
def test_savefig_to_stringio_with_usetex():
matplotlib.rcParams['text.latex.unicode'] = True
matplotlib.rcParams['text.usetex'] = True
_test_savefig_to_stringio()
@cleanup
def test_savefig_to_stringio_eps():
_test_savefig_to_stringio(format='eps')
@cleanup
@needs_tex
def test_savefig_to_stringio_with_usetex_eps():
matplotlib.rcParams['text.latex.unicode'] = True
matplotlib.rcParams['text.usetex'] = True
_test_savefig_to_stringio(format='eps')
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| gpl-2.0 |
nrhine1/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
meco-group/omg-tools | omgtools/problems/globalplanner.py | 1 | 25921 | # This file is part of OMG-tools.
#
# OMG-tools -- Optimal Motion Generation-tools
# Copyright (C) 2016 Ruben Van Parys & Tim Mercy, KU Leuven.
# All rights reserved.
#
# OMG-tools is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
from ..basics.shape import Rectangle, Square, Circle
import time
from matplotlib import pyplot as plt
import numpy as np
class GlobalPlanner(object):
def __init__(self, environment):
pass
def get_path(self, environment, curr_state, goal_state):
# Implement this in the child classes
pass
def move_point_to_grid(x,y):
# move a point in world coordinates to the closest grid point
pass
class QuadmapPlanner(GlobalPlanner):
# global planner using a quadmap
def __init__(self,environment):
GlobalPlanner.__init__(self, environment)
raise NotImplementedError('Please implement this method!')
def get_path(self, environment, curr_state, goal_state):
raise NotImplementedError('Please implement this method!')
class AStarPlanner(GlobalPlanner):
# global planner using the A*-algorithm
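    # A minimal usage sketch (values hypothetical):
    #   planner = AStarPlanner(environment, n_cells=[20, 20],
    #                          start=[0., 0.], goal=[3., 3.])
    #   waypoints = planner.get_path()   # list of [x, y] positions in [m]
    #   planner.plot_path(waypoints)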
def __init__(self, environment, n_cells, start, goal, options={}):
if isinstance(environment.room[0]['shape'], (Rectangle, Square)):
grid_width = environment.room[0]['shape'].width
grid_height = environment.room[0]['shape'].height
if 'position' in environment.room[0]:
grid_position = environment.room[0]['position']
else:
grid_position = [0, 0]
else:
raise RuntimeError('Environment has invalid room shape, only Rectangle or Square is supported')
# check if vehicle size needs to be taken into account while searching a global path
if 'veh_size' in options:
if not isinstance(options['veh_size'], list):
options['veh_size'] = [options['veh_size']]
if len(options['veh_size']) == 1:
                self.veh_size = 2*options['veh_size']
else:
self.veh_size = options['veh_size']
else:
# must consist of an offset in x- and y-direction
self.veh_size = [0.,0.]
# make grid
if ((grid_width == grid_height) and (n_cells[0] == n_cells[1])):
self.grid = SquareGrid(size=grid_width, position=grid_position, n_cells=n_cells, offset=self.veh_size)
else:
self.grid = Grid(width=grid_width, height=grid_height, position=grid_position, n_cells=n_cells, offset=self.veh_size)
# occupy grid cells based on environment
blocked = self.grid.get_occupied_cells(environment)
self.grid.block(blocked)
# only grid points are reachable so move start and goal for global planner
self.start = self.grid.move_to_gridpoint(start)
self.goal = self.grid.move_to_gridpoint(goal)
# calculate diagonal moving cost for grid
theta = np.arctan(float(self.grid.cell_height)/self.grid.cell_width)
self.diag_cost = self.grid.cell_width / np.cos(theta)
def set_start(self, start):
self.start = start
def set_goal(self, goal):
self.goal = goal
def calculate_g_cost(self, node):
if node.parent is not None:
# get movement direction from parent to node
x,y = node.pos
par_x, par_y = node.parent.pos
if x == par_x and y == par_y:
raise ValueError('Parent and node have the same position, something is wrong!')
if x != par_x and y == par_y:
# horizontal movement
g_cost = self.grid.cell_width
elif x == par_x and y != par_y:
                # vertical movement
g_cost = self.grid.cell_height
elif x != par_x and y != par_y:
# diagonal movement
g_cost = self.diag_cost
g_cost += node.parent.g_cost
return g_cost
def calculate_h_cost(self, node):
# h_cost is determined by horizontal and vertical distance from current node to goal node
# this is called the Manhattan way of determining the h cost
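        # Worked example (hypothetical numbers): goal = [5, 3], node at [2, 7],
        # cell_width = cell_height = 0.1 m  ->  h = 3*0.1 + 4*0.1 = 0.7 m.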
h_cost_x = abs(self.goal[0] - node.pos[0])*self.grid.cell_width
h_cost_y = abs(self.goal[1] - node.pos[1])*self.grid.cell_height
h_cost = h_cost_x + h_cost_y
return h_cost
def calculate_f_cost(self, node):
return node.g_cost + node.h_cost
def get_lowest_f_cost_node(self):
# find the cheapest node to go to
cost = np.inf
cheapest_node = None
for node in self.open_list:
if node.f_cost < cost:
cost = node.f_cost
cheapest_node = node
return cheapest_node
def remove_from_open_list(self, node):
for n in self.open_list:
if n == node:
self.open_list.remove(n)
break
def create_node(self, point, parent=None):
# creates a node on the location of point
return Node(point, parent)
def get_path(self, start=None, goal=None):
# main function of the A* algorithm
t1 = time.time()
if start is not None:
# only grid points are reachable
self.start = self.grid.move_to_gridpoint(start)
if goal is not None:
self.goal = self.grid.move_to_gridpoint(goal)
# initialize A*-algorithm
self.current_node = self.create_node(self.start)
self.open_list = []
self.closed_list = [self.current_node]
while self.current_node.pos != self.goal:
# get positions of current node neighbours
neighbors = self.grid.get_neighbors(self.current_node.pos)
if not neighbors:
raise RuntimeError('The current node has no free neighbors! ' +
'Consider using more grid points.')
for point in neighbors:
# suppose that the gridpoint is not yet seen
new_point = True
for n in self.closed_list:
if point == n.pos:
# point is already in closed list
new_point = False
break
for n in self.open_list:
if point == n.pos:
# point is already in open list
dummy_node = Node(point, parent=self.current_node)
new_g_cost = self.calculate_g_cost(dummy_node)
if new_g_cost <= n.g_cost:
n.parent = self.current_node
n.g_cost = new_g_cost
n.h_cost = self.calculate_h_cost(n)
n.f_cost = self.calculate_f_cost(n)
new_point = False
break
if new_point:
# make a node for the point
new_node = self.create_node(point)
new_node.parent = self.current_node
new_node.g_cost = self.calculate_g_cost(new_node)
new_node.h_cost = self.calculate_h_cost(new_node)
new_node.f_cost = self.calculate_f_cost(new_node)
self.open_list.append(new_node)
self.current_node = self.get_lowest_f_cost_node()
self.remove_from_open_list(self.current_node)
self.closed_list.append(self.current_node)
if not self.open_list:
# current node is not the goal, and open list is empty
# check if there are neigbors left which you can reach and are not yet visited
neighbors = self.grid.get_neighbors(self.current_node.pos)
closed_list_pos = []
for node in self.closed_list:
closed_list_pos.append(node.pos)
if not neighbors or all([item in closed_list_pos for item in neighbors]):
# there are no neighbors which are accessible or they are all in the closed list,
# meaning that no path could be found
raise RuntimeError('There is no path from the desired start to the desired end node! ' +
'Consider using more grid points.')
t2 = time.time()
print('Elapsed time to find a global path: ', t2-t1)
# convert a set of nodes to a set of positions
path = self.closed_list_to_path()
nodes_pos = []
for node in path:
nodes_pos.append(node.pos)
nodes_pos.reverse()
# convert node positions (indices) to waypoint positions (physical values)
path = self.convert_node_to_waypoint(nodes_pos)
return path
def closed_list_to_path(self):
# convert closed list to a list of nodes, by moving over the parent of each node
current_node = self.closed_list[-1]
path = [current_node]
while current_node != self.closed_list[0]:
next_node = current_node.parent
path.append(next_node)
current_node = next_node
return path
def convert_node_to_waypoint(self, nodes):
# convert position of node (i.e. an index in a grid) to a physical position [m]
waypoints = []
if not isinstance (nodes[0], list): # make something like [[0,1]]
nodes = [nodes]
for node in nodes:
waypoint = [0,0]
waypoint[0] = self.grid.position[0] - self.grid.width*0.5 + self.grid.cell_width*0.5 + node[0]*self.grid.cell_width
waypoint[1] = self.grid.position[1] - self.grid.height*0.5 + self.grid.cell_height*0.5 + node[1]*self.grid.cell_height
waypoints.append(waypoint)
return waypoints
def plot_path(self, path):
# plot the computed path
posx = []
posy = []
for waypoint in path:
posx.append(waypoint[0])
posy.append(waypoint[1])
plt.plot(posx,posy)
plt.show()
class Node(object):
def __init__(self, position, parent=None):
self.pos = position # index of the point in the grid
self.parent = parent # how did you end up in this node
self.f_cost = 0
self.g_cost = 0
self.h_cost = 0
def get_parent(self):
return self.parent
def get_pos(self):
return self.pos
def get_g_cost(self):
return self.g_cost
def get_h_cost(self):
return self.h_cost
def get_f_cost(self):
return self.f_cost
class Grid(object):
# based on: http://www.redblobgames.com/pathfinding/a-star/implementation.html
def __init__(self, width, height, position, n_cells, offset=[0.,0.]):
self.occupied = [] # initialize grid as empty
self.width = width
self.height = height
self.position = position
self.n_cells = n_cells # number of cells in horizontal and vertical direction
self.cell_width = self.width*1./self.n_cells[0]
self.cell_height = self.height*1./self.n_cells[1]
self.offset = offset # blows up obstacles, e.g. to take the vehicle size into account in the grid
def in_bounds(self, point):
x, y = point
# cell number starts counting at 0, until n_cells-1
return 0 <= x < self.n_cells[0] and 0 <= y < self.n_cells[1]
def block(self, points):
# block cells given by indices/position in grid
if len(points) == 2 and isinstance(points[0], int):
points = [points]
for point in points:
if self.in_bounds(point):
# only add points which are in the bounds
self.occupied.append(point)
def free(self, point):
# check if a gridpoint is free
# i.e.: not occupied and in bounds
free = False
if ((not point in self.occupied) and (self.in_bounds(point))):
free = True
return free
def is_accessible(self, point1, point2):
# Check if you can reach point2 from point1. Diagonal movement along
# an edge of an occupied point is not allowed
# graphical illustration:
# 1----2----3
# | | |
# 4----x----6
# | | |
# 7----8----9
# if x represents an occupied point, then moving from e.g. 8 to 6 is not possible
accessible = True
# only possible to be free but not accessible if diagonal movement
if (point1[0] != point2[0] and point1[1] != point2[1]):
# if diagonal movement accessible may be False
accessible = False
# diagonal up right
if (point1[0] + 1 == point2[0] and point1[1] + 1 == point2[1]):
if (self.free([point1[0], point1[1] + 1]) and self.free([point1[0] + 1, point1[1]])):
accessible = True
# diagonal up left
elif (point1[0] - 1 == point2[0] and point1[1] + 1 == point2[1]):
if (self.free([point1[0], point1[1] + 1]) and self.free([point1[0] - 1,point1[1]])):
accessible = True
# diagonal down right
elif (point1[0] + 1 == point2[0] and point1[1] - 1 == point2[1]):
if (self.free([point1[0], point1[1] - 1]) and self.free([point1[0] + 1,point1[1]])):
accessible = True
# diagonal down left
elif (point1[0] - 1 == point2[0] and point1[1] - 1 == point2[1]):
            if (self.free([point1[0], point1[1] - 1]) and self.free([point1[0] - 1, point1[1]])):
accessible = True
return accessible
def move_to_gridpoint(self, point):
# snap a certain point to the nearest unoccupied grid point
# i.e. you go from [m] to a certain index in the grid
# determine distance of point to all surrounding grid points
# find the amount of cell widths/heights which fits in the point
# this is the closest gridpoint
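        # Worked example (hypothetical grid): a 4 m x 4 m grid centred at the
        # origin with n_cells = [4, 4] has cell_width = cell_height = 1 m and its
        # bottom-left grid point at (-1.5, -1.5); the point (0.2, -1.1) then maps
        # to indices [round((0.2+1.5)/1), round((-1.1+1.5)/1)] = [2, 0].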
moved_point = [0, 0]
# remove offset, i.e. substract the bottom left gridpoint
moved_point[0] = point[0] - (self.position[0] - 0.5*self.width + 0.5*self.cell_width)
moved_point[1] = point[1] - (self.position[1] - 0.5*self.height + 0.5*self.cell_height)
# determine how many times point fits in cell dimensions, this gives the indices in the grid
# lowest possible index is zero
moved_point[0] = max(0, int(round(float(moved_point[0])/self.cell_width)))
moved_point[1] = max(0, int(round(float(moved_point[1])/self.cell_height)))
if not self.in_bounds(moved_point):
# if point still not in bounds, its index is too high
# index of last cell is self.n_cells-1, assign this to moved_point
moved_point[0] = min(moved_point[0], self.n_cells[0]-1)
moved_point[1] = min(moved_point[1], self.n_cells[1]-1)
if moved_point in self.occupied:
# closest grid point is occupied, check all neighbours of this point
points_to_check = [[moved_point[0]+1, moved_point[1]],
[moved_point[0]-1, moved_point[1]],
[moved_point[0], moved_point[1]+1],
[moved_point[0], moved_point[1]-1],
[moved_point[0]+1, moved_point[1]+1],
[moved_point[0]+1, moved_point[1]-1],
[moved_point[0]-1, moved_point[1]+1],
[moved_point[0]-1, moved_point[1]-1]]
# remove inaccessible points from points_to_check
points_to_check = list(filter(self.in_bounds, points_to_check))
points_to_check = list(filter(self.free, points_to_check))
# select closest point which is not occupied
if points_to_check is not None:
# worst case: only a diagonally placed cell is available
# --> distance to it is sqrt(cell_width**2+cell_height**2)
# use an upper bound on this to avoid taking sqrt a lot to
# initialize d_min
d_min = 2*max(self.cell_height, self.cell_width)
for p in points_to_check:
distance = self.distance_between_cells(p, moved_point)
if distance < d_min:
d_min = distance
moved_point = p
# convert position of moved_point to indices
return moved_point
def distance_between_cells(self, cell1, cell2):
if cell1 == cell2:
return 0
elif cell1[0] == cell2[0]:
return self.cell_height
elif cell1[1] == cell2[1]:
return self.cell_width
else:
return np.sqrt(self.cell_width**2 + self.cell_height**2)
def get_neighbors(self, point):
# get all the accessible neighbouring cells of a certain point
x, y = point
results = [[x+1, y], [x-1, y],
[x, y+1],[x, y-1],
[x-1, y+1], [x+1, y+1],
[x-1, y-1], [x+1, y-1]]
results = list(filter(self.in_bounds, results))
results = list(filter(self.free, results))
results = [x for x in results if self.is_accessible(point, x)]
return results
def get_occupied_cells(self, environment):
# blank out the grid points which are occupied by a certain obstacle
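        # Overlap is detected with three complementary tests per cell: (1) the
        # cell centre lies inside the bounding box of the (offset-enlarged)
        # obstacle vertices, (2) any cell vertex lies inside that bounding box,
        # or (3) any obstacle vertex lies inside the cell; circles are
        # approximated by their bounding square, enlarged by the vehicle-size
        # offset.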
occupied_cells = []
cells = []
centers_x = np.arange(self.position[0]-self.width*0.5 + 0.5*self.cell_width,
self.position[0]+self.width*0.5 + 0.5*self.cell_width,self.cell_width)
centers_y = np.arange(self.position[1]-self.height*0.5 + 0.5*self.cell_height,
self.position[1]+self.height*0.5 + 0.5*self.cell_height, self.cell_height)
i, j = 0, 0
for x in centers_x:
for y in centers_y:
cells.append({'pos': [x,y], 'index': [i, j]})
j += 1
i += 1
j = 0
for obstacle in environment.obstacles:
# only look at stationary obstacles
if ((not 'trajectories' in obstacle.simulation) or (not 'velocity' in obstacle.simulation['trajectories'])
or (all(vel == [0.]*obstacle.n_dim for vel in obstacle.simulation['trajectories']['velocity']['values']))):
pos = obstacle.signals['position'][:,-1]
if isinstance(obstacle.shape, (Rectangle, Square)):
vertices = []
vertex_x = obstacle.shape.vertices[0]
vertex_y = obstacle.shape.vertices[1]
for k in range(len(vertex_x)):
if vertex_x[k] == min(vertex_x):
# take into account offset to avoid waypoints which are so close
# to obstacles that they are not reachable
v_x = vertex_x[k] + pos[0] - self.offset[0]
else:
# take into account offset to avoid waypoints which are so close
# to obstacles that they are not reachable
v_x = vertex_x[k] + pos[0] + self.offset[0]
if vertex_y[k] == min(vertex_y):
v_y = vertex_y[k] + pos[1] - self.offset[1]
else:
v_y = vertex_y[k] + pos[1] + self.offset[1]
vertices.append([v_x, v_y])
if isinstance(obstacle.shape, Circle):
r = obstacle.shape.radius
# approximate circle by a square and add these vertices
# take into account offset, e.g. to avoid waypoints which
# are closer to obstacles than the vehicle can reach
vertices = [[pos[0] + r + self.offset[0], pos[1] + r + self.offset[1]],
[pos[0] + r + self.offset[0], pos[1] - r - self.offset[1]],
[pos[0] - r - self.offset[0], pos[1] + r + self.offset[1]],
[pos[0] - r - self.offset[0], pos[1] - r - self.offset[1]]]
vertices = np.array(vertices)
vertices = np.round(vertices, 4) # rounding off vertex positions, for easier comparison below
occ_cells = []
for cell in cells:
blocked = False # boolean to indicate if cell is blocked
# calculate cell vertices
cell_vertices = []
cell_vertices.append([cell['pos'][0] - 0.5*self.cell_width, cell['pos'][1] - 0.5*self.cell_height])
cell_vertices.append([cell['pos'][0] + 0.5*self.cell_width, cell['pos'][1] - 0.5*self.cell_height])
cell_vertices.append([cell['pos'][0] - 0.5*self.cell_width, cell['pos'][1] + 0.5*self.cell_height])
cell_vertices.append([cell['pos'][0] + 0.5*self.cell_width, cell['pos'][1] + 0.5*self.cell_height])
cell_vertices = np.array(cell_vertices)
# cell center is inside the convex hull of the vertices of an obstacle
if (min(vertices[:,0]) < cell['pos'][0] < max(vertices[:,0]) and
min(vertices[:,1]) < cell['pos'][1] < max(vertices[:,1])):
occ_cells.append(cell)
else:
# check if any of the cell vertices are inside the convex hull of the obstacle vertices
for cell_v in cell_vertices:
if (min(vertices[:,0]) < cell_v[0] < max(vertices[:,0]) and
min(vertices[:,1]) < cell_v[1] < max(vertices[:,1])):
# avoid adding the same cell multiple times
if cell not in occ_cells:
occ_cells.append(cell)
blocked = True # cell is blocked, go to next cell
break
# check if any of the obstacle vertices are inside the convex hull of the cell vertices
if not blocked: # cell was not detected as blocked yet
for v in vertices:
if (min(cell_vertices[:,0]) < v[0] < max(cell_vertices[:,0]) and
min(cell_vertices[:,1]) < v[1] < max(cell_vertices[:,1])):
# avoid adding the same cell multiple times
if cell not in occ_cells:
occ_cells.append(cell)
break # one obstacle vertex is inside the cell, go to next cell
# if cell is found to be occupied, remove it, i.e. don't check again for next obstacle
for cell in occ_cells:
cells.remove(cell)
# add cells which are occupied by the obstacle to the collection of occupied cells
occupied_cells.extend(occ_cells)
# only return the indices of the occupied cells
occ_cells = []
for cell in occupied_cells:
occ_cells.append(cell['index'])
return occ_cells
def draw(self):
# draw the grid
plt.figure()
#plot centers
centers_x = np.arange(self.position[0]-self.width*0.5 + 0.5*self.cell_width,
self.position[0]+self.width*0.5 + 0.5*self.cell_width,self.cell_width)
centers_y = np.arange(self.position[1]-self.height*0.5 + 0.5*self.cell_height,
self.position[1]+self.height*0.5 + 0.5*self.cell_height, self.cell_height)
i, j = 0, 0
for x in centers_x:
for y in centers_y:
if [i,j] not in self.occupied:
plt.plot(x,y,'ro')
j += 1
i += 1
j = 0
#plot grid lines
x_bottom = self.position[0] - 0.5*self.width
x_top = self.position[0] + 0.5*self.width
y_bottom = self.position[1] - 0.5*self.height
y_top = self.position[1] + 0.5*self.height
# make int because number of lines can only be an integer
for k in range(int(self.width/self.cell_width)+1):
x_point = x_bottom + k*self.cell_width
plt.plot([x_point,x_point], [y_bottom,y_top], 'r-')
for k in range(int(self.height/self.cell_height)+1):
y_point = y_bottom + k*self.cell_height
plt.plot([x_bottom,x_top], [y_point,y_point], 'r-')
plt.draw()
class SquareGrid(Grid):
# special case of a normal Grid, width = height
def __init__(self, size, position, n_cells, offset=[0.,0.]):
# make a general grid, with square cell
Grid.__init__(self, size, size, position, n_cells, offset) | lgpl-3.0 |
benfred/implicit | benchmarks/benchmark_als.py | 1 | 5976 | """ test script to verify the CG method works, and time it versus cholesky """
from __future__ import print_function
import argparse
import json
import logging
from collections import defaultdict
import matplotlib.pyplot as plt
import scipy.io
import seaborn
from implicit._als import calculate_loss
from implicit.als import AlternatingLeastSquares
from implicit.nearest_neighbours import bm25_weight
try:
import implicit.gpu # noqa
has_cuda = True
except ImportError:
has_cuda = False
def benchmark_accuracy(plays):
output = defaultdict(list)
def store_loss(model, name):
def inner(iteration, elapsed):
loss = calculate_loss(plays, model.item_factors, model.user_factors, 0)
print("model %s iteration %i loss %.5f" % (name, iteration, loss))
output[name].append(loss)
return inner
for steps in [2, 3, 4]:
model = AlternatingLeastSquares(
factors=100, use_native=True, use_cg=True, regularization=0, iterations=25
)
model.cg_steps = steps
model.fit_callback = store_loss(model, "cg%i" % steps)
model.fit(plays)
if has_cuda:
model = AlternatingLeastSquares(
factors=100, use_native=True, use_gpu=True, regularization=0, iterations=25
)
model.fit_callback = store_loss(model, "gpu")
model.use_gpu = True
model.fit(plays)
model = AlternatingLeastSquares(
factors=100, use_native=True, use_cg=False, regularization=0, iterations=25
)
model.fit_callback = store_loss(model, "cholesky")
model.fit(plays)
return output
def benchmark_times(plays, iterations=3):
times = defaultdict(lambda: defaultdict(list))
def store_time(model, name):
def inner(iteration, elapsed):
print(name, model.factors, iteration, elapsed)
times[name][model.factors].append(elapsed)
return inner
output = defaultdict(list)
for factors in range(32, 257, 32):
for steps in [2, 3, 4]:
model = AlternatingLeastSquares(
factors=factors,
use_native=True,
use_cg=True,
regularization=0,
iterations=iterations,
)
model.fit_callback = store_time(model, "cg%i" % steps)
model.cg_steps = steps
model.fit(plays)
model = AlternatingLeastSquares(
factors=factors, use_native=True, use_cg=False, regularization=0, iterations=iterations
)
model.fit_callback = store_time(model, "cholesky")
model.fit(plays)
if has_cuda:
model = AlternatingLeastSquares(
factors=factors,
use_native=True,
use_gpu=True,
regularization=0,
iterations=iterations,
)
model.fit_callback = store_time(model, "gpu")
model.fit(plays)
# take the min time for the output
output["factors"].append(factors)
for name, stats in times.items():
output[name].append(min(stats[factors]))
return output
LABELS = {
"cg2": "CG (2 Steps/Iteration)",
"cg3": "CG (3 Steps/Iteration)",
"cg4": "CG (4 Steps/Iteration)",
"gpu": "GPU",
"cholesky": "Cholesky",
}
COLOURS = {
"cg2": "#2ca02c",
"cg3": "#ff7f0e",
"cg4": "#c5b0d5",
"gpu": "#1f77b4",
"cholesky": "#d62728",
}
def generate_speed_graph(
data,
filename="als_speed.png",
keys=["gpu", "cg2", "cg3", "cholesky"],
labels=None,
colours=None,
):
labels = labels or {}
colours = colours or {}
seaborn.set()
fig, ax = plt.subplots()
factors = data["factors"]
for key in keys:
ax.plot(
factors, data[key], color=colours.get(key, COLOURS.get(key)), marker="o", markersize=6
)
ax.text(factors[-1] + 5, data[key][-1], labels.get(key, LABELS[key]), fontsize=10)
ax.set_ylabel("Seconds per Iteration")
ax.set_xlabel("Factors")
plt.savefig(filename, bbox_inches="tight", dpi=300)
def generate_loss_graph(data, filename="als_speed.png", keys=["gpu", "cg2", "cg3", "cholesky"]):
seaborn.set()
fig, ax = plt.subplots()
iterations = range(1, len(data["cholesky"]) + 1)
for key in keys:
ax.plot(iterations, data[key], color=COLOURS[key], marker="o", markersize=6)
ax.text(iterations[-1] + 1, data[key][-1], LABELS[key], fontsize=10)
ax.set_ylabel("Mean Squared Error")
ax.set_xlabel("Iteration")
plt.savefig(filename, bbox_inches="tight", dpi=300)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Benchmark CG version against Cholesky",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--input",
type=str,
required=True,
dest="inputfile",
help="dataset file in matrix market format",
)
parser.add_argument("--graph", help="generates graphs", action="store_true")
parser.add_argument("--loss", help="test training loss", action="store_true")
parser.add_argument("--speed", help="test training speed", action="store_true")
args = parser.parse_args()
if not (args.speed or args.loss):
print("must specify at least one of --speed or --loss")
parser.print_help()
else:
plays = bm25_weight(scipy.io.mmread(args.inputfile)).tocsr()
logging.basicConfig(level=logging.DEBUG)
if args.loss:
acc = benchmark_accuracy(plays)
json.dump(acc, open("als_accuracy.json", "w"))
if args.graph:
generate_loss_graph(acc, "als_accuracy.png")
if args.speed:
speed = benchmark_times(plays)
json.dump(speed, open("als_speed.json", "w"))
if args.graph:
generate_speed_graph(speed, "als_speed.png")
| mit |
nkeim/runtrackpy | runtrackpy/run.py | 1 | 10463 | """Manage tracking of many movies in parallel."""
# Copyright 2013 Nathan C. Keim
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License, or (at
#your option) any later version.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, see <http://www.gnu.org/licenses>.
import os, json, time, datetime
import pandas
from util import DirBase, readSingleCfg
from .statusboard import format_td
def _runtracking(mov, cfg, progress=False):
"""Decide parameters for tracking and then run track2disk().
'cfg' is a dict that can contain tracking parameters in the 'quickparams' entry;
otherwise, they are loaded from disk.
To be run in a parallel worker. Expects to find the track2disk() function in
runtrackpy.track
"""
from runtrackpy.track import track2disk, get_window
with mov():
# Read parameters
if cfg.get('quickparams') is not None:
params = cfg['quickparams']
else:
params = readSingleCfg(cfg['paramsfilename'])
# Find image files
if cfg.get('frames_pattern') is not None:
framefiles = mov.p.glob(cfg['frames_pattern'])
else:
try:
framefiles = mov.framesRecord().filename.tolist()
except AttributeError:
raise RuntimeError('Automatic image filenames not available. Specify "frames_pattern"')
# Choose frames
if cfg.get('selectframes') is not None:
selectframes = cfg['selectframes']
else:
window = get_window()
lastframe = window['lastframe']
if lastframe == -1:
lastframe = len(framefiles)
selectframes = range(window['firstframe'], lastframe + 1)
track2disk(framefiles,
cfg['tracksfilename'], params, selectframes=selectframes,
statusfile=cfg['statusfilename'], progress=progress)
return mov.p
class TrackingRunner(object):
"""User interface for parallel tracking in IPython. Basic idea: run a specified
function (default _runtracking()) in a parallel worker for each movie directory
given, and monitor status of the tracking jobs.
'movie_dirs' is a list of directory names.
'load_balanced_view' is an IPython parallel processing view. If not specified, you
can use only the run() method below.
'tracksfilename' is the destination tracks file in each movie directory.
By convention it has the extension ".h5". Any needed subdirectories
will be created.
'quickparams' lets you pass a dictionary of tracking parameters.
If you do not specify it, the tracking parameters are loaded from
the file "trackpy.ini" in each movie directory. See the "track" module
for details of what parameters are required.
'frames_pattern' uses glob-style wildcards to specify image files, e.g.
"Frame_*.png"
'paramsfilename' is the name of the .ini file in each directory where parameters
are stored (ignored if 'quickparams' was given).
'statusfilename' and 'tracking_function' are not user-serviceable.
An instance can be constructed with 'from_objects()' if you would like to pass
your own instances of util.DirBase() or some work-alike class.
"""
# FIXME: Restructure so that self.movies is a collection of task objects.
# This would let options like 'quickparams' be set on a per-task basis.
def __init__(self, movie_dirs, load_balanced_view=None,
tracksfilename='bigtracks.h5',
quickparams=None, frames_pattern=None,
paramsfilename='trackpy.ini',
statusfilename='trackingstatus.json',
tracking_function=_runtracking):
"""If quickparams == None, use 'trackpy.ini' in each directory.
If frames_pattern == None, tries to obtain the file list from
the author's own custom movie class.
"""
self.movies = [DirBase(d) for d in movie_dirs]
self.tracksfilename = tracksfilename
self.statusfilename = statusfilename
self.paramsfilename = paramsfilename
self.frames_pattern = frames_pattern
self.quickparams = quickparams
self.tracking_function = tracking_function
self.parallel_results = []
self.parallel_results_mostrecent = {}
self.load_balanced_view = load_balanced_view
@classmethod
def from_objects(cls, objlist, *args, **kw):
"""Initializes a TrackingRunner from a list of DirBase-like objects"""
r = cls([], *args, **kw)
r.movies = objlist
return r
def _prepare_run_config(self, mov):
cfg = dict(quickparams=self.quickparams, tracksfilename=self.tracksfilename,
statusfilename=self.statusfilename, paramsfilename=self.paramsfilename,
frames_pattern=self.frames_pattern)
return mov, cfg
def submit(self, movie_index, clear_output=False):
"""Submit (or resubmit) a job to the load-balanced view.
'movie_index' references what you see from status_board().
If 'clear_output', delete the output and status files.
"""
mov = self.movies[movie_index]
if clear_output:
outputfile = mov.p / self.tracksfilename
if outputfile.exists():
outputfile.unlink()
statusfile = mov.p / self.statusfilename
if statusfile.exists():
statusfile.unlink()
pres = self.load_balanced_view.apply(self.tracking_function, *self._prepare_run_config(mov))
self.parallel_results.append((movie_index, pres))
self.parallel_results_mostrecent[movie_index] = pres
return pres
def start(self, clear_output=False):
"""Start jobs for all movies on an IPython load-balanced cluster view.
If 'clear_output', delete the output and status files.
"""
for i in range(len(self.movies)):
self.submit(i, clear_output=clear_output)
def abort(self, movie_index):
"""Cancel job. 'movie_index' references what you see from status_board().
"""
return self.parallel_results_mostrecent[movie_index].abort()
def run(self, movie_index, clear_output=False, progress=False):
"""Run job in the current process (not parallel).
If 'progress', display status updates."""
mov = self.movies[movie_index]
with mov():
if clear_output:
outputfile = mov.p / self.tracksfilename
if outputfile.exists():
outputfile.unlink()
statusfile = mov.p / self.statusfilename
if statusfile.exists():
statusfile.unlink()
return self.tracking_function(*self._prepare_run_config(mov),
progress=progress)
def display_outputs(self):
from IPython.parallel import TimeoutError
for i in range(len(self.movies)):
print 'Movie index {}'.format(i)
try:
print self.parallel_results_mostrecent[i].display_outputs()
except TimeoutError:
pass
def read_statuses(self):
"""Returns DataFrame of all status info"""
info = []
for mov in self.movies:
sfn = mov.p / self.statusfilename
try:
sf = open(sfn, 'r')
sfinfo = json.load(sf)
since_update = datetime.timedelta(0, time.time() - os.path.getmtime(sfn))
sfinfo['since_update'] = format_td(since_update)
except IOError:
sfinfo = {'working_dir': os.path.dirname(os.path.abspath(sfn)),
'status': 'waiting'}
since_update = None
if (mov.p / self.tracksfilename).exists():
sfinfo['output'] = 'yes'
else:
sfinfo['output'] = ''
if sfinfo['status'] != 'done':
if since_update is not None:
# heartbeat timeout is 10x frame interval, or 5 minutes,
# whichever is greater.
heartbeat_timeout = max(float(sfinfo.get('seconds_per_frame', 0)) * 10,
300)
if since_update.total_seconds() > heartbeat_timeout:
sfinfo['status'] = 'DEAD'
else:
# If there is a tracks file but no status file, act confused.
if sfinfo['output']:
sfinfo['status'] = '??'
else:
# If there's no tracks file, assume the status is from an old run
if not sfinfo['output']:
sfinfo['status'] = 'waiting'
info.append(sfinfo)
return pandas.DataFrame(info)
def status_board(self):
"""Presents status info for a list of filenames.
Returns a DataFrame, which should display nicely.
"""
df = self.read_statuses().rename(columns={'seconds_per_frame': 'secs_per_frame'})
columns = ['working_dir', 'process_id',
'totalframes', 'mr_frame', 'secs_per_frame',
'elapsed_time', 'time_left', 'status', 'output',
'since_update']
for cn in columns:
if cn not in df:
df[cn] = ''
return df[columns]
def watch(self, interval=5):
"""Regularly updated status board."""
import IPython.display
try:
while True:
sb = self.status_board()
IPython.display.clear_output()
IPython.display.display_html(sb.to_html(na_rep=''), raw=True)
time.sleep(interval)
except KeyboardInterrupt:
IPython.display.clear_output()
IPython.display.display_html(sb.to_html(na_rep=''), raw=True)
print 'Last update: ' + datetime.datetime.now().strftime('%c')
return
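def _example_session():  # pragma: no cover
    """Illustrative sketch only (not part of the original API): drives TrackingRunner
    from an IPython load-balanced cluster view. The movie directory names are
    placeholders, and a 'trackpy.ini' parameter file is assumed in each directory."""
    from IPython.parallel import Client
    runner = TrackingRunner(['movie01', 'movie02'],
                            load_balanced_view=Client().load_balanced_view())
    runner.start()
    runner.watch()
    return runner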
| gpl-3.0 |
hunter-cameron/Bioinformatics | python/isolate_download_manager.py | 1 | 39133 |
import argparse
import sys
import os
import pandas
import getpass
import shutil
import tarfile
import logging
import subprocess
from Bio import SeqIO
from mypyli import utilities
from mypyli.jgi_interface import JGIInterface, JGIOrganism
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
class MissingDataError(ValueError):
""" Exception to raise when trying to get the path for data that isn't present. """
pass
class JGIDownloadError(Exception):
""" Blanket error to use when JGI download fails for any reason"""
pass
class DataReport(object):
""" Custom data type to store info about data"""
def __init__(self):
self.found_ids = []
self.missing_ids = []
self.extra_ids = []
self.invalid_files = []
self.duplicates = []
def __str__(self):
return "\n".join([ "Data Report:",
" Found ids: " + str(self.found_ids),
" Missing ids: " + str(self.missing_ids),
" Extra ids: " + str(self.extra_ids),
" Invalid files: " + str(self.invalid_files),
" Duplicates: " + str(self.duplicates)
])
    @classmethod
    def merge(cls, rep1, rep2):
        """ Merges two reports and returns the merged version. """
        merged = cls()
        # I could do this merge in a for loop by accessing the internal dict
        # however, I think it's frowned upon to mess with the internals
        merged.found_ids = rep1.found_ids + rep2.found_ids
        merged.missing_ids = rep1.missing_ids + rep2.missing_ids
        merged.extra_ids = rep1.extra_ids + rep2.extra_ids
        merged.invalid_files = rep1.invalid_files + rep2.invalid_files
        merged.duplicates = rep1.duplicates + rep2.duplicates
        return merged
class DataType(object):
""" DataType with associated paths and files """
def __init__(self, name):
self.name = name
self.files = {}
def __str__(self):
return "DataType {}".format(self.name)
def update(self):
""" Updates the files dict for whether or not each file exists """
for path in self.files:
self.files[path] = os.path.exists(path)
def add_data_file(self, path):
"""
Adds a file to the datatype
        path is the full path to the file
"""
if path in self.files:
LOG.warning("Refusing to add duplicate data path '{}' to {}".format(path, str(self)))
return
else:
self.files[path] = False
def get_missing(self):
""" Returns a list of missing files """
return [file for file in self.files if not self.files[file]]
def get_data_paths(self):
""" Returns a list of data paths """
return list(self.files.keys())
@property
def present(self):
""" Property that is True if all the data exists and false otherwise """
self.update()
for type_exists_bool in self.files.values():
if not type_exists_bool:
return False
else:
return True
class Isolate(object):
""" Representation of an isolate complete with methods to check for existing data and download more.
    A major advantage (and potential drawback) of this class is that it reports all failed data
    download attempts as warnings. This is convenient for interactive use, but it makes collecting
    failed downloads programmatically a little harder; a future option could suppress errors and
    log them as warnings, raising the errors otherwise.
"""
DTYPES = ["bundle", "gbk", "genome", "blast_db", "genes", "ko", "cog", "pfam", "tigrfam", "interpro"]
def __init__(self, taxon_oid, name=None):
"""
Taxon_oid is required for online lookups.
Name is what you want to name member files as. Defaults to taxon_oid.
"""
self.taxon_oid = taxon_oid
# the name attrib will be what datafiles are named
if name:
self.name = name
else:
self.name = taxon_oid
# data fields
self.datatypes = {}
self.metadata = {}
# additional data fields for/from the database
self.database_data = {}
# stores a JGIOrganism to manage online data access
self.organism = None
def __str__(self):
return "Isolate: {}".format(self.name)
def get_data_paths(self, dtype):
""" Returns a list of the filepath(s) for a particular datatype"""
return self.datatypes[dtype].get_data_paths()
def make_organism(self, interface):
""" Creates a JGIOrganism object from the supplied interface """
self.organism = JGIOrganism(interface, taxon_oid=self.taxon_oid, prefix=self.taxon_oid)
# methods for looking for data files
def add_datatype(self, dtype, pre_suf):
""" Adds a DataType object to the datatypes attribute
datatype - a string name for the datatype
pre_suf - a tuple or list of tuples with a prefix (full directory path)
and suffix (extension) for each data file to put in the datatype
"""
try:
datatype = self.datatypes[dtype]
except KeyError:
datatype = DataType(dtype)
        # convert pre_suf to a list if it isn't already
if isinstance(pre_suf, tuple):
pre_suf = [pre_suf]
for tup in pre_suf:
prefix, suffix = tup
# add a forward slash to the path if necessary
if not prefix.endswith("/"):
prefix += "/"
# get the full path using the isolate name
data_path = prefix + self.name + suffix
datatype.add_data_file(data_path)
self.datatypes[dtype] = datatype
def get_missing(self):
""" Returns a dict with missing datatypes as keys and lists of missing files as values """
missing_data = {}
for datatype in self.datatypes.values():
if not datatype.present:
missing_data[datatype.name] = datatype.get_missing()
return missing_data
# methods for adding new data
def update_metadata(self, overwrite=False):
""" Looks up metadata from JGI and stores it as an attribute """
if self.metadata:
if overwrite:
LOG.warning("Overwriting metadata for {}".format(str(self)))
else:
LOG.warning("Metadata already present for {}. Skipping lookup. Specify overwrite=True to force.".format(str(self)))
return
try:
self.metadata = self.organism.get_metadata()
except Exception as e:
LOG.warning("Could not download metadata for {}.\n\t{}".format(str(self), str(e)))
def update_data(self, dtype=None, overwrite=False):
""" Tries to get data according to the dtype; if dtype is none, tries to get all missing data """
if dtype is None:
if overwrite:
dtype = self.datatypes.keys()
else:
dtype = self.get_missing().keys()
else:
if isinstance(dtype, str):
dtype = [dtype]
#
## make sure the input dtypes are ok
#
filtered_dtype = []
for d in dtype:
# check if valid datatypes
if not d in self.datatypes:
LOG.warning("Datatype '{}' does not exist for {}. Omitting.".format(d, str(self)))
continue
# check if data is already present
elif self.datatypes[d].present:
if not overwrite:
LOG.warning("Datatype '{}' is already present for {}. Omitting. Specify 'overwrite=True' to overwrite.".format(d, str(self)))
continue
filtered_dtype.append(d)
dtype = filtered_dtype
#
## Start getting the data
#
# try to get the bundle first
if "bundle" in dtype:
try:
self._download_from_jgi("bundle")
except (AttributeError, JGIDownloadError) as e:
LOG.warning("Could not download 'bundle' for {}.\n\t{}".format(str(self), str(e)))
# data to get from bundle
if "genome" in dtype:
try:
self._extract_from_bundle("genome")
except (MissingDataError, IOError) as e:
LOG.warning("Could not extract 'genome' from 'bundle' for {}.\n\t{}".format(str(self), str(e)))
# data to download from JGI
for d in ["gbk", "ko", "cog", "pfam", "tigrfam", "interpro"]:
if d in dtype:
try:
self._download_from_jgi(d)
except (AttributeError, JGIDownloadError) as e:
LOG.warning("Could not download '{}' for {}.\n\t{}".format(d, str(self), str(e)))
# data I need to derrive from other files
if "genes" in dtype:
try:
self._gbk2faa()
except MissingDataError as e:
LOG.warning("Cannot process 'genes' for {}.\n\t".format(str(self), str(e)))
if "blast_db" in dtype:
try:
self._make_blast_db()
except (MissingDataError, RuntimeError) as e:
LOG.warning("Cannot 'makeblastdb' for {}.\n\t{}".format(str(self), str(e)))
def _download_from_jgi(self, dtype):
""" Downloads a datatype for a given organism """
# download what we can from JGI, should be try-excepted
# this doesn't try to change the name of the resultant file
# need to delete .tar.gz after done
LOG.debug("Trying to download {} from jgi for {}...".format(dtype, str(self)))
if dtype == "bundle":
try:
self.organism.download_data(("IMG Data", ".*.(tar.gz)$"))
except Exception as e:
raise JGIDownloadError("Download failed with error message:\n\t{}".format(str(e)))
tar = tarfile.open(self.taxon_oid + ".tar.gz", 'r:gz')
# we need the directory before the bundle dir to extract into
# this command joins the path with the parent dir (..) and then resolves an abs path
extract_dir = os.path.abspath(os.path.join(self.get_data_paths("bundle")[0], os.path.pardir))
tar.extractall(path=extract_dir)
os.remove(self.taxon_oid + ".tar.gz")
# download from IMG
if dtype in ['gbk', 'ko', 'cog', 'pfam', 'tigrfam', 'interpro']:
try:
self.organism.download_data(dtype)
except Exception as e:
raise JGIDownloadError("Download failed with error message:\n\t{}".format(str(e)))
# move the downloaded file to the proper location
shutil.move(self.taxon_oid + "." + self.organism.IMG_DATA_SUF[dtype],
self.get_data_paths(dtype)[0])
def _extract_from_bundle(self, dtype):
"""
Tries to extract a data type from the bundle.
        Raises MissingDataError if the bundle hasn't been downloaded or doesn't contain the file,
        and IOError if copying the file fails.
"""
# this is a hash that maps dtypes to filenames in the bundle
# I need to give the user more control over this without digging through the code
dtype_to_bundle = {
"genome": self.taxon_oid + ".fna",
}
LOG.debug("Trying to extract {} from bundle for {}...".format(dtype, str(self)))
# check if the bundle is available
if self.datatypes["bundle"].present:
bundle_path = self.get_data_paths("bundle")[0]
else:
raise MissingDataError("Cannot extract data from bundle; bundle has not been downloaded.")
# look for the file
for file in os.listdir(bundle_path):
if file == dtype_to_bundle[dtype]:
src = bundle_path + "/" + file
dest = self.get_data_paths(dtype)[0]
LOG.info("Copying {} to\t{}".format(src, dest))
try:
shutil.copy(src, dest)
break
except Exception as e:
raise IOError("Copying {} to {} failed. {}".format(src, dest, str(e)))
else:
raise MissingDataError("Filename {} not found in bundle.".format(dtype_to_bundle[dtype]))
def _make_blast_db(self):
""" Makes a blast database using the makeblastdb system command"""
LOG.debug("Trying to makeblastdb for {}...".format(str(self)))
# check if the genome is available
if self.datatypes["genome"].present:
fasta = self.get_data_paths("genome")[0]
else:
raise MissingDataError("'genome' data required for making blast database.")
# get the prefix
prefix = self.get_data_paths("blast_db")[0].rsplit(".", 1)[0]
status = subprocess.call(["makeblastdb",
"-dbtype", "nucl",
"-in", fasta,
"-out", prefix])
if status:
raise RuntimeError("makeblastdb command failed; make sure it is on $PATH")
else:
LOG.info("BLAST database successfully created!")
def _gbk2faa(self):
LOG.debug("Trying to extract genes for {}...".format(str(self)))
# check if the gbk is available
if self.datatypes["gbk"].present:
gbk = self.get_data_paths("gbk")[0]
else:
raise MissingDataError("'genome' data required for making blast database.")
# get the path for the output
genes = self.get_data_paths("genes")[0]
utilities.gbk2faa(gbk, genes)
@property
def organism(self):
if self._organism is None:
raise AttributeError("No JGIOrganism has been created for: {}".format(str(self)))
else:
return self._organism
@organism.setter
def organism(self, value):
self._organism = value
class IsolateManager(object):
""" Manages data files for isolates and keeps an up to date database of which files are downloaded. """
DATA_DIRS = ["bundle", "gbk", "genome", "blast_db", "genes", "ko", "cog", "pfam", "tigrfam", "interpro"]
DATA_EXT = {
"bundle": "/",
"gbk": ".gbk",
"genome": ".fna",
"genes": ".genes.faa",
"ko": ".ko.txt",
"cog": ".cog.txt",
"pfam": ".pfam.txt",
"tigrfam": ".tigrfam.txt",
"interpro": ".interpro.txt",
"blast_db": ".fna.nin" # this is special becuase it is actually just a prefix
}
EXT_TO_DTYPE = {
"/": "bundle",
"gbk": "gbk",
"fna": "genome",
"genes.faa": "genes",
"ko.txt": "ko",
"cog.txt": "cog",
"pfam.txt": "pfam",
"tigrfam.txt": "tigrfam",
"interpro.txt": "interpro",
"fna.nin": "blast_db",
"fna.nhr": "blast_db",
"fna.nsq": "blast_db",
}
DEFAULT_DATABASE = "isolate_database"
def __init__(self, dtype_and_ext, database_path="", base_dir=os.getcwd(), mkdir=False):
"""
Argument dtype_and_ext is required and is a list of tuples of (dtype, extension)
I'm requiring it in init because I want all the isolates to be initialized with the same data
"""
self.base_dir = base_dir
self.database_path = database_path
self.dtype_and_ext = dtype_and_ext
# container to hold Isolate objects
self.isolates = {}
# option about whether or not to force the creation of data directories
self.mkdir = mkdir
# stores a connection with JGI
self.jgi_interface = None
# check for default path and read that db if present, otherwise, prepare to write new one
if not self.database_path:
self.database_path = base_dir + "/" + self.DEFAULT_DATABASE
if os.path.isfile(self.database_path):
self.read_database()
else:
self.create_database()
if mkdir:
# make the base dir
if not os.path.isdir(base_dir):
os.mkdir(base_dir)
for data_dir in self.DATA_DIRS:
full_path = base_dir + "/" + data_dir
if not os.path.isdir(full_path):
os.mkdir(full_path)
#######################
## Isolate based methods
#
def create_database(self, mkdir=False):
""" Sets up interface to use a new database """
LOG.info("Creating new database '{}'".format(self.database_path))
def read_database(self, taxon_oid_field="taxon_oid"):
""" Reads a database into Isolate Objects """
LOG.info("Reading database: {}...".format(self.database_path))
df = pandas.read_csv(self.database_path, sep="\t")
df = df.where((pandas.notnull(df)), None)
#df.fillna("NA", inplace=True)
# check if the taxon_oid field in in the database
if taxon_oid_field in df.columns:
# loop through all the entries in the database and make a dict of the values
for row in df.index:
database_dict = {}
for col in df.columns:
database_dict[col] = df.loc[row, col]
isolate = self.add_isolate(str(database_dict[taxon_oid_field]))
isolate.database_data = database_dict
def get_missing(self):
""" Returns a dict of missing files across all isolates """
all_data_paths = []
missing_dict = {}
for isolate in self.isolates.values():
isolate_missing = isolate.get_missing()
for dtype in isolate_missing:
try:
missing_dict[dtype].append(isolate.name)
except KeyError:
missing_dict[dtype] = [isolate.name]
return missing_dict
def _make_organisms(self):
for isolate in self.isolates.values():
isolate.make_organism(self.jgi_interface)
def update_data(self, dtype=None, overwrite=False):
for isolate in self.isolates.values():
isolate.update_data(dtype, overwrite)
def update_metadata(self, overwrite=False):
for isolate in self.isolates.values():
isolate.update_metadata(overwrite)
def build_database(self, dtype_to_database, database_to_database, metadata_to_database, order=None, replace=False):
"""
Draws on multiple data sources to generate a database.
Accepts 3 dicts to add fields to the database:
dtype_to_database - allows the user to rename dtypes to whatever they want
database_to_database - convert keys read in from previous database to field in this database
metadata_to_database - convery keys from metadata to a database field
Also accepts an order array for column names where the col names should be the set of all the
values in the 3 dicts.
Database is build using dtype first, then database, then metadata. Only empty fields will be filled.
This allows the metadata to be populated from the original database rather than having
to look it up each time.
To force metadata fields to replace existing fields, specify replace=True.
returns a pandas DataFrame
"""
# begin building dict for database
db_dict = {}
for index, isolate in self.isolates.items():
db_dict[index] = {}
# add data status - will either be True or False
for k, dk in dtype_to_database.items():
try:
db_dict[index][dk] = isolate.datatypes[k].present
except KeyError:
LOG.warning("{} didn't have '{}' as a listed datatype.".format(str(isolate), k))
db_dict[index][dk] = False
# add data from a previous database -- these should just be manual entry data fields
for k, dk in database_to_database.items():
try:
db_dict[index][dk] = isolate.database_data[k]
except KeyError:
LOG.warning("{} didn't have '{}' as a database key.".format(str(isolate), k))
db_dict[index][dk] = None
# add metadata - will either be a string or None
for k, dk in metadata_to_database.items():
# skip if the key is already present and not empty and replace isn't specified
if dk in db_dict[index]:
if db_dict[index][dk] is not None and not replace:
continue
try:
db_dict[index][dk] = isolate.metadata[k]
except KeyError:
LOG.warning("{} didn't have '{}' as a metadata key.".format(str(isolate), k))
db_dict[index][dk] = None
# coerce dict to a DataFrame
df = pandas.DataFrame.from_dict(db_dict, orient="index")
# sort the dataframe by the order array
if order:
# check for columns in order not in df; this is an error
for item in order:
if item not in df.columns:
raise ValueError("Column '{}' is present in the order array but not the dataframe.".format(item))
# check for columns in df not in order; this is a warning
for item in df.columns.tolist():
if item not in order:
LOG.warning("Column '{}' is present in the df but not in the order array, it will not be in the sorted dataframe.".format(item))
# return then ordered df
df = df[order]
return df
else:
return df
def add_isolate(self, taxon_oid):
"""
Adds an isolate to the dict of isolates this manager manages.
Returns the isolate for further modification.
"""
if taxon_oid not in self.isolates:
isolate = Isolate(taxon_oid)
# add all the data types
for data_tup in self.dtype_and_ext:
prefix = self.base_dir + "/" + data_tup[0]
isolate.add_datatype(data_tup[0], (prefix, data_tup[1]))
# add the connection to JGI if there is one
if self.jgi_interface:
isolate.make_organism(self.jgi_interface)
self.isolates[taxon_oid] = isolate
return isolate
else:
LOG.warning("Isolate '{}' already in isolate list. Refusing to add duplicate.".format(taxon_oid))
def connect_to_JGI(self, **kwargs):
"""
Creates a JGIInterface and logs in.
Valid kwargs are:
username -> JGI username
password -> JGI password
force_overwrite -> boolean for overwriting existing files
resume -> boolean for skipping existing files
newest_only -> boolean for downloading the newest file only when name conflicts occur
"""
# set kwarg defaults
kwarg_values = {
"username": "",
"password": "",
"force_overwrite": False,
"resume": False,
"newest_only": False
}
for key, value in kwargs.items():
try:
kwarg_values[key] = value
except KeyError:
raise ValueError("'{}' is not a valid kwarg.".format(key))
if not kwarg_values["username"]:
kwarg_values["username"] = input("JGI username (email): ")
if not kwarg_values ["password"]:
kwarg_values["password"] = getpass.getpass("JGI Password for {}: ".format(kwarg_values["username"]))
self.jgi_interface = JGIInterface(username=kwarg_values["username"],
password=kwarg_values["password"],
force_overwrite=kwarg_values["force_overwrite"],
resume=kwarg_values["resume"],
newest_only=kwarg_values["newest_only"]
)
self._make_organisms()
def check_for_new_isolates(self, projects):
"""
Checks for new isolates in a list of specfied projects.
Returns a dict of isolates not in database taxon_oid -> Isolate to allow easy
lookup of metadata to determine if the isolate should be added.
"""
# get all the taxon_oids from all the projects
taxon_oids = set()
for project in projects:
tids = self.jgi_interface.get_taxon_oids_for_proposal(project)
taxon_oids.update(tids)
# check for new ones
new_tids = {}
for taxon_oid in taxon_oids:
if taxon_oid not in self.isolates:
isolate = Isolate(taxon_oid, name=taxon_oid)
isolate.make_organism(self.jgi_interface)
new_tids[taxon_oid] = isolate
return new_tids
def make_all_blast_db(self):
""" Concatenates all genomic fastas and makes a blast database out of that. """
#
## Concatenate genomic fastas and prepend name to each header
#
LOG.info("Concatenating all files...")
tmp_fasta_name = "all_isolates.fna"
with open(tmp_fasta_name, "w") as OUT:
for isolate in self.isolates.values():
if isolate.datatypes["genome"].present:
# read the fasta file
with open(isolate.get_data_paths("genome")[0], "r") as IN:
LOG.debug(" Adding file from {}...".format(str(isolate)))
for record in SeqIO.parse(IN, "fasta"):
# change the header
record.id = isolate.name + "_" + record.id
# set description to id to avoid double printing
record.description = record.id
SeqIO.write(record, OUT, "fasta")
#
## Make a mock isolate to access the mkblastdb method
#
all = Isolate(None, "all_isolates")
all.add_datatype("genome", (self.base_dir + "/", ".fna"))
all.add_datatype("blast_db", [tup for tup in self.dtype_and_ext if tup[0] == "blast_db"])
all.update_data("blast_db", overwrite=True)
# remove temporary fasta
os.remove(tmp_fasta_name)
@staticmethod
def help():
""" Displays the help message. """
print("""
Isolate Download Manager initialized as 'manager'.
Other notable variables:
dtype_to_database -> dict mapping datatypes to field names for tabular output
metadata_to_database -> dict mapping metadata keys (on IMG organism summary) to field names
database_to_database -> dict mapping fields in a previous database to field names in this db
field_order -> list that contains the order of all the fields from the 3 dicts above
projects -> list of the projects the isolates are currently from
You might want to:
- Add an isolate -> manager.add_isolate(taxon_oid)
- See missing files -> manager.get_missing()
- Connect to JGI -> manager.connect_to_JGI()
- Get missing data for all isolates -> manager.update_data()
- Get a specific data type -> manager.update_data("gbk")
- Get metadata for all isolates -> manager.update_metadata()
- Get a tabular representation of the isolates ->
df = manager.build_database(dtype_to_database, database_to_database, metadata_to_database, field_order)
- Print the tabular representation -> write_database(df, filename)
- Check for new isolates in existing projects (doesn't add them to database) ->
manager.check_for_new_isolates(projects)
- Display this message again -> manager.help()
""")
def write_database(df, filename):
""" Writes a pandas DataFrame to the specified filename. """
df.to_csv(path_or_buf=filename, sep="\t", na_rep='NA', header=True, index=True, index_label="taxon_oid")
class UserInterface(object):
""" Class for a user interface to this program. This is kept for educational purposes only. The method for using this script now is to run it in Ipython. """
STATES = { "main":
[
"Welcome to the Isolate Manager",
"==============================",
"0. Exit",
"1. Update Database",
"2. Write Database",
"3. Add Isolate"
],
"AddIsolate":
[
"Add Isolate from:",
"=================",
"0. Back",
"1. JGI Taxon OID",
],
"GetData":
[
"Get Data",
"========",
"0. Main Menu",
"1. Get all",
"2. Get local"
]
}
RESPONSES = { "main":
[
"Action:Exit",
"Action:UpdateDB",
"Action:WriteDB",
"State:AddIsolate"
],
"AddIsolate":
[
"State:main",
"Action:AddTaxonId",
],
"GetData":
[
"State:main",
"Action:GetDataAll",
"Action:GetDataLocal"
]
}
def __init__(self, manager):
self.manager = manager
self.running = True
self.current_state = "main"
def display(self):
""" Main loop of the user interface """
while self.running:
print(end="\n\n")
for prompt in self.STATES[self.current_state]:
print(prompt)
print()
response = input("Please enter your selection: ")
self.handle_response(response)
return self.manager
def handle_response(self, response):
# make sure response is an integer
try:
response = int(response)
except ValueError:
# switch to interactive mode
if response == "i":
self.running = False
return
print("Error: response must be an integer")
return
# make sure response is in the list of keys
try:
action = self.RESPONSES[self.current_state][response]
except IndexError:
print("Error: response must be in the set: " +
" ".join([str(i) for i in range(len(self.RESPONSES[self.current_state]))]))
return
# handle the response
res_type, command = action.split(":")
if res_type == "State":
self.current_state = command
elif res_type == "Action":
# main commands
if command == "Exit":
print("Goodbye.")
sys.exit()
elif command == "UpdateDB":
self.update_db()
elif command == "WriteDB":
self.write_db()
# add isolate commands
elif command == "AddTaxonId":
self.add_isolate_taxon_oid()
elif command == "AddProjId":
self.add_isolate_proj_id()
elif command == "AddUniqueId":
self.add_isolate_unique_id()
# get data commands
elif command == "GetDataAll":
self.get_data("all")
elif command == "GetDataLocal":
self.get_data("local")
def update_db(self):
print()
self.manager.update_database()
print()
print("Database updated.")
self.current_state = "GetData"
def write_db(self):
path = input("Please type the path to write the database (default=" + self.manager.database_path + ").\n")
if not path:
path = self.manager.database_path
self.manager.write_database(path)
print("Database written to: {}".format(path))
def add_isolate_taxon_oid(self):
isolate = input("Type the JGI taxonomy id: ")
if len(isolate) != 10:
print("JGI taxonomy ids have 10 digits. Refusing to accept this entry because it does not have 10 characters.")
return
else:
self.manager.add_isolate(isolate)
print("Isolate {} successfully added.".format(isolate))
def get_data(self, style):
if style == "all":
            self.current_state = "main"
username = input("JGI username (email): ")
password = getpass.getpass("JGI Password: ")
jgi_interface = JGIInterface(username=username, password=password, newest_only=True)
self.manager.get_data(jgi_interface)
elif style == "local":
self.manager.get_data()
def get_ids_from_file(ids_f):
ids = []
with open(ids_f, 'r') as IN:
for line in IN:
ids.append(line.rstrip())
return ids
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Provides an interactive interface for managing/downloading a JGI/IMG based database. The ideal way to use this is by invoking this script using ipython.")
parser.add_argument("-db", "-database", help="a database mapping Taxon id to types of data on file. will be created if blank", default="")
parser.add_argument("-dir", "-base_dir", help="the base isolates directory", default=os.getcwd());
parser.add_argument("-mkdir", help="make data directories if they don't exist", action="store_true")
parser.add_argument("-ids", help="list of ids (or a file with a list) to check, this is optional if there is an established database", nargs="*")
args = parser.parse_args()
args.dir = os.path.abspath(args.dir)
# make a list of datatype and file extensions
# this can be tailored to look for less data
dtype_and_ext = [
("bundle", "/"),
("gbk", ".gbk"),
("genome", ".fna"),
("genes", ".genes.faa"),
("ko", ".ko.txt"),
("cog", ".cog.txt"),
("pfam", ".pfam.txt"),
("tigrfam", ".tigrfam.txt"),
("interpro", ".interpro.txt"),
("blast_db", ".fna.nin"),
("blast_db", ".fna.nhr"),
("blast_db", ".fna.nsq")
]
# options to use when making a database
field_order = [ "freezer_id",
"organism_name",
"project",
"sequencing_center",
"type",
"status",
"gram_staining",
"lineage",
"bundle",
"gbk",
"genome",
"genes",
"ko",
"cog",
"pfam",
"tigrfam",
"interpro",
"blast_db"
]
database_to_database = {
"freezer_id": "freezer_id",
"organism_name": "organism_name",
"sequencing_center": "sequencing_center",
"type": "type",
"project": "project",
"gram_staining": "gram_staining",
"status": "status",
"lineage": "lineage"
}
metadata_to_database = {
"Organism Name": "organism_name",
"Sequencing Center": "sequencing_center",
"Culture Type": "type",
"Study Name (Proposal Name)": "project",
"Gram Staining": "gram_staining",
"Lineage": "lineage",
"Sequencing Status": "status"
}
# add all datatypes
dtype_to_database = {d[0]: d[0] for d in dtype_and_ext}
# set up the manager
manager = IsolateManager(dtype_and_ext, args.db, args.dir, args.mkdir)
field_map = {
"Organism Name": "organism_name",
"Sequencing Center:": "sequencing_center",
"Culture Type": "type",
"Study Name (Proposal Name)": "project",
"Gram Staining": "gram",
}
# current projects
projects = {
'Burkholderia Environmental Isolates',
'Plant associated metagenomes--Microbial community diversity and host control of community assembly across model and emerging plant ecological genomics systems.',
'Rhizosphere Grand Challenge Isolate Sequencing'
}
# load in any new isolates
if args.ids is not None:
for arg in args.ids:
if os.path.isfile(arg):
ids = get_ids_from_file(arg)
for id in ids:
manager.add_isolate(id)
else:
for id in args.ids:
manager.add_isolate(id)
print("""
Isolate Download Manager initialized as 'manager'.
Other notable variables:
dtype_to_database -> dict mapping datatypes to field names for tabular output
metadata_to_database -> dict mapping metadata keys (on IMG organism summary) to field names
database_to_database -> dict mapping fields in a previous database to field names in this db
field_order -> list that contains the order of all the fields from the 3 dicts above
projects -> set of the current projects known to include relevant isolates
You might want to:
- Add an isolate -> manager.add_isolate(taxon_oid)
- See missing files -> manager.get_missing()
- Connect to JGI -> manager.connect_to_JGI()
- Get missing data for all isolates -> manager.update_data()
- Get a specific data type -> manager.update_data("gbk")
- Get metadata for all isolates -> manager.update_metadata()
- Get a tabular representation of the isolates ->
manager.build_database(dtype_to_database, database_to_database, metadata_to_database, field_order)
""")
#manager.make_organisms(jgi_interface)
sys.exit()
ui = UserInterface(manager)
manager = ui.display()
#manager.update_database()
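    # Illustrative follow-up once connected (mirrors the help text printed above;
    # the output filename below is a placeholder):
    #   manager.connect_to_JGI()
    #   manager.update_metadata()
    #   manager.update_data()
    #   df = manager.build_database(dtype_to_database, database_to_database,
    #                               metadata_to_database, field_order)
    #   write_database(df, "isolate_database_new.tsv")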
| mit |
Zhenxingzhang/AnalyticsVidhya | BigMartSales/DataMunging.py | 1 | 4657 | import pandas as pd
import numpy as np
from scipy.stats import mode
# Read files:
train = pd.read_csv("Data/Train_UWu5bXk.csv")
test = pd.read_csv("Data/Test_u94Q5KV.csv")
# Combine test and train into one file
train['source']='train'
test['source']='test'
data = pd.concat([train, test],ignore_index=True)
print train.shape, test.shape, data.shape
# Check missing values:
print data.apply(lambda x: sum(x.isnull()))
# Number of unique values in each:
print data.apply(lambda x: len(x.unique()))
# Determine the average weight per item:
item_avg_weight = data.pivot_table(values='Item_Weight', index='Item_Identifier')
# Get a boolean variable specifying missing Item_Weight values
miss_bool = data['Item_Weight'].isnull()
# Impute data and check #missing values before and after imputation to confirm
print 'Orignal #missing: %d'% sum(miss_bool)
data.loc[miss_bool,'Item_Weight'] = data.loc[miss_bool,'Item_Identifier'].apply(lambda x: item_avg_weight[x])
print 'Final #missing: %d'% sum(data['Item_Weight'].isnull())
#Determing the mode for each
outlet_size_mode = data.pivot_table(values='Outlet_Size', columns='Outlet_Type',aggfunc=(lambda x:mode(x).mode[0]) )
print 'Mode for each Outlet_Type:'
print outlet_size_mode
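# NOTE: the exit(0) below stops the script right after printing the Outlet_Size modes;
# the imputation and feature-engineering steps further down only run if it is removed.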
exit(0)
# Get a boolean variable specifying missing Item_Weight values
miss_bool = data['Outlet_Size'].isnull()
# Impute data and check #missing values before and after imputation to confirm
print '\nOrignal #missing: %d'% sum(miss_bool)
data.loc[miss_bool,'Outlet_Size'] = data.loc[miss_bool,'Outlet_Type'].apply(lambda x: outlet_size_mode[x])
print sum(data['Outlet_Size'].isnull())
#Feature Engineering
#Check the mean sales by type:
data.pivot_table(values='Item_Outlet_Sales',index='Outlet_Type')
#Determine average visibility of a product
visibility_avg = data.pivot_table(values='Item_Visibility', index='Item_Identifier')
#Impute 0 values with mean visibility of that product:
miss_bool = (data['Item_Visibility'] == 0)
print 'Number of 0 values initially: %d'%sum(miss_bool)
data.loc[miss_bool,'Item_Visibility'] = data.loc[miss_bool,'Item_Identifier'].apply(lambda x: visibility_avg[x])
print 'Number of 0 values after modification: %d'%sum(data['Item_Visibility'] == 0)
#Determine another variable with means ratio
data['Item_Visibility_MeanRatio'] = data.apply(lambda x: x['Item_Visibility']/visibility_avg[x['Item_Identifier']], axis=1)
print data['Item_Visibility_MeanRatio'].describe()
#Item type combine:
data['Item_Identifier'].value_counts()
data['Item_Type_Combined'] = data['Item_Identifier'].apply(lambda x: x[0:2])
data['Item_Type_Combined'] = data['Item_Type_Combined'].map({'FD':'Food',
'NC':'Non-Consumable',
'DR':'Drinks'})
data['Item_Type_Combined'].value_counts()
#Years:
data['Outlet_Years'] = 2013 - data['Outlet_Establishment_Year']
data['Outlet_Years'].describe()
#Change categories of low fat:
print 'Original Categories:'
print data['Item_Fat_Content'].value_counts()
print '\nModified Categories:'
data['Item_Fat_Content'] = data['Item_Fat_Content'].replace({'LF':'Low Fat',
'reg':'Regular',
'low fat':'Low Fat'})
print data['Item_Fat_Content'].value_counts()
#Mark non-consumables as separate category in low_fat:
data.loc[data['Item_Type_Combined']=="Non-Consumable",'Item_Fat_Content'] = "Non-Edible"
data['Item_Fat_Content'].value_counts()
#Import library:
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
#New variable for outlet
data['Outlet'] = le.fit_transform(data['Outlet_Identifier'])
var_mod = ['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Item_Type_Combined','Outlet_Type','Outlet']
le = LabelEncoder()
for i in var_mod:
data[i] = le.fit_transform(data[i])
#One Hot Coding:
data = pd.get_dummies(data, columns=['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Outlet_Type',
'Item_Type_Combined','Outlet'])
# Exporting Data
#Drop the columns which have been converted to different types:
data.drop(['Item_Type','Outlet_Establishment_Year'],axis=1,inplace=True)
#Divide into test and train:
train = data.loc[data['source']=="train"]
test = data.loc[data['source']=="test"]
#Drop unnecessary columns:
test.drop(['Item_Outlet_Sales','source'],axis=1,inplace=True)
train.drop(['source'],axis=1,inplace=True)
#Export files as modified versions:
train.to_csv("Data/train_modified.csv",index=False)
test.to_csv("Data/test_modified.csv",index=False) | apache-2.0 |
sinall/ShiPanE-Python-SDK | strategyease_sdk/guorn/client.py | 1 | 2995 | # -*- coding: utf-8 -*-
import time
import pandas as pd
import requests
from strategyease_sdk.base_quant_client import BaseQuantClient
from strategyease_sdk.models import *
class GuornClient(BaseQuantClient):
BASE_URL = 'https://guorn.com'
def __init__(self, **kwargs):
super(GuornClient, self).__init__('Guorn')
self._session = requests.Session()
self._username = kwargs.get('username', None)
self._password = kwargs.get('password', None)
self._sid = kwargs.get('sid', None)
self._timeout = kwargs.pop('timeout', (5.0, 10.0))
def login(self):
self._session.headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100 Safari/537.36',
'Referer': '{}'.format(self.BASE_URL),
'X-Requested-With': 'XMLHttpRequest',
'Origin': self.BASE_URL,
'Content-Type': 'application/json; charset=UTF-8',
}
self._session.get(self.BASE_URL, timeout=self._timeout)
response = self._session.post('{}/user/login'.format(self.BASE_URL), json={
'account': self._username,
'passwd': self._password,
'keep_login': 'true'
}, timeout=self._timeout)
self._session.headers.update({
'cookie': response.headers['Set-Cookie']
})
super(GuornClient, self).login()
def query_portfolio(self):
response = self._session.get('{}/stock/instruction'.format(self.BASE_URL), params={
'fmt': 'json',
'amount': 1000000,
'sid': self._sid,
'_': time.time()
}, timeout=self._timeout)
instruction = response.json()
status = instruction['status']
data = instruction['data']
if status == 'failed':
if isinstance(data, str):
raise Exception(data)
raise Exception("获取调仓指令数据失败")
df = pd.DataFrame()
sheet_data = instruction['data']['sheet_data']
if sheet_data is not None:
for row in sheet_data['row']:
df[row['name']] = pd.Series(row['data'][1])
meas_data = sheet_data['meas_data']
for index, col in enumerate(sheet_data['col']):
df[col['name']] = pd.Series(meas_data[index])
portfolio = Portfolio(total_value=1.0)
for index, row in df.iterrows():
security = row.get(u'股票代码') or row.get(u'基金代码')
value = row[u'目标仓位']
price = row[u'参考价']
amount = value / price
position = Position(security, price, amount, amount)
portfolio.add_position(position)
portfolio.rebalance()
return portfolio
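def _example_usage():  # pragma: no cover
    """Illustrative sketch only (not part of the original module): the credentials
    and strategy sid below are placeholders, not real values."""
    client = GuornClient(username='[email protected]', password='secret', sid='12345')
    client.login()
    return client.query_portfolio()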
| mit |
ZenDevelopmentSystems/scikit-learn | sklearn/linear_model/passive_aggressive.py | 97 | 10879 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=False
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
| bsd-3-clause |
zhenxu66/scipy2015-blaze-bokeh | viz2.py | 12 | 4846 | # -*- coding: utf-8 -*-
import math
from collections import OrderedDict
import numpy as np
import pandas as pd
import netCDF4
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import DatetimeTickFormatter, ColumnDataSource, HoverTool, Plot, Range1d
from bokeh.palettes import RdBu11
from bokeh.models.glyphs import Text, Rect
import utils.world_countries as wc
from utils.colormap import RGBAColorMapper
colormap = RGBAColorMapper(-6, 6, RdBu11)
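# The temperature variable holds monthly slices starting at January 1850, hence the
# flat index (year - 1850) * 12 + (month - 1) used in get_slice below.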
def get_slice(t, year, month):
i = (year - 1850)*12 + month - 1
return colormap.color(t[i, :, :])
def climate_map():
data = netCDF4.Dataset('data/Land_and_Ocean_LatLong1.nc')
t = data.variables['temperature']
image = get_slice(t, 1950, 1)
world_countries = wc.data.copy()
worldmap = pd.DataFrame.from_dict(world_countries, orient='index')
# Create your plot
p = figure(width=900, height=500, x_axis_type=None, y_axis_type=None,
x_range=[-180,180], y_range=[-90,90], toolbar_location="left")
p.image_rgba(
image=[image],
x=[-180], y=[-90],
dw=[360], dh=[180], name='image'
)
p.patches(xs=worldmap['lons'], ys=worldmap['lats'], fill_color="white", fill_alpha=0,
line_color="black", line_width=0.5)
return p
def legend():
# Set ranges
xdr = Range1d(0, 100)
ydr = Range1d(0, 500)
# Create plot
plot = Plot(
x_range=xdr,
y_range=ydr,
title="",
plot_width=100,
plot_height=500,
min_border=0,
toolbar_location=None,
outline_line_color="#FFFFFF",
)
# For each color in your palette, add a Rect glyph to the plot with the appropriate properties
palette = RdBu11
width = 40
for i, color in enumerate(palette):
rect = Rect(
x=40, y=(width * (i + 1)),
width=width, height=40,
fill_color=color, line_color='black'
)
plot.add_glyph(rect)
# Add text labels and add them to the plot
minimum = Text(x=50, y=0, text=['-6 ºC'])
plot.add_glyph(minimum)
maximum = Text(x=50, y=460, text=['6 ºC'])
plot.add_glyph(maximum)
return plot
def timeseries():
# Get data
df = pd.read_csv('data/Land_Ocean_Monthly_Anomaly_Average.csv')
df['datetime'] = pd.to_datetime(df['datetime'])
df = df[['anomaly','datetime']]
df['moving_average'] = pd.rolling_mean(df['anomaly'], 12)
df = df.fillna(0)
# List all the tools that you want in your plot separated by comas, all in one string.
TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,hover,previewsave"
# New figure
t = figure(x_axis_type = "datetime", width=1000, height=200,tools=TOOLS)
# Data processing
# The hover tools doesn't render datetime appropriately. We'll need a string.
# We just want dates, remove time
f = lambda x: str(x)[:7]
df["datetime_s"]=df[["datetime"]].applymap(f)
source = ColumnDataSource(df)
# Create plot
t.line('datetime', 'anomaly', color='lightgrey', legend='anom', source=source)
t.line('datetime', 'moving_average', color='red', legend='avg', source=source, name="mva")
# Style
xformatter = DatetimeTickFormatter(formats=dict(months=["%b %Y"], years=["%Y"]))
t.xaxis[0].formatter = xformatter
t.xaxis.major_label_orientation = math.pi/4
t.yaxis.axis_label = 'Anomaly(ºC)'
t.legend.orientation = "bottom_right"
t.grid.grid_line_alpha=0.2
t.toolbar_location=None
# Style hover tool
hover = t.select(dict(type=HoverTool))
hover.tooltips = """
<div>
<span style="font-size: 15px;">Anomaly</span>
<span style="font-size: 17px; color: red;">@anomaly</span>
</div>
<div>
<span style="font-size: 15px;">Month</span>
<span style="font-size: 10px; color: grey;">@datetime_s</span>
</div>
"""
hover.renderers = t.select("mva")
# Show plot
#show(t)
return t
# Add title
def title():
# Data
year = 1850
month = 1
years = [str(x) for x in np.arange(1850, 2015, 1)]
months = [str(x) for x in np.arange(1, 13, 1)]
months_str = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
month_str = months_str[month-1]
title = figure(width=1200, height=100, x_range=(0, 1200), y_range=(0, 100), toolbar_location=None,
x_axis_type=None, y_axis_type=None, outline_line_color="#FFFFFF", tools="", min_border=0)
title.text(x=500, y=5, text=[month_str], text_font_size='36pt', text_color='black',
name="month", text_font="Georgia")
title.text(x=350, y=5, text=[str(year)], text_font_size='36pt', text_color='black',
name="year",text_font="Georgia")
return title
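# Illustrative driver (assumed usage; the original notebook wires these plots together
# interactively, so this guard is an addition for standalone experimentation):
if __name__ == '__main__':
    show(climate_map())
    show(timeseries())
    show(legend())
    show(title())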
| mit |
rethore/FUSED-Wake | fusedwake/WindFarm.py | 1 | 7962 | """An offshore wind farm model
@moduleauthor:: Juan P. Murcia <[email protected]>
"""
import numpy as np
MATPLOTLIB = True
try:
import matplotlib.pyplot as plt
except Exception as e:
MATPLOTLIB = False
print("WARNING: Matplotlib isn't installed correctly:", e)
from .WindTurbine import WindTurbineDICT
from windIO.Plant import WTLayout
class WindTurbineList(list):
"""A simple list class that can also act as a single element when needed.
Accessing one of the attribute of this list will get the first element of the
list attributes.
Note
----
We assume here that if a model call this intense as if it's one wind
turbine, the user has been clever enough to pass as input a wind farm with
identical turbines.
"""
def __getattr__(self, key):
#TODO: make some checks to catch possible bugs when the turbines are not similar.
return getattr(self.__getitem__(0), key)
def names(self):
return [getattr(w, 'name') for w in self]
class WindFarm(object):
def __init__(self, name=None, yml=None, coordFile=None, WT=None):
"""Initializes a WindFarm object.
The initialization can be done using a `windIO` yml file or using a
coodFile + WindTurbine instance.
Parameters
----------
name: str, optional
WindFarm name
yml: str, optional
A WindIO `yml` file containing the description of the farm
coordFile: str, optional
Wind Farm layout coordinates text file.
        WT: WindTurbine, optional
WindTurbine object (only one type per WindFarm)
"""
if (coordFile):
coordArray = np.loadtxt(coordFile)
self.pos = coordArray.T # np.array(2 x nWT)
self.nWT = self.pos.shape[1]
self.WT = WindTurbineList([WT for i in range(self.nWT)])
if name:
self.name = name
else:
self.name = 'Unknown wind farm'
elif (yml):
self.wf = WTLayout(yml)
self.pos = self.wf.positions.T
self.nWT = self.pos.shape[1]
self.WT = WindTurbineList([WindTurbineDICT(wt, self.wf[wt['turbine_type']]) for wt in self.wf.wt_list])
self.name = self.wf.name
# We generate a wind turbine list
# XYZ position of the rotors
self.xyz = np.vstack([self.pos, self.H])
# Vector from iWT to jWT: self.vectWTtoWT[:,i,j] [3, nWT, nWT]
self.vectWTtoWT = np.swapaxes([self.xyz -
np.repeat(np.atleast_2d(self.xyz[:, i]).T, self.nWT, axis=1)
for i in range(self.nWT)], 0, 1)
def rep_str(self):
return "%s has %s %s wind turbines, with a total capacity of %4.1f MW"%(
self.name, self.nWT, self.WT.turbine_type, sum(self.rated_power)/1E3)
def __repr__(self):
sep = "-------------------------------------------------------------"
return '\n'.join([sep, self.rep_str(), sep])
def _repr_html_(self):
sep = "<br>"
return '\n'.join([sep, self.rep_str(), sep])
def turbineDistance(self, wd):
"""Computes the WT to WT distance in flow coordinates
ranks the most of most upstream turbines
Parameters
----------
wd: float
Wind direction in degrees
Returns
-------
        distFlowCoord: ndarray
            Vector from iWT to jWT in flow coordinates: distFlowCoord[:, i, j]
        nDownstream: list(int)
            For each turbine, the number of turbines with a negative
            streamwise distance to it
        idWT: ndarray(int)
            Turbine indices sorted by that count (most upstream first)
"""
angle = np.radians(270.-wd)
ROT = np.array([[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]])
distFlowCoord = np.einsum('ij,jkl->ikl', ROT, self.vectWTtoWT[:2, :, :])
nDownstream = [(distFlowCoord[0, i, :] < 0).sum() for i in range(self.nWT)]
ID0 = np.argsort(nDownstream)
return distFlowCoord, nDownstream, ID0
def toFlowCoord(self, wd, vect):
"""Rotates a 2xN np.array to flow coordinates
Parameters
----------
wd: float
Wind direction in degrees
vect: ndarray
Vector or Matrix 2xN
Returns
-------
vect: ndarray
Vector or Matrix 2xN
"""
angle = np.radians(270.-wd)
ROT = np.array([[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]])
return np.dot(ROT, vect)
def get_T2T_gl_coord(self):
"""
        Calculate the turbine to turbine distances in the global
        coordinate system (loop-based version, slower than get_T2T_gl_coord2).
Parameters
----------
        None; the turbine positions stored in self.xyz are used.
Returns
-------
x_g x component of distance between Tj and Ti := x_g[i,j]
y_g y component of distance between Tj and Ti := y_g[i,j]
z_g z component of distance between Tj and Ti := z_g[i,j]
"""
# Compute the turbine to turbine vector in global coordinates
x_g = np.zeros([self.nWT, self.nWT])
y_g = np.zeros([self.nWT, self.nWT])
z_g = np.zeros([self.nWT, self.nWT])
for i in range(self.nWT):
for j in range(self.nWT):
x_g[i,j] = self.xyz[0,j] - self.xyz[0,i]
y_g[i,j] = self.xyz[1,j] - self.xyz[1,i]
z_g[i,j] = self.xyz[2,j] - self.xyz[2,i]
return x_g,y_g,z_g
def get_T2T_gl_coord2(self):
"""
        Calculate the turbine to turbine distances in the global
        coordinate system (vectorized, faster version).
Parameters
----------
        None; the turbine to turbine vectors stored in self.vectWTtoWT are used.
Returns
-------
x_g x component of distance between Tj and Ti := x_g[i,j]
y_g y component of distance between Tj and Ti := y_g[i,j]
z_g z component of distance between Tj and Ti := z_g[i,j]
"""
x_g, y_g, z_g = self.vectWTtoWT
return x_g, y_g, z_g
def plot(self, WT_num=False):
""" # TODO
"""
if MATPLOTLIB:
x = (self.pos[0, :] - min(self.pos[0, :])) / (2. * self.WT.R)
y = (self.pos[1, :] - min(self.pos[1, :])) / (2. * self.WT.R)
fig, ax = plt.subplots()
ax.scatter(x, y, c='black')
if WT_num:
for i in range(0, self.nWT):
ax.annotate(i, (x[i], y[i]))
elif not WT_num:
print('No annotation of turbines')
ax.set_xlabel('x/D [-]')
ax.set_ylabel('y/D [-]')
ax.axis('equal')
ax.set_title(self.name)
return fig, ax
def plot_order(self, wd):
""" # TODO
"""
if MATPLOTLIB:
x = (self.pos[0, :] - min(self.pos[0, :])) / 1000
y = (self.pos[1, :] - min(self.pos[1, :])) / 1000
dist, nDownstream, idWT = self.turbineDistance(wd)
fig, ax = plt.subplots()
ax.scatter(x, y, c='black')
for i in range(0, self.nWT):
ax.annotate(int(idWT[i]), (x[i], y[i]))
ax.set_xlabel('x [km]')
ax.set_ylabel('y [km]')
ax.set_title(self.name+' Wind direction '+str(wd))
return fig, ax
def __getattr__(self, key):
"""Give access to a list of the properties of the turbine
Parameters
----------
key: str
The parameter to return
Returns
-------
parameters: list
The parameter list of the turbines
Example
-------
> wf = WindFarm(name='farm_name', yml=filename)
> wf.rotor_diameter
[80.0, 80.0, 80.0, 80.0, 80.0, ..., 80.0]
"""
# Detect the edge case if key is 'WT'
        if key not in ('WT', 'nWT'):
            return [getattr(wt, key) for wt in self.WT]
        # Avoid infinite recursion when 'WT' or 'nWT' themselves are missing
        # (e.g. during unpickling): fall back to the normal AttributeError.
        raise AttributeError(key)
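if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module. The file name
    # below is a hypothetical placeholder; a real windIO yml description (or a
    # coordinate text file plus a WindTurbine instance) is required.
    wf = WindFarm(name='example_farm', yml='wind_farm.yml')
    print(wf)  # summary: name, number of turbines, installed capacity
    # Distances in flow coordinates and upstream ordering for a 270 deg wind
    dist, n_down, order = wf.turbineDistance(wd=270.0)
    print(order)
    if MATPLOTLIB:
        fig, ax = wf.plot(WT_num=True)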
| agpl-3.0 |
drusk/pml | pml/utils/pandas_util.py | 1 | 3729 | # Copyright (C) 2012 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Utility functions for working with pandas datatypes, i.e. DataFrames and
Series.
@author: drusk
"""
def find(series, search_for):
"""
Retrieves the indices of a pandas Series which have a specified value.
Args:
series: pandas.Series
The series to search and retrieve indices from.
search_for:
Either a single value to search for, or a list of several values
to search for. If it is a list, then if an index has any of the
specified values it will be included.
Returns:
indices: pandas.Index
The indices which held one of the specified values.
Example 1, search for single value:
series = pd.Series(["hostile", "friendly", "friendly", "neutral"],
index=["wolf", "cat", "dog", "mouse"])
indices = find(series, "friendly")
assert_that(indices, contains("cat", "dog")
Example 2, search for multiple values:
series = pd.Series(["hostile", "friendly", "friendly", "neutral"],
index=["wolf", "cat", "dog", "mouse"])
indices = find(series, ["friendly", "neutral"])
assert_that(indices, contains("cat", "dog", "mouse")
"""
# Convert values to search for to a list if they aren't
values = [search_for] if type(search_for) is not list else search_for
overall_matches = None
for value in values:
matches = (series == value)
if overall_matches is None:
overall_matches = matches
else:
overall_matches |= matches
return series.index[overall_matches]
def are_dataframes_equal(dataframe1, dataframe2):
"""
Compares two pandas DataFrame objects to see if they are equal.
Args:
dataframe1: pandas.DataFrame
dataframe2: pandas.DataFrame
Returns:
True if the DataFrames are identically-labeled and all elements are the
same. False otherwise.
"""
try:
# reduce 2d boolean array down to a single answer
return (dataframe1 == dataframe2).all().all()
except Exception:
# pandas throws:
# "Exception: Can only compare identically-labeled DataFrame objects"
return False
def is_series_numeric(series):
"""
Checks if the series data type is numeric.
Args:
series: pd.Series
The series whose data type will be checked.
Returns:
True if the series is numeric, i.e. values are some form of int or
float.
"""
dtype_name = series.dtype.name
return dtype_name.startswith("int") or dtype_name.startswith("float")
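if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: exercises find()
    # and is_series_numeric() on a small hand-built Series.
    import pandas as pd
    animals = pd.Series(["hostile", "friendly", "friendly", "neutral"],
                        index=["wolf", "cat", "dog", "mouse"])
    print(find(animals, "friendly"))                 # Index(['cat', 'dog'], ...)
    print(find(animals, ["friendly", "neutral"]))    # Index(['cat', 'dog', 'mouse'], ...)
    print(is_series_numeric(animals))                # False (object dtype)
    print(is_series_numeric(pd.Series([1, 2, 3])))   # True (int dtype)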
| mit |
ElDeveloper/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
UnitedThruAction/Data | Tools/CachingGeocoder.py | 1 | 2690 | """A very simple implementation of a caching geocoder using the Google Maps
(community-supported) Web Services Geocoding API.
Input: 1. Pandas dataframe containing a column named 'address_string'
2. Google Maps API key. See https://goo.gl/XSBqid
Output: Geopandas geodataframe containing a geometry column, made up of points
in CRS 84 coordinates.
A very simple, append-only cache is implemented in a CSV on the local disk.
@author [email protected]
@date 2017-09-04
"""
import sys
import googlemaps
import json
import pandas as pd
import geopandas as gpd
from tqdm import tqdm
from shapely.geometry import Point
from shapely.geometry import mapping, shape
def create_geodf(df, api_key):
# Populate cache
try:
geolog = pd.read_csv('geolocation_log.csv',
header=None, error_bad_lines=False,
names=['address_string', 'point_json'], quotechar="'")
geolog.drop_duplicates(inplace=True)
cache = {}
for index, row in geolog.iterrows():
cache[row.address_string] = shape(json.loads(row.point_json))
print "Loaded " + str(len(cache)) + " into cache"
except IOError:
cache = {}
print "No log found, continuing with empty cache"
sys.stdout.flush()
sys.stderr.flush()
# Now iterate through
gmaps = googlemaps.Client(key=api_key)
log = open('geolocation_log.csv', 'a+')
cache_hits, cache_misses = 0, 0
geometry_col = []
for index, row in tqdm(df.iterrows(), total=len(df)):
point = None
if row.address_string:
if row.address_string in cache:
point = cache[row.address_string]
cache_hits += 1
else:
geocode_result = gmaps.geocode(row.address_string)
cache_misses += 1
if geocode_result:
point = Point(geocode_result[0]['geometry']['location']['lng'],
geocode_result[0]['geometry']['location']['lat'])
try:
log.write("".join(["'", row.address_string,
"','", json.dumps(mapping(point)), "'\n"]))
except TypeError:
print "Error writing to log, continuing"
cache[row.address_string] = point
geometry_col.append(point)
log.close()
sys.stdout.flush()
sys.stderr.flush()
print str(cache_hits) + " cache hits, " + str(cache_misses) + " cache misses"
return gpd.GeoDataFrame(
df,
crs={
'init': 'epsg:4326'},
geometry=geometry_col)
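if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module. 'MY_API_KEY' is a
    # placeholder for a real Google Maps Geocoding API key and the addresses
    # are illustrative only; running this makes live API calls and appends to
    # geolocation_log.csv in the working directory.
    df = pd.DataFrame({'address_string': ['350 5th Ave, New York, NY',
                                          '1600 Pennsylvania Ave NW, Washington, DC']})
    gdf = create_geodf(df, 'MY_API_KEY')
    print(gdf.head())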
| apache-2.0 |
subodhchhabra/airflow | airflow/contrib/hooks/bigquery_hook.py | 3 | 66015 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import time
from builtins import range
from past.builtins import basestring
from airflow import AirflowException
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.utils.log.logging_mixin import LoggingMixin
from apiclient.discovery import HttpError, build
from googleapiclient import errors
from pandas_gbq.gbq import \
_check_google_client_version as gbq_check_google_client_version
from pandas_gbq import read_gbq
from pandas_gbq.gbq import \
_test_google_api_imports as gbq_test_google_api_imports
from pandas_gbq.gbq import GbqConnector
class BigQueryHook(GoogleCloudBaseHook, DbApiHook, LoggingMixin):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None,
use_legacy_sql=True):
super(BigQueryHook, self).__init__(
gcp_conn_id=bigquery_conn_id, delegate_to=delegate_to)
self.use_legacy_sql = use_legacy_sql
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(
service=service,
project_id=project,
use_legacy_sql=self.use_legacy_sql)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build(
'bigquery', 'v2', http=http_authorized, cache_discovery=False)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, sql, parameters=None, dialect=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:type sql: string
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
defaults to use `self.use_legacy_sql` if not specified
:type dialect: string in {'legacy', 'standard'}
"""
if dialect is None:
dialect = 'legacy' if self.use_legacy_sql else 'standard'
return read_gbq(sql,
project_id=self._get_field('project'),
dialect=dialect,
verbose=False)
def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: string
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: string
:param table_id: The name of the table to check the existence of.
:type table_id: string
"""
service = self.get_service()
try:
service.tables().get(
projectId=project_id, datasetId=dataset_id,
tableId=table_id).execute()
return True
except errors.HttpError as e:
if e.resp['status'] == '404':
return False
raise
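# Hedged usage sketch (not part of the original module): a typical way this
# hook is used from an operator or a task callable, assuming a configured
# 'bigquery_default' connection. The project/dataset/table names below are
# placeholders.
#
#   hook = BigQueryHook(bigquery_conn_id='bigquery_default',
#                       use_legacy_sql=False)
#   if hook.table_exists('my-project', 'my_dataset', 'my_table'):
#       df = hook.get_pandas_df('SELECT COUNT(*) AS n FROM my_dataset.my_table')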
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self,
project_id,
service,
reauth=False,
verbose=False,
dialect='legacy'):
super(BigQueryPandasConnector, self).__init__(project_id)
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
self.dialect = dialect
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(LoggingMixin):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self, service, project_id, use_legacy_sql=True):
self.service = service
self.project_id = project_id
self.use_legacy_sql = use_legacy_sql
self.running_job_id = None
def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning={},
labels=None
):
"""
Creates a new, empty table in the dataset.
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:type schema_fields: list
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:return:
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {
'tableReference': {
'tableId': table_id
}
}
if schema_fields:
table_resource['schema'] = {'fields': schema_fields}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if labels:
table_resource['labels'] = labels
self.log.info('Creating Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
self.log.info('Table created successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
def create_external_table(self,
external_project_dataset_table,
schema_fields,
source_uris,
source_format='CSV',
autodetect=False,
compression='NONE',
ignore_unknown_values=False,
max_bad_records=0,
skip_leading_rows=0,
field_delimiter=',',
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
src_fmt_configs={},
labels=None
):
"""
Creates a new external table in the dataset with the data in Google
Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
for more details about these parameters.
:param external_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table>($<partition>) BigQuery
table name to create external table.
If <project> is not included, project will be the
project defined in the connection json.
:type external_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild
per-object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: string
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:type autodetect: bool
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: string
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: string
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: boolean
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
            source_format is CSV.
:type allow_jagged_rows: bool
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
"""
project_id, dataset_id, external_table_id = \
_split_tablename(table_input=external_project_dataset_table,
default_project_id=self.project_id,
var_name='external_project_dataset_table')
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.sourceFormat
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
compression = compression.upper()
allowed_compressions = ['NONE', 'GZIP']
if compression not in allowed_compressions:
raise ValueError("{0} is not a valid compression format. "
"Please use one of the following types: {1}"
.format(compression, allowed_compressions))
table_resource = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values
},
'tableReference': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': external_table_id,
}
}
if schema_fields:
table_resource['externalDataConfiguration'].update({
'schema': {
'fields': schema_fields
}
})
self.log.info('Creating external table: %s', external_project_dataset_table)
if max_bad_records:
table_resource['externalDataConfiguration']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
        if 'quote' not in src_fmt_configs:
src_fmt_configs['quote'] = quote_character
if 'allowQuotedNewlines' not in src_fmt_configs:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
if 'allowJaggedRows' not in src_fmt_configs:
src_fmt_configs['allowJaggedRows'] = allow_jagged_rows
src_fmt_to_param_mapping = {
'CSV': 'csvOptions',
'GOOGLE_SHEETS': 'googleSheetsOptions'
}
src_fmt_to_configs_mapping = {
'csvOptions': [
'allowJaggedRows', 'allowQuotedNewlines',
'fieldDelimiter', 'skipLeadingRows',
'quote'
],
'googleSheetsOptions': ['skipLeadingRows']
}
if source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[
src_fmt_to_param_mapping[source_format]
]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
table_resource['externalDataConfiguration'][src_fmt_to_param_mapping[
source_format]] = src_fmt_configs
if labels:
table_resource['labels'] = labels
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource
).execute()
self.log.info('External table created successfully: %s',
external_project_dataset_table)
except HttpError as err:
raise Exception(
'BigQuery job failed. Error was: {}'.format(err.content)
)
def run_query(self,
bql=None,
sql=None,
destination_dataset_table=False,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=False,
udf_config=False,
use_legacy_sql=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
query_params=None,
labels=None,
schema_update_options=(),
priority='INTERACTIVE',
time_partitioning={}):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param bql: (Deprecated. Use `sql` parameter instead) The BigQuery SQL
to execute.
:type bql: string
:param sql: The BigQuery SQL to execute.
:type sql: string
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:type destination_dataset_table: string
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: string
:param allow_large_results: Whether to allow large results.
:type allow_large_results: boolean
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: boolean
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
If `None`, defaults to `self.use_legacy_sql`.
:type use_legacy_sql: boolean
:type udf_config: list
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:type maximum_billing_tier: integer
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:type create_disposition: string
        :param query_params: a dictionary containing query parameter types and
values, passed to BigQuery
:type query_params: dict
        :param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
        :param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:type schema_update_options: tuple
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: string
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and
expiration as per API specifications. Note that 'field' is not available in
conjunction with dataset.table$partition.
:type time_partitioning: dict
"""
# TODO remove `bql` in Airflow 2.0 - Jira: [AIRFLOW-2513]
sql = bql if sql is None else sql
if bql:
import warnings
warnings.warn('Deprecated parameter `bql` used in '
'`BigQueryBaseCursor.run_query` '
'Use `sql` parameter instead to pass the sql to be '
'executed. `bql` parameter is deprecated and '
'will be removed in a future version of '
'Airflow.',
category=DeprecationWarning)
if sql is None:
raise TypeError('`BigQueryBaseCursor.run_query` missing 1 required '
'positional argument: `sql`')
# BigQuery also allows you to define how you want a table's schema to change
# as a side effect of a query job
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options. "
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
if use_legacy_sql is None:
use_legacy_sql = self.use_legacy_sql
configuration = {
'query': {
'query': sql,
'useLegacySql': use_legacy_sql,
'maximumBillingTier': maximum_billing_tier,
'maximumBytesBilled': maximum_bytes_billed,
'priority': priority
}
}
if destination_dataset_table:
assert '.' in destination_dataset_table, (
'Expected destination_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(destination_dataset_table)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
configuration['query'].update({
'allowLargeResults': allow_large_results,
'flattenResults': flatten_results,
'writeDisposition': write_disposition,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
})
if udf_config:
assert isinstance(udf_config, list)
configuration['query'].update({
'userDefinedFunctionResources': udf_config
})
if query_params:
if self.use_legacy_sql:
raise ValueError("Query paramaters are not allowed when using "
"legacy SQL")
else:
configuration['query']['queryParameters'] = query_params
if labels:
configuration['labels'] = labels
time_partitioning = _cleanse_time_partitioning(
destination_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['query'].update({
'timePartitioning': time_partitioning
})
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental "
"'schemaUpdateOptions': {0}".format(schema_update_options))
configuration['query'][
'schemaUpdateOptions'] = schema_update_options
return self.run_with_configuration(configuration)
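    # Hedged usage sketch (not part of the original module): persisting a
    # standard-SQL query result into a destination table; the dataset/table
    # names are placeholders.
    #
    #   cursor = BigQueryHook().get_conn().cursor()
    #   cursor.run_query(
    #       sql='SELECT name, value FROM `my_dataset.source_table`',
    #       destination_dataset_table='my_dataset.results_table',
    #       write_disposition='WRITE_TRUNCATE',
    #       use_legacy_sql=False)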
def run_extract( # noqa
self,
source_project_dataset_table,
destination_cloud_storage_uris,
compression='NONE',
export_format='CSV',
field_delimiter=',',
print_header=True,
labels=None):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
:type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if labels:
configuration['labels'] = labels
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED',
labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(project:|project.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: (project:|project.)<dataset>.<table>
:type destination_project_dataset_table: string
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
        :param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
if labels:
configuration['labels'] = labels
return self.run_with_configuration(configuration)
def run_load(self,
destination_project_dataset_table,
schema_fields,
source_uris,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
schema_update_options=(),
src_fmt_configs={},
time_partitioning={}):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table>($<partition>) BigQuery
table to load data into. If <project> is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:type destination_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild
per-object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: string
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: boolean
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
            source_format is CSV.
:type allow_jagged_rows: bool
        :param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: tuple
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and
expiration as per API specifications. Note that 'field' is not available in
conjunction with dataset.table$partition.
:type time_partitioning: dict
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options. "
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
'ignoreUnknownValues': ignore_unknown_values
}
}
time_partitioning = _cleanse_time_partitioning(
destination_project_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['load'].update({
'timePartitioning': time_partitioning
})
if schema_fields:
configuration['load']['schema'] = {'fields': schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental "
"'schemaUpdateOptions': {0}".format(schema_update_options))
configuration['load'][
'schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'ignoreUnknownValues' not in src_fmt_configs:
src_fmt_configs['ignoreUnknownValues'] = ignore_unknown_values
if quote_character is not None:
src_fmt_configs['quote'] = quote_character
if allow_quoted_newlines:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
src_fmt_to_configs_mapping = {
'CSV': [
'allowJaggedRows', 'allowQuotedNewlines', 'autodetect',
'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues',
'nullMarker', 'quote'
],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'PARQUET': ['autodetect', 'ignoreUnknownValues'],
'AVRO': [],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
return self.run_with_configuration(configuration)
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {'configuration': configuration}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
self.running_job_id = query_reply['jobReference']['jobId']
# Wait for query to finish.
keep_polling_job = True
while (keep_polling_job):
try:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id).execute()
if (job['status']['state'] == 'DONE'):
keep_polling_job = False
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.
format(job['status']['errorResult'], job))
else:
self.log.info('Waiting for job to complete : %s, %s',
self.project_id, self.running_job_id)
time.sleep(5)
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error, waiting for job to complete: %s',
err.resp.status, self.running_job_id)
time.sleep(5)
else:
                    raise Exception(
                        'BigQuery job status check failed. Final error was: '
                        '{}'.format(err.resp.status))
return self.running_job_id
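    # Hedged example (not part of the original module) of the `configuration`
    # dict accepted above; it maps directly onto BigQuery's job configuration
    # resource.
    #
    #   configuration = {
    #       'query': {
    #           'query': 'SELECT 1',
    #           'useLegacySql': False,
    #       }
    #   }
    #   job_id = cursor.run_with_configuration(configuration)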
def poll_job_complete(self, job_id):
jobs = self.service.jobs()
try:
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
if (job['status']['state'] == 'DONE'):
return True
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error while polling job with id %s',
err.resp.status, job_id)
else:
                raise Exception(
                    'BigQuery job status check failed. Final error was: '
                    '{}'.format(err.resp.status))
return False
def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if (self.running_job_id and
not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id,
self.running_job_id)
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id).execute()
else:
self.log.info('No running BigQuery jobs to cancel.')
return
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while (polling_attempts < max_polling_attempts and not job_complete):
polling_attempts = polling_attempts + 1
job_complete = self.poll_job_complete(self.running_job_id)
if (job_complete):
self.log.info('Job successfully canceled: %s, %s',
self.project_id, self.running_job_id)
elif (polling_attempts == max_polling_attempts):
self.log.info(
"Stopping polling due to timeout. Job with id %s "
"has not completed cancel and may or may not finish.",
self.running_job_id)
else:
self.log.info('Waiting for canceled job with id %s to finish.',
self.running_job_id)
time.sleep(5)
def get_schema(self, dataset_id, table_id):
"""
        Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, selected_fields=None, page_token=None,
start_index=None):
"""
Get the data of a given dataset.table and optionally with selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if selected_fields:
optional_params['selectedFields'] = selected_fields
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (self.service.tabledata().list(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
**optional_params).execute())
def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
:return:
"""
assert '.' in deletion_dataset_table, (
'Expected deletion_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(deletion_dataset_table)
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
self.log.info('Deleted table %s:%s.%s.', deletion_project,
deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception('Table deletion failed. Table does not exist.')
else:
self.log.info('Table does not exist. Skipping.')
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
        Creates a new, empty table in the dataset;
If the table already exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(
projectId=project_id, datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
self.log.info('Table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
self.log.info('Table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project=None,
view_project=None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(
projectId=source_project, datasetId=source_dataset).execute()
access = source_dataset_resource[
'access'] if 'access' in source_dataset_resource else []
view_access = {
'view': {
'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table
}
}
# check to see if the view we want to add already exists.
if view_access not in access:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
access.append(view_access)
return self.service.datasets().patch(
projectId=source_project,
datasetId=source_dataset,
body={
'access': access
}).execute()
else:
# if view is already in access, do nothing.
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
return source_dataset_resource
def delete_dataset(self,
project_id,
dataset_id
):
"""
        Delete a BigQuery dataset in your project.
        :param project_id: The name of the project that contains the dataset.
        :type project_id: str
        :param dataset_id: The dataset to be deleted.
:type dataset_id: str
:return:
"""
project_id = project_id if project_id is not None else self.project_id
self.log.info('Deleting from project: %s Dataset:%s',
project_id, dataset_id)
try:
self.service.datasets().delete(
projectId=project_id,
datasetId=dataset_id).execute()
self.log.info('Dataset deleted successfully: In project %s Dataset %s',
project_id, dataset_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id, use_legacy_sql=True):
super(BigQueryCursor, self).__init__(
service=service,
project_id=project_id,
use_legacy_sql=use_legacy_sql)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
sql = _bind_parameters(operation,
parameters) if parameters else operation
self.job_id = self.run_query(sql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param seq_of_parameters: List of dictionary parameters to substitute into the
query.
:type seq_of_parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
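    # Hedged usage sketch (not part of the original module); the dataset and
    # table names are placeholders:
    #
    #   cursor.execute("SELECT name FROM my_dataset.my_table WHERE id = %(id)s",
    #                  {"id": 7})
    #   row = cursor.fetchone()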
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (self.service.jobs().getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token).execute())
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
                self.page_token = None
                self.job_id = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences
(e.g. a list of tuples). An empty sequence is returned when no more rows are
available. The number of rows to fetch per call is specified by the parameter.
If it is not given, the cursor's arraysize determines the number of rows to be
fetched. The method should try to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number of rows not being
available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error`
(or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of
sequences (e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
        return self.buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in parameters.iteritems():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
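# Illustrative sketch (not part of the original hook): BigQuery's REST API returns
# every cell value as a string, and ``_bq_cast`` converts those raw values back to
# Python types before the cursor buffers them.
#
#     _bq_cast('42', 'INTEGER')       # -> 42
#     _bq_cast('3.14', 'FLOAT')       # -> 3.14
#     _bq_cast('true', 'BOOLEAN')     # -> True
#     _bq_cast(None, 'TIMESTAMP')     # -> None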
def _split_tablename(table_input, default_project_id, var_name=None):
assert default_project_id is not None, "INTERNAL: No default project is specified"
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
if table_input.count('.') + table_input.count(':') > 3:
raise Exception(('{var}Use either : or . to specify project '
'got {input}').format(
var=var_print(var_name), input=table_input))
cmpt = table_input.rsplit(':', 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(':') <= 1:
if cmpt[-1].count('.') != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception(('{var}Expect format of (<project:)<dataset>.<table>, '
'got {input}').format(
var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
assert project_id is None, ("{var}Use either : or . to specify project"
).format(var=var_print(var_name))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception(
('{var}Expect format of (<project.|<project:)<dataset>.<table>, '
'got {input}').format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
log = LoggingMixin().log
log.info('Project not included in {var}: {input}; '
'using project "{project}"'.format(
var=var_name,
input=table_input,
project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
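# Illustrative sketch (not part of the original hook; the project and table names
# below are assumptions): how ``_split_tablename`` resolves fully and partially
# qualified table references against a default project.
#
#     _split_tablename('my-project:my_dataset.my_table', 'default-project')
#     # -> ('my-project', 'my_dataset', 'my_table')
#     _split_tablename('my_dataset.my_table', 'default-project')
#     # -> ('default-project', 'my_dataset', 'my_table')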
def _cleanse_time_partitioning(destination_dataset_table, time_partitioning_in):
# if it is a partitioned table ($ is in the table name) add partition load option
time_partitioning_out = {}
if destination_dataset_table and '$' in destination_dataset_table:
assert not time_partitioning_in.get('field'), (
"Cannot specify field partition and partition name "
"(dataset.table$partition) at the same time"
)
time_partitioning_out['type'] = 'DAY'
time_partitioning_out.update(time_partitioning_in)
return time_partitioning_out
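# Illustrative usage sketch (not part of the original module; the ``service`` object,
# project id, and table names below are assumptions): the cursor follows the usual
# PEP 249 flow, with ``%(name)s`` placeholders bound by ``_bind_parameters`` and
# results paginated transparently by ``next``.
#
#     cursor = BigQueryCursor(service=service, project_id='my-project')
#     cursor.execute(
#         'SELECT name, age FROM [my-project:my_dataset.users] WHERE age > %(min_age)s',
#         {'min_age': 21})
#     for name, age in cursor.fetchmany(size=10):
#         print(name, age)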
| apache-2.0 |
Tong-Chen/scikit-learn | sklearn/linear_model/coordinate_descent.py | 1 | 52883 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
import itertools
import operator
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data
from ..utils import array2d, atleast2d_or_csc
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : bool
Fit or not an intercept
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
if Xy is None:
X = atleast2d_or_csc(X, copy=(copy_X and fit_intercept and not
sparse.isspmatrix(X)))
if not sparse.isspmatrix(X):
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
n_samples = X.shape[0]
else:
n_samples = len(y)
alpha_max = np.abs(Xy).max() / (n_samples * l1_ratio)
alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
return alphas
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, fit_intercept=None,
normalize=None, copy_X=True, coef_init=None,
verbose=False, return_models=False,
**params):
"""Compute Lasso path with coordinate descent
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
fit_intercept : bool
Fit or not an intercept.
WARNING : will be deprecated in 0.16
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
WARNING : will be deprecated in 0.16
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity
    return_models : boolean, optional, default False
        If ``True``, the function will return a list of models. Setting it
to ``False`` will change the function output returning the values
of the alphas and the coefficients along the path. Returning the
model list will be removed in version 0.16.
params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
models : a list of models along the regularization path
        (Is returned if ``return_models`` is set to ``True``.)
alphas : array, shape: [n_alphas + 1]
The alphas along the path where models are computed.
(Is returned, along with ``coefs``, when ``return_models`` is set
to ``False``)
coefs : shape (n_features, n_alphas + 1)
Coefficients along the path.
(Is returned, along with ``alphas``, when ``return_models`` is set
to ``False``).
dual_gaps : shape (n_alphas + 1)
The dual gaps and the end of the optimization for each alpha.
(Is returned, along with ``alphas``, when ``return_models`` is set
to ``False``).
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Deprecation Notice: Setting ``return_models`` to ``False`` will make
the Lasso Path return an output in the style used by :func:`lars_path`.
    This will become the norm as of version 0.16. Leaving ``return_models``
set to `True` will let the function return a list of models as before.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5],
... fit_intercept=False)
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
return_models=return_models, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, fit_intercept=True,
normalize=False, copy_X=True, coef_init=None,
verbose=False, return_models=False,
**params):
"""Compute Elastic-Net path with coordinate descent
The Elastic Net optimization function is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
fit_intercept : bool
Fit or not an intercept.
WARNING : will be deprecated in 0.16
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
WARNING : will be deprecated in 0.16
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity
return_models : boolean, optional, default False
        If ``True``, the function will return a list of models. Setting it
to ``False`` will change the function output returning the values
of the alphas and the coefficients along the path. Returning the
model list will be removed in version 0.16.
params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
models : a list of models along the regularization path
        (Is returned if ``return_models`` is set to ``True``.)
alphas : array, shape: [n_alphas + 1]
The alphas along the path where models are computed.
(Is returned, along with ``coefs``, when ``return_models`` is set
to ``False``)
coefs : shape (n_features, n_alphas + 1)
Coefficients along the path.
(Is returned, along with ``alphas``, when ``return_models`` is set
to ``False``).
dual_gaps : shape (n_alphas + 1)
The dual gaps and the end of the optimization for each alpha.
(Is returned, along with ``alphas``, when ``return_models`` is set
to ``False``).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
Deprecation Notice: Setting ``return_models`` to ``False`` will make
the Lasso Path return an output in the style used by :func:`lars_path`.
    This will become the norm as of version 0.15. Leaving ``return_models``
set to `True` will let the function return a list of models as before.
See also
--------
ElasticNet
ElasticNetCV
"""
if return_models:
warnings.warn("Use enet_path(return_models=False), as it returns the"
" coefficients and alphas instead of just a list of"
" models as previously `lasso_path`/`enet_path` did."
" `return_models` will eventually be removed in 0.16,"
" after which, returning alphas and coefs"
" will become the norm.",
DeprecationWarning, stacklevel=2)
if normalize is True:
warnings.warn("normalize param will be removed in 0.16."
" Intercept fitting and feature normalization will be"
" done in estimators.",
DeprecationWarning, stacklevel=2)
else:
normalize = False
if fit_intercept is True or fit_intercept is None:
warnings.warn("fit_intercept param will be removed in 0.16."
" Intercept fitting and feature normalization will be"
" done in estimators.",
DeprecationWarning, stacklevel=2)
if fit_intercept is None:
fit_intercept = True
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=copy_X and fit_intercept)
n_samples, n_features = X.shape
if sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.ones(n_features)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy=False)
n_samples = X.shape[0]
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
if coef_init is None:
coef_ = np.zeros(n_features, dtype=np.float64)
else:
coef_ = coef_init
models = []
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
dual_gaps = np.empty(n_alphas)
tol = params.get('tol', 1e-4)
positive = params.get('positive', False)
max_iter = params.get('max_iter', 1000)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if sparse.isspmatrix(X):
coef_, dual_gap_, eps_ = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, positive)
else:
coef_, dual_gap_, eps_ = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, positive)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations')
coefs[:, i] = coef_
dual_gaps[i] = dual_gap_
if return_models:
model = ElasticNet(
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept if sparse.isspmatrix(X) else False,
precompute=precompute)
model.coef_ = coefs[:, i]
model.dual_gap_ = dual_gaps[-1]
if fit_intercept and not sparse.isspmatrix(X):
model.fit_intercept = True
model._set_intercept(X_mean, y_mean, X_std)
models.append(model)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_models:
return models
else:
return alphas, coefs, dual_gaps
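# Illustrative sketch (not part of the original module; X and y below are assumed
# training arrays): the non-deprecated call style returns the path arrays directly
# instead of a list of fitted models.
#
#     alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=50,
#                                          return_models=False)
#     # coefs has shape (n_features, n_alphas): one column of coefficients per alpha.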
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear Model trained with L1 and L2 prior as regularizer
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept: bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
copy_X : boolean, optional, default False
If ``True``, X will be copied; else, it may be overwritten.
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive: bool, optional
When set to ``True``, forces the coefficients to be positive.
Attributes
----------
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape = (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
# From now on X can be touched inplace
y = np.asarray(y, dtype=np.float64)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std,
coef_init=coef_[k], max_iter=self.max_iter)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape = (n_samples,)
The predicted decision function
"""
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self).decision_function(X)
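# Illustrative sketch (not part of the original module): converting separate L1/L2
# strengths ``a`` and ``b`` (penalty a * L1 + b * L2) into this parametrization, as
# described in the ElasticNet docstring above.
#
#     a, b = 0.1, 0.05
#     alpha, l1_ratio = a + b, a / (a + b)      # alpha = 0.15, l1_ratio ~= 0.667
#     clf = ElasticNet(alpha=alpha, l1_ratio=l1_ratio)
#     clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])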
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
Attributes
----------
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute='auto', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute='auto', copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, l1_ratio=1,
X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype: a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
precompute = path_params['precompute']
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
# del path_params['precompute']
path_params = path_params.copy()
path_params['fit_intercept'] = False
path_params['normalize'] = False
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = atleast2d_or_csc(X_train, dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y[train], **path_params)
del X_train
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean - np.dot(X_mean, coefs)
residues = safe_sparse_dot(X_test, coefs) - y_test[:, np.newaxis]
residues += intercepts[np.newaxis, :]
this_mses = (residues ** 2).mean(axis=0)
return this_mses, l1_ratio
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
# Dealing right with copy_X is important in the following:
# multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = atleast2d_or_csc(X, copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=copy_X)
copy_X = False
y = np.asarray(y, dtype=np.float64)
if y.ndim > 1:
raise ValueError("For multi-task outputs, fit the linear model "
"per output/task")
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
if alphas is None:
mean_l1_ratio = 1.
if hasattr(self, 'l1_ratio'):
mean_l1_ratio = np.mean(self.l1_ratio)
alphas = _alpha_grid(X, y, l1_ratio=mean_l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X)
n_alphas = len(alphas)
path_params.update({'alphas': alphas, 'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
all_mse_paths = list()
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
for l1_ratio, mse_alphas in itertools.groupby(
Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_path_residuals)(
X, y, train, test, self.path, path_params,
l1_ratio=l1_ratio, X_order='F',
dtype=np.float64)
for l1_ratio in l1_ratios for train, test in folds
), operator.itemgetter(1)):
mse_alphas = [m[0] for m in mse_alphas]
mse_alphas = np.array(mse_alphas)
mse = np.mean(mse_alphas, axis=0)
i_best_alpha = np.argmin(mse)
this_best_mse = mse[i_best_alpha]
all_mse_paths.append(mse_alphas.T)
if this_best_mse < best_mse:
best_alpha = alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
self.alphas_ = np.asarray(alphas)
self.mse_path_ = np.squeeze(all_mse_paths)
# Refit the model with the parameters selected
model = ElasticNet()
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.fit(X, y)
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter: int, optional
The maximum number of iterations
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
amount of verbosity
Attributes
----------
``alpha_`` : float
The amount of penalization chosen by cross validation
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
``mse_path_`` : array, shape = (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
``alphas_`` : numpy array
The grid of alphas used for fitting
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
n_jobs = 1
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose)
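# Illustrative sketch (not part of the original module; X and y are assumed training
# arrays): LassoCV selects the regularization strength by cross-validation over an
# automatically generated alpha grid.
#
#     lasso_cv = LassoCV(n_alphas=100, cv=5)
#     lasso_cv.fit(X, y)
#     lasso_cv.alpha_                 # alpha selected by cross-validation
#     lasso_cv.mse_path_.shape        # (n_alphas, n_folds), per the docstring above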
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
amount of verbosity
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
Attributes
----------
``alpha_`` : float
The amount of penalization chosen by cross validation
``l1_ratio_`` : float
The compromise between l1 and l2 penalization chosen by
cross validation
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
``intercept_`` : float | array, shape = (n_targets, n_features)
Independent term in the decision function.
``mse_path_`` : array, shape = (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
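# Illustrative sketch (not part of the original module; X and y are assumed training
# arrays): cross-validating over a list of l1_ratio values, weighted towards the
# Lasso end as recommended in the docstring above.
#
#     enet_cv = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1], cv=5, n_jobs=-1)
#     enet_cv.fit(X, y)
#     enet_cv.alpha_, enet_cv.l1_ratio_   # parameters selected by cross-validation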
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norms of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
``intercept_`` : array, shape = (n_tasks,)
Independent term in decision function.
``coef_`` : array, shape = (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X: ndarray, shape = (n_samples, n_features)
Data
y: ndarray, shape = (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = array2d(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
squeeze_me = False
if y.ndim == 1:
squeeze_me = True
y = y[:, np.newaxis]
n_samples, n_features = X.shape
_, n_tasks = y.shape
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
self.coef_, self.dual_gap_, self.eps_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol)
self._set_intercept(X_mean, y_mean, X_std)
# Make sure that the coef_ have the same shape as the given 'y',
# to predict with the same shape
if squeeze_me:
self.coef_ = self.coef_.squeeze()
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norms of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
``coef_`` : array, shape = (n_tasks, n_features)
parameter vector (W in the cost function formula)
``intercept_`` : array, shape = (n_tasks,)
independent term in decision function.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
| bsd-3-clause |
ahoyosid/scikit-learn | sklearn/manifold/isomap.py | 36 | 7119 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
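# Illustrative usage sketch (not part of the original module): embed training data,
# then map new points through the geodesic graph as described in ``transform``.
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X_train, X_new = rng.randn(100, 10), rng.randn(5, 10)
#     iso = Isomap(n_neighbors=8, n_components=2)
#     X_train_2d = iso.fit_transform(X_train)        # shape (100, 2)
#     X_new_2d = iso.transform(X_new)                # shape (5, 2)
#     err = iso.reconstruction_error()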
| bsd-3-clause |
jereze/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 69 | 8605 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
mihail911/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkcairo.py | 69 | 2207 | """
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print 'backend_gtkcairo.%s()' % fn_name()
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKCairo(thisFig)
return FigureManagerGTK(canvas, num)
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.ctx = pixmap.cairo_create()
self.ctx.save() # restore, save - when call new_gc()
else:
def set_pixmap (self, pixmap):
self.ctx = cairo.gtk.gdk_cairo_create (pixmap)
self.ctx.save() # restore, save - when call new_gc()
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
            toolbar = NavigationToolbar2Cairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2Cairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
| gpl-3.0 |
IssamLaradji/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
pysofe/pysofe | pysofe/visualization.py | 1 | 22194 | """
Provides some visualization capabilities.
"""
# IMPORTS
try:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
except ImportError as err:
    # matplotlib is required for all of the plotting below;
    # there is no fallback, so re-raise the import error
    raise err
# DEBUGGING
from IPython import embed as IPS
import numpy as np
import pysofe
from pysofe import utils
def show(obj, *args, **kwargs):
"""
Wrapper function for the visualization of
various pysofe objects.
Parameters
----------
obj
The pysofe object to visualize
"""
# select appropriate visualizer and call its `show()` method
if isinstance(obj, pysofe.elements.base.Element):
V = ElementVisualizer()
V.show(element=obj, **kwargs)
elif isinstance(obj, pysofe.meshes.mesh.Mesh):
V = MeshVisualizer()
V.show(obj, *args, **kwargs)
elif isinstance(obj, pysofe.quadrature.gaussian.GaussQuadSimp):
V = QuadRuleVisualizer()
V.show(obj, *args, **kwargs)
elif isinstance(obj, pysofe.spaces.space.FESpace):
V = FESpaceVisualizer()
V.show(obj, *args, **kwargs)
elif isinstance(obj, pysofe.spaces.functions.FEFunction):
V = FunctionVisualizer()
V.show(obj, **kwargs)
else:
raise NotImplementedError()
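# A minimal usage sketch (not part of the original module). It relies only on
# the dispatch rules in ``show`` above; how the ``mesh`` / ``fe_space`` objects
# are constructed is assumed to happen elsewhere in pysofe and is not shown:
#
#     from pysofe.visualization import show
#
#     show(mesh)                     # plain mesh plot
#     show(mesh, 'nodes', 'cells')   # annotate node and cell numbers
#     show(fe_space)                 # degrees of freedom of a finite element space
#     show(quad_rule, mesh=mesh)     # quadrature points drawn on the mesh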
class Visualizer(object):
"""
Base class for all visualizers.
"""
def plot(self, *args, **kwargs):
fig, axes = self._plot(*args, **kwargs)
return fig, axes
def _plot(self, *args, **kwargs):
raise NotImplementedError()
def show(self, *args, **kwargs):
fig, axes = self.plot(*args, **kwargs)
fig.show()
class MeshVisualizer(Visualizer):
"""
Visualizes the :py:class:`pysofe.meshes.Mesh` class.
"""
def _plot(self, mesh, *args, **kwargs):
fontsize = kwargs.get('fontsize', 9)
fig = plt.figure()
ax = fig.add_subplot(111)
if mesh.dimension == 1:
nodes = mesh.nodes[:,0]
zeros = np.zeros_like(nodes)
ax.plot(nodes, zeros, '-o')
elif mesh.dimension == 2:
cols = range(3)
ax.triplot(mesh.nodes[:,0], mesh.nodes[:,1], np.asarray(mesh.cells[:,cols] - 1))
else:
raise NotImplementedError()
# zoom out to make outer faces visible
xlim = list(ax.get_xlim()); ylim = list(ax.get_ylim())
xlim[0] -= 0.1; xlim[1] += 0.1
ylim[0] -= 0.1; ylim[1] += 0.1
ax.set_xlim(xlim)
ax.set_ylim(ylim)
show_all = ('all' in args)
# nodes
if 'nodes' in args or show_all:
for i in xrange(mesh.nodes.shape[0]):
if mesh.dimension == 1:
ax.text(x=mesh.nodes[i,0], y=0., s=i+1,
color='red', fontsize=fontsize)
elif mesh.dimension == 2:
ax.text(x=mesh.nodes[i,0], y=mesh.nodes[i,1], s=i+1,
color='red', fontsize=fontsize)
else:
raise NotImplementedError()
# edges
if 'edges' in args or show_all:
edges = mesh.edges
bary = 0.5 * mesh.nodes[edges - 1,:].sum(axis=1)
for i in xrange(edges.shape[0]):
if mesh.dimension == 1:
ax.text(x=bary[i,0], y=0, s=i+1,
color='green', fontsize=fontsize)
elif mesh.dimension == 2:
ax.text(x=bary[i,0], y=bary[i,1], s=i+1,
color='green', fontsize=fontsize)
# elements
if mesh.dimension > 1 and ('cells' in args or show_all):
cells = mesh.cells
bary = mesh.nodes[cells - 1,:].sum(axis=1) / 3.
for i in xrange(cells.shape[0]):
ax.text(x=bary[i,0], y=bary[i,1], s=i+1,
color='blue', fontsize=fontsize)
if 'local vertices' in args:
cells = mesh.cells
cell_nodes = mesh.nodes.take(cells - 1, axis=0)
bary = cell_nodes.sum(axis=1) / 3.
nE = cells.shape[0]
# calculate positions where to put the local vertex numbers
local_1 = cell_nodes[:,0] + 0.4 * (bary - cell_nodes[:,0])
local_2 = cell_nodes[:,1] + 0.4 * (bary - cell_nodes[:,1])
local_3 = cell_nodes[:,2] + 0.4 * (bary - cell_nodes[:,2])
for i in xrange(nE):
ax.text(x=local_1[i,0], y=local_1[i,1], s=1, color='red', fontsize=fontsize)
ax.text(x=local_2[i,0], y=local_2[i,1], s=2, color='red', fontsize=fontsize)
ax.text(x=local_3[i,0], y=local_3[i,1], s=3, color='red', fontsize=fontsize)
return fig, ax
class ElementVisualizer(Visualizer):
"""
Visualizes :py:class:`pysofe.elements.base.Element` classes.
"""
def _plot(self, element, **kwargs):
"""
Plots the basis function or their derivatives of the given element.
Parameters
----------
element : pysofe.base.Element
The finite element of which to plot the basis functions
codim : int
The codimension of the entity for which to plot the respective basis functions
d : int
The derivation order
indices : array_like
Specify certain basis function to show
resolution : int
Resolution of the grid points for the plot
typ : str
The plotting type ('surface' or 'scatter')
shadow : bool
Whether to plot a shadow of the surface
"""
# get arguments
dim = kwargs.get('dim', element.dimension)
d = kwargs.get('d', 0)
indices = kwargs.get('indices', None)
resolution = kwargs.get('resolution', 10*np.ceil(np.log(element.order+1)))
typ = kwargs.get('typ', 'surface')
shadow = kwargs.get('shadow', False)
layout = kwargs.get('layout', None)
if d != 0:
raise NotImplementedError()
if element.dimension > 2:
raise NotImplementedError()
codim = element.dimension - dim
if element.dimension == 1:
project = None
elif element.dimension == 2:
if codim == 0:
project = '3d'
elif codim == 1:
project = None
# create grid points at which to evaluate the basis functions
ls = np.linspace(0., 1., num=resolution)
if element.dimension == 1:
points = ls
elif element.dimension == 2:
if codim == 0:
X,Y = np.meshgrid(ls, ls)
XY = np.vstack([np.hstack(X), np.hstack(Y)])
points = XY.compress(XY.sum(axis=0) <= 1., axis=1)
elif codim == 1:
points = ls
# evaluate all basis function at all points
basis = element.eval_basis(points, deriv=d) # nB x nP
if indices is not None:
assert hasattr(indices, '__iter__')
indices = np.asarray(indices, dtype=int) - 1
assert indices.min() >= 0
basis = basis.take(indices, axis=0)
else:
indices = np.arange(np.size(basis, axis=0))
# create a subplot for each basis function
nB = np.size(basis, axis=0)
fig = plt.figure()
if layout is None:
nB_2 = int(0.5*(nB+1))
for i in xrange(1, nB_2+1):
if codim == 0:
fig.add_subplot(nB_2,2,2*i-1, projection=project)
if 2*i <= nB:
fig.add_subplot(nB_2,2,2*i, projection=project)
elif codim == 1:
fig.add_subplot(nB_2,2,2*i-1, projection=project)
if 2*i <= nB:
fig.add_subplot(nB_2,2,2*i, projection=project)
else:
assert 1 <= len(layout) <= 2
if len(layout) == 1:
layout = (1,layout[0])
assert np.multiply(*layout) >= nB
for j in xrange(nB):
if codim == 0:
fig.add_subplot(layout[0], layout[1], j+1, projection=project)
elif codim == 1:
fig.add_subplot(layout[0], layout[1], j+1, projection=project)
if element.dimension == 1:
for i in xrange(nB):
fig.axes[i].plot(points.ravel(), basis[i].ravel())
#fig.axes[i].set_title(r"$\varphi_{{ {} }}$".format(i+1), fontsize=32)
fig.axes[i].set_title(r"$\varphi_{{ {} }}$".format(indices[i]+1), fontsize=32)
elif element.dimension == 2:
if codim == 0:
for i in xrange(nB):
if typ == 'scatter':
fig.axes[i].scatter(points[0], points[1], basis[i])
elif typ == 'surface':
fig.axes[i].plot_trisurf(points[0], points[1], basis[i],
cmap=cm.jet, linewidth=0., antialiased=False)
if shadow:
c = fig.axes[i].tricontourf(points[0], points[1], basis[i],
zdir='z', offset=0., colors='gray')
fig.axes[i].autoscale_view(True,True,True)
#fig.axes[i].set_title(r"$\varphi_{{ {} }}$".format(i+1), fontsize=32)
fig.axes[i].set_title(r"$\varphi_{{ {} }}$".format(indices[i]+1), fontsize=32)
elif codim == 1:
for i in xrange(nB):
fig.axes[i].plot(points.ravel(), basis[i].ravel())
#fig.axes[i].set_title(r"$\psi_{{ {} }}$".format(i+1), fontsize=32)
fig.axes[i].set_title(r"$\psi_{{ {} }}$".format(indices[i]+1), fontsize=32)
return fig, fig.axes
class FunctionVisualizer(Visualizer):
'''
Base class for visualizing functions.
'''
def _plot(self, fnc, **kwargs):
'''
Plots the function.
Parameters
----------
...
'''
self.fnc = fnc
if fnc.fe_space.mesh.dimension == 1:
mode = '1dplot'
elif fnc.fe_space.mesh.dimension == 2:
mode = kwargs.get('mode', 'trisurface')
else:
raise NotImplementedError()
# get visualization data
#----------------------------------------------------
        points, values, cells = self._get_visualization_data(mode, **kwargs)
# set up figure and axes
#----------------------------------------------------
# get number of plots
n_values = values.shape[0]
layout = kwargs.get('layout', None)
if layout is None:
if n_values == 1:
nrows = 1
ncols = 1
elif 1 < n_values < 9:
nrows = int(np.ceil(n_values/2.))
ncols = 2
else:
nrows = int(np.ceil(n_values/3.))
ncols = 3
else:
nrows, ncols = layout
        # create figure and subplots (if necessary)
fig = kwargs.get('fig', None)
axes = kwargs.get('ax', None)
if axes is None:
if mode in ('trisurface', 'surface', 'wireframe'):
subplot_kw = {'projection' : '3d'}
else:
subplot_kw = {}
fig, axes = plt.subplots(nrows, ncols, squeeze=False, subplot_kw=subplot_kw)
else:
axes = np.atleast_2d(axes)
assert axes.ndim == 2
assert nrows <= axes.shape[0]
assert ncols <= axes.shape[1]
        # call the plotting routine specified by `mode`
#----------------------------------------------------
if mode == '1dplot':
axes[0,0].plot(points[0], values[0])
elif mode == 'trisurface':
self._plot_trisurf(axes=axes, X=points[0], Y=points[1], triangles=cells,
Z=values, **kwargs)
elif mode in ('tripcolor', 'heatmap'):
self._plot_tripcolor(axes=axes, X=points[0], Y=points[1], triangles=cells,
Z=values, **kwargs)
return fig, axes
    def _get_visualization_data(self, mode, **kwargs):
if mode in ('1dplot',):
local_points = np.linspace(0., 1., 10)[None,:]
points = self.fnc.fe_space.mesh.ref_map.eval(local_points)
_, I = np.unique(points.flat, return_index=True)
points = points.ravel().take(I)[None,:]
values = self.fnc(points=local_points, deriv=0).ravel().take(I)
values = np.atleast_2d(values)
cells = np.arange(points.size, dtype='int').repeat(2)[1:-1].reshape((-1,2))
return points, values, cells
elif mode in ('trisurface', 'tripcolor', 'heatmap'):
# get points, values and triangles for the plot
return self._get_triangulation_data(**kwargs)
else:
msg = "Invalid visualization mode for functions! ({})"
raise ValueError(msg.format(mode))
def _get_triangulation_data(self, **kwargs):
# generate local points for the function evaluation
n_sub_grid = kwargs.get('n_sub_grid', self.fnc.order + 1)
local_points = utils.lagrange_nodes(dimension=2, order=n_sub_grid)
# project them to their global counterparts
order = 'C'
points = self.fnc.fe_space.mesh.ref_map.eval(points=local_points,
deriv=0)
points = np.vstack([points[:,:,0].ravel(order=order), points[:,:,1].ravel(order=order)])
# get unique points indices
_, I = utils.unique_rows(points.T, return_index=True)
points = points.take(I, axis=1)
# evaluate the function w.r.t the unique points
d = kwargs.get('d', 0)
if 0:
if isinstance(self.fnc, pysofe.spaces.functions.FEFunction):
eval_local = kwargs.get('eval_local', True)
if eval_local:
values = self.fnc(points=local_points, d=d, local=True)
else:
values = self.fnc(points=points, d=d, local=False)
elif isinstance(self.fnc, pysofe.spaces.functions.MeshFunction):
values = self.fnc(points=points, d=d)
else:
fnc_args = kwargs.get('fnc_args', dict())
if kwargs.get('eval_local', True):
values = self.fnc(points=local_points, deriv=d, **fnc_args)
else:
values = self.fnc(points=points, d=d, local=False, **fnc_args)
if d == 0:
values = values.ravel(order=order).take(I, axis=0)
elif d == 1:
values = np.asarray([values.take(i, axis=-1).ravel(order=order).take(I, axis=0) for i in xrange(values.shape[-1])])
else:
raise ValueError('Invalid derivation order for visualization! ({})'.format(d))
# get cells corresponding to the unique points
from scipy.spatial import Delaunay
cells = Delaunay(points.T).simplices
values = np.atleast_2d(values)
return points, values, cells
def _plot_trisurf(self, axes, X, Y, triangles, Z, **kwargs):
'''
Wrapper for the :py:meth:`plot_trisurf` method of
the :py:class:`Axes3D` class.
Parameters
----------
X, Y : array_like
1D arrays of the triangulation node coordinates
triangles : array_like
Connectivity array of the triangulation
Z : array_like
1D array of the values at the triangulation nodes
'''
# set default values
cmap = kwargs.get('cmap', cm.jet)
# get layout
n_values = Z.shape[0]
nrows, ncols = axes.shape
# iterate over axes and plot
for i in xrange(nrows):
for j in xrange(ncols):
if i * ncols + j < n_values:
# call mpl_toolkit's plot_trisurf
axes[i,j].plot_trisurf(X, Y, triangles, Z[i * ncols + j],
shade=True, cmap=cmap,
linewidth=0., antialiased=False)
def _plot_tripcolor(self, axes, X, Y, triangles, Z, **kwargs):
'''
Wrapper for the :py:meth:`pyplot.tripcolor` method.
Parameters
----------
X, Y : array_like
1D arrays of the triangulation node coordinates
triangles : array_like
Connectivity array of the triangulation
Z : array_like
1D array of the values at the triangulation nodes
'''
# set default values
shading = kwargs.get('shading', 'flat')
cmap = kwargs.get('cmap', cm.jet)
axis_off = kwargs.get('axis_off', True)
# get layout
n_values = Z.shape[0]
nrows, ncols = axes.shape
# iterate over axes and plot
for i in xrange(nrows):
for j in xrange(ncols):
if i * ncols + j < n_values:
# call matplotlib.pyplot's tripcolor
axes[i,j].tripcolor(X, Y, triangles, Z[i * ncols + j],
shading=shading, cmap=cmap)
if axis_off:
# don't show axis
axes[i,j].set_axis_off()
class QuadRuleVisualizer(Visualizer):
"""
Visualizes the numerical integration scheme by plotting the
quadrature points.
"""
def _plot(self, quad_rule, *args, **kwargs):
assert isinstance(quad_rule, pysofe.quadrature.gaussian.GaussQuadSimp)
# get entity dimension for which to plot points
dim = kwargs.get('d', quad_rule.dimension)
        if dim not in (1, 2):
msg = "Visualization not supported for this dimension, yet ({})"
raise ValueError(msg.format(dim))
# get quadrature points
points = quad_rule.points[dim]
# check if mesh is given
mesh = kwargs.get('mesh', None)
if mesh is not None and isinstance(mesh, pysofe.meshes.mesh.Mesh):
            # if so, plot points on the whole mesh
V = MeshVisualizer()
fig, axes = V.plot(mesh)
            # transfer local points to global points on the mesh
points = np.vstack(mesh.ref_map.eval(points)).T
axes.plot(points[0], points[1], 'r.')
else:
# if not, plot points on reference domain
# set up figure and axes
fig = plt.figure()
axes = fig.add_subplot(111)
if dim == 1:
nodes = np.array([[0.], [1.]])
cells = np.array([[1, 2]])
axes.plot(nodes[:,0], np.zeros_like(nodes[:,0]))
axes.plot(points[0], np.zeros_like(points[0]), 'r.')
elif dim == 2:
nodes = np.array([[0., 0.], [1., 0.], [0., 1.]])
cells = np.array([[1, 2, 3]])
axes.triplot(nodes[:,0], nodes[:,1], cells-1)
axes.plot(points[0], points[1], 'r.')
# zoom out to make outer faces visible
xlim = list(axes.get_xlim()); ylim = list(axes.get_ylim())
xlim[0] -= 0.1; xlim[1] += 0.1
ylim[0] -= 0.1; ylim[1] += 0.1
axes.set_xlim(xlim)
axes.set_ylim(ylim)
return fig, axes
class FESpaceVisualizer(Visualizer):
"""
Visualizes the finite element space by plotting
its degrees of freedom.
"""
def _plot(self, fe_space, *args, **kwargs):
fontsize = kwargs.get('fontsize', 9)
# first plot the mesh
mesh = fe_space.mesh
V = MeshVisualizer()
fig, axes = V.plot(mesh)
# get number of entities for each topological dimension
n_entities = mesh.topology.n_entities
dof_tuple = fe_space.element.dof_tuple
n_dof_per_dim = np.asarray(n_entities) * dof_tuple
dofs = np.arange(fe_space.n_dof) + 1
entity_dofs = [zip(*(arr.reshape((dof_tuple[i], -1))))
for i, arr in
enumerate(np.split(dofs, n_dof_per_dim.cumsum()[:-1]))]
# plot dofs for each topological dimension
# nodes
for i in xrange(mesh.nodes.shape[0]):
if mesh.dimension == 1:
axes.text(x=mesh.nodes[i,0], y=0., s=entity_dofs[0][i],
color='red', fontsize=fontsize)
elif mesh.dimension == 2:
axes.text(x=mesh.nodes[i,0], y=mesh.nodes[i,1],
s=entity_dofs[0][i],
color='red', fontsize=fontsize)
else:
raise NotImplementedError()
# edges
edges = mesh.edges
bary = 0.5 * mesh.nodes[edges - 1,:].sum(axis=1)
for i in xrange(edges.shape[0]):
if mesh.dimension == 1:
axes.text(x=bary[i,0], y=0, s=entity_dofs[1][i],
color='red', fontsize=fontsize)
elif mesh.dimension == 2:
axes.text(x=bary[i,0], y=bary[i,1], s=entity_dofs[1][i],
color='red', fontsize=fontsize)
# elements
if mesh.dimension > 1:
cells = mesh.cells
bary = mesh.nodes[cells - 1,:].sum(axis=1) / 3.
for i in xrange(cells.shape[0]):
axes.text(x=bary[i,0], y=bary[i,1], s=entity_dofs[2][i],
color='red', fontsize=fontsize)
return fig, axes
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| gpl-2.0 |
JackKelly/neuralnilm_prototype | scripts/e284.py | 2 | 4751 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
# max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2013-07-01"),
seq_length=512,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.7,
n_seq_per_batch=16,
# subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=lambda x, t: mdn_nll(x, t).mean(),
updates_func=momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
100: 5e-04,
500: 1e-04,
1000: 5e-05,
2000: 1e-05,
3000: 5e-06,
4000: 1e-06,
10000: 5e-07,
50000: 1e-07
},
plotter=MDNPlotter
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': RecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': RecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 1
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=5000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
vipints/oqtans | oqtans_tools/EasySVM/0.3.3/scripts/plots.py | 2 | 10115 | #############################################################################################
# #
# This class is part of the MLB-Galaxy package, adding some sequence analysis #
# functionality to PSU's Galaxy framework. #
# Copyright (C) 2008 Cheng Soon Ong <[email protected]> #
# Copyright (C) 2008 Gunnar Raetsch <[email protected]> #
# Copyright (C) 2007 Sebastian J. Schultheiss <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, see http://www.gnu.org/licenses #
# or write to the Free Software Foundation, Inc., 51 Franklin Street, #
# Fifth Floor, Boston, MA 02110-1301 USA #
# #
#############################################################################################
# #
# Original Author: Sebastian J. Schultheiss, version 0.77 #
# Please add a notice of any modifications here: #
# Gunnar Raetsch: rewrote code for training on sequences to be read from files #
# Cheng Soon Ong: Added code for educational toolbox #
# Gunnar Raetsch: Added code for PRC curve #
# #
#############################################################################################
import sys
import random
import numpy
import warnings
import shutil
from shogun.Features import Labels
from shogun.Evaluation import *
def plotroc(output, LTE, draw_random=False, figure_fname="", roc_label='ROC'):
import pylab
import matplotlib
pylab.figure(1,dpi=150,figsize=(8,8))
fontdict=dict(family="cursive",weight="bold",size=7,y=1.05) ;
output_a = numpy.array(output)
output_m = numpy.zeros((1,len(output_a))) ;
for i in range(len(output_a)): output_m[0][i]=output_a[i] ;
LTE_a = numpy.array(LTE)
idx = numpy.lexsort(output_m) ;
hits = numpy.zeros(len(output_a)) ;
false_alarms = numpy.zeros(len(output_a)) ;
LTEidxp=numpy.zeros(len(output_a)) ;
LTEidxn=numpy.zeros(len(output_a)) ;
nump=0 ;
numn=0 ;
for i in range(len(output_a)):
LTEidxp[i] = (LTE_a[idx[i]]>0)
LTEidxn[i] = (LTE_a[idx[i]]<0)
if LTE_a[i]>0: nump+=1
if LTE_a[i]<0: numn+=1
csLTEidxp=numpy.cumsum(LTEidxp)
csLTEidxn=numpy.cumsum(LTEidxn)
for i in range(len(output_a)):
hits[i]=1 - csLTEidxp[i]/nump
false_alarms[i]=1-csLTEidxn[i]/numn
pylab.plot(false_alarms, hits, 'b-', label=roc_label)
pm=PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
#points=pm.get_ROC()
#points=numpy.array(points).T # for pylab.plot
#pylab.plot(points[0], points[1], 'b-', label=roc_label)
if draw_random:
pylab.plot([0, 1], [0, 1], 'r-', label='random guessing')
pylab.axis([0, 1, 0, 1])
ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
pylab.xticks(ticks,size=10)
pylab.yticks(ticks,size=10)
pylab.xlabel('1 - specificity (false positive rate)',size=10)
pylab.ylabel('sensitivity (true positive rate)',size=10)
pylab.legend(loc='lower right', prop = matplotlib.font_manager.FontProperties('tiny'))
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
auROC=pm.get_auROC()
return auROC ;
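# Illustrative call of plotroc() (toy numbers, not from a real experiment; it
# needs the same shogun + pylab environment as the function itself):
#
#     outputs = [0.8, -0.3, 0.5, -0.9, 0.1]   # SVM decision values
#     labels  = [1, -1, 1, -1, -1]            # true labels in {-1, +1}
#     auROC = plotroc(outputs, labels, draw_random=True,
#                     figure_fname='roc.png', roc_label='toy ROC')
#
# plotprc() below is called analogously (it has no draw_random flag).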
def plotprc(output, LTE, figure_fname="", prc_label='PRC'):
import pylab
import matplotlib
pylab.figure(2,dpi=150,figsize=(8,8))
pm=PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
points=pm.get_PRC()
points=numpy.array(points).T # for pylab.plot
pylab.plot(points[0], points[1], 'b-', label=prc_label)
pylab.axis([0, 1, 0, 1])
ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
pylab.xticks(ticks,size=10)
pylab.yticks(ticks,size=10)
pylab.xlabel('sensitivity (true positive rate)',size=10)
pylab.ylabel('precision (1 - false discovery rate)',size=10)
pylab.legend(loc='lower right')
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
auPRC=pm.get_auPRC()
return auPRC ;
def plotcloud(cloud, figure_fname="", label='cloud'):
import pylab
import matplotlib
pylab.figure(1,dpi=150,figsize=(8,8))
pos = []
neg = []
for i in xrange(len(cloud)):
if cloud[i][0]==1:
pos.append(cloud[i][1:])
elif cloud[i][0]==-1:
neg.append(cloud[i][1:])
fontdict=dict(family="cursive",weight="bold",size=10,y=1.05) ;
pylab.title(label, fontdict)
points=numpy.array(pos).T # for pylab.plot
pylab.plot(points[0], points[1], 'b+', label='positive')
points=numpy.array(neg).T # for pylab.plot
pylab.plot(points[0], points[1], 'rx', label='negative')
#pylab.axis([0, 1, 0, 1])
#ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
#pylab.xticks(ticks,size=10)
#pylab.yticks(ticks,size=10)
pylab.xlabel('dimension 1',size=10)
pylab.ylabel('dimension 2',size=10)
pylab.legend(loc='lower right')
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
def plot_poims(poimfilename, poim, max_poim, diff_poim, poim_totalmass, poimdegree, max_len):
"""Plot a summary of the information in poims"""
import pylab
import matplotlib
pylab.figure(3, dpi=150, figsize=(8,10))
# summary figures
fontdict=dict(family="cursive",weight="bold",size=7,y=1.05) ;
pylab.subplot(3,2,1)
pylab.title('Total POIM Mass', fontdict)
pylab.plot(poim_totalmass) ;
pylab.ylabel('weight mass', size=5)
pylab.subplot(3,2,3)
pylab.title('POIMs', fontdict)
pylab.pcolor(max_poim, shading='flat') ;
pylab.subplot(3,2,5)
pylab.title('Differential POIMs', fontdict)
pylab.pcolor(diff_poim, shading='flat') ;
for plot in [3, 5]:
pylab.subplot(3,2,plot)
ticks=numpy.arange(1., poimdegree+1, 1, dtype=numpy.float64)
ticks_str = []
for i in xrange(0, poimdegree):
ticks_str.append("%i" % (i+1))
ticks[i] = i + 0.5
pylab.yticks(ticks, ticks_str)
pylab.ylabel('degree', size=5)
# per k-mer figures
fontdict=dict(family="cursive",weight="bold",size=7,y=1.04) ;
# 1-mers
pylab.subplot(3,2,2)
pylab.title('1-mer Positional Importance', fontdict)
pylab.pcolor(poim[0], shading='flat') ;
ticks_str = ['A', 'C', 'G', 'T']
ticks = [0.5, 1.5, 2.5, 3.5]
pylab.yticks(ticks, ticks_str, size=5)
pylab.axis([0, max_len, 0, 4])
# 2-mers
pylab.subplot(3,2,4)
pylab.title('2-mer Positional Importance', fontdict)
pylab.pcolor(poim[1], shading='flat') ;
i=0 ;
ticks=[] ;
ticks_str=[] ;
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
ticks_str.append(l1+l2)
ticks.append(0.5+i) ;
i+=1 ;
pylab.yticks(ticks, ticks_str, fontsize=5)
pylab.axis([0, max_len, 0, 16])
# 3-mers
pylab.subplot(3,2,6)
pylab.title('3-mer Positional Importance', fontdict)
pylab.pcolor(poim[2], shading='flat') ;
i=0 ;
ticks=[] ;
ticks_str=[] ;
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
for l3 in ['A', 'C', 'G', 'T']:
if numpy.mod(i,4)==0:
ticks_str.append(l1+l2+l3)
ticks.append(0.5+i) ;
i+=1 ;
pylab.yticks(ticks, ticks_str, fontsize=5)
pylab.axis([0, max_len, 0, 64])
# x-axis on last two figures
for plot in [5, 6]:
pylab.subplot(3,2,plot)
pylab.xlabel('sequence position', size=5)
# finishing up
for plot in xrange(0,6):
pylab.subplot(3,2,plot+1)
pylab.xticks(fontsize=5)
for plot in [1,3,5]:
pylab.subplot(3,2,plot)
pylab.yticks(fontsize=5)
pylab.subplots_adjust(hspace=0.35) ;
# write to file
warnings.filterwarnings('ignore','Could not match*')
pylab.savefig('/tmp/temppylabfig.png')
shutil.move('/tmp/temppylabfig.png',poimfilename)
| bsd-3-clause |
mmagnus/EvoClustRNA | benchmark/evox_collect_data.py | 2 | 4012 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
example:
[mm] evo$ ./evox_collect_data.py ade
"""
from __future__ import print_function
import argparse
import os
import glob
import pandas
def get_parser():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-d', "--dryrun",
action="store_true", help="dry run", default=False)
#parser.add_argument('-p', '--path', help="", default='')
parser.add_argument('paths', nargs='*', help="", default='')
parser.add_argument('-c', '--case', help="only one case, for test")
parser.add_argument("-v", "--verbose",
action="store_true", help="be verbose")
return parser
def exe(cmd, dryrun):
print(cmd)
if not dryrun: os.system(cmd)
def main(dryrun, paths, case):
root = os.getcwd()
print(paths)
for path in paths:
print(path)
if path in ['ade', 'gmp', 'rp06', 'rp13', 'rp14', 'rp17', 'thf', 'tpp', 'trna']:
pass
else:
continue
if path:
os.chdir(path + '/evox/') # jump inside evox
cases = glob.glob('*')
# ade / evox / <type>
rmsd_motif = None
rmsds = None
infs = None
rmsd_motif = pandas.DataFrame()
for c in cases: # simulation!
# mode only for a specific case
if case: # only if this is used
if c != case:
print('!!! skip ' + c + '!!!')
continue
if os.path.isdir(c):
print('------------------------------')
print (' inside %s' % c)
os.chdir(c) # go inside a folder
print(os.getcwd())
# add column pdb case
# collect motif rmsd
fn = 'RMSD_motif.csv'
print(fn)
try:
df = pandas.read_csv(fn, index_col=None)
except:
## placeholder_fn = "/home/magnus/work/evo/rmsd_motif_fake.csv"
## print('Insert placeholder')
## df = pandas.read_csv(placeholder_fn, index_col=None)
os.chdir(root + '/' + path + '/evox/')
continue # skip
df['pdb'] = path
df['group_name'] = c
print('fn', fn)
print('df', df)
rmsd_motif = rmsd_motif.append(df)
print(rmsd_motif)
## try:
## rmsd_motif.append(df) # append to the df
## print(rmsd_motif)
## except AttributeError:
## rmsd_motif = df
## # add column pdb case
## # collect motif rmsd
## df = pandas.read_csv('rmsds.csv')
## df['group_name'] = case
## df['pdb'] = path
## if not rmsds:
## rmsds = df
## else:
## rmsds.append(df) # append to the df
## # add column pdb case
## # collect motif rmsd
## df = pandas.read_csv('inf.csv')
## df['group_name'] = case
## df['pdb'] = path
## if not infs:
## infs = df
## else:
## infs.append(df) # append to the df
os.chdir(root + '/' + path + '/evox/') # root + '/' + case) # path + '/evox/') # root)
print(rmsd_motif)
#print(rmsds)
#print(infs)
os.chdir(root)
rmsd_motif.to_csv(path + '_rmsd_motf.csv', index=False)
#rmsds.to_csv('rmsds.csv', index=False)
#infs.to_csv('infs.csv', index=False)
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
main(args.dryrun, args.paths, args.case)
| gpl-3.0 |
guschmue/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 79 | 2464 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| apache-2.0 |
AnasGhrab/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
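    A slightly fuller sketch on toy data (illustrative only; it exercises just
    the ``fit``, ``scores_`` and ``get_support`` API defined in this class):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(40, 5)
    >>> y = X[:, 0] + 0.1 * rng.randn(40)
    >>> rlasso = RandomizedLasso(random_state=0).fit(X, y)
    >>> scores = rlasso.scores_                     # selection frequency per feature
    >>> support = rlasso.get_support(indices=True)  # indices above the threshold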
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the training data and
    fitting an L1-penalized LogisticRegression on each resampling. Features
    that are selected more often across resamplings are considered informative.
    The procedure is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
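
    A minimal sketch on synthetic two-class data (illustrative only and
    therefore skipped by doctest; ``get_support`` is assumed from the
    feature-selection interface of this estimator):

    >>> import numpy as np
    >>> X = np.random.randn(50, 5)
    >>> y = (X[:, 0] > 0).astype(int)
    >>> randomized_logistic.fit(X, y)      # doctest: +SKIP
    >>> randomized_logistic.get_support()  # doctest: +SKIP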
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
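    Examples
    --------
    A minimal sketch on synthetic data (illustrative only and therefore
    skipped by doctest):

    >>> import numpy as np
    >>> from sklearn.linear_model import lasso_stability_path
    >>> X = np.random.randn(40, 6)
    >>> y = X[:, 0] + 0.1 * np.random.randn(40)
    >>> alphas_grid, scores_path = lasso_stability_path(X, y)  # doctest: +SKIP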
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
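    # For every resampled path, mark the grid alphas at which each feature is
    # active (non-zero coefficient); dividing by n_resampling below turns
    # these counts into selection frequencies.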
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/examples/axes_grid/demo_curvelinear_grid2.py | 15 | 1839 | import numpy as np
#from matplotlib.path import Path
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axes_grid.axislines import Subplot
import mpl_toolkits.axes_grid.angle_helper as angle_helper
def curvelinear_test1(fig):
"""
    Grid for a custom transform.
"""
def tr(x, y):
sgn = np.sign(x)
x, y = np.abs(np.asarray(x)), np.asarray(y)
return sgn*x**.5, y
    def inv_tr(x, y):
sgn = np.sign(x)
x, y = np.asarray(x), np.asarray(y)
return sgn*x**2, y
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = None,
lat_cycle = None,
lon_minmax = None, #(0, np.inf),
lat_minmax = None,
)
grid_helper = GridHelperCurveLinear((tr, inv_tr),
extreme_finder=extreme_finder)
ax1 = Subplot(fig, 111, grid_helper=grid_helper)
    # ax1 will have ticks and gridlines defined by the given
# transform (+ transData of the Axes). Note that the transform of
# the Axes itself (i.e., transData) is not affected by the given
# transform.
fig.add_subplot(ax1)
ax1.imshow(np.arange(25).reshape(5,5),
vmax = 50, cmap=plt.cm.gray_r,
interpolation="nearest",
origin="lower")
# tick density
grid_helper.grid_finder.grid_locator1._nbins = 6
grid_helper.grid_finder.grid_locator2._nbins = 6
if 1:
fig = plt.figure(1, figsize=(7, 4))
fig.clf()
curvelinear_test1(fig)
plt.show()
| mit |
abhishekgahlot/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit a regression model with Automatic Relevance Determination (ARD), a
sparse Bayesian regression method closely related to Bayesian Ridge
Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, because a
sparsity-inducing prior is imposed on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |