repo_name | path | copies | size | content | license
---|---|---|---|---|---|
sarahgrogan/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. The plot illustrates that although feature 2
has a strong coefficient in the full model, it does not
tell us much about `y` compared to feature 1 alone.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
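# A small optional check (not part of the original example): print the two
# fitted coefficients and the R^2 on the 20 held-out rows defined above.
print("Coefficients:", ols.coef_)
print("Test R^2: %.3f" % ols.score(X_test, y_test))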
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
# Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
larsbutler/swift | swift/common/middleware/xprofile.py | 8 | 9623 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Profiling middleware for Swift Servers.
The current implementation is based on the eventlet-aware profiler. (In the
future, more profilers could be added to collect more data for analysis.)
It profiles all incoming requests and accumulates CPU timing statistics
for performance tuning and optimization. A mini web UI is also provided
for analyzing the profiling data; it can be accessed at the URLs shown
below.
Index page for browsing profile data::
http://SERVER_IP:PORT/__profile__
List all profiles to return profile ids in json format::
http://SERVER_IP:PORT/__profile__/
http://SERVER_IP:PORT/__profile__/all
Retrieve specific profile data in different formats::
http://SERVER_IP:PORT/__profile__/PROFILE_ID?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/current?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/all?format=[default|json|csv|ods]
Retrieve metrics from specific function in json format::
http://SERVER_IP:PORT/__profile__/PROFILE_ID/NFL?format=json
http://SERVER_IP:PORT/__profile__/current/NFL?format=json
http://SERVER_IP:PORT/__profile__/all/NFL?format=json
NFL is the concatenation of the file name, function name and first
line number.
e.g.::
account.py:50(GETorHEAD)
or with full path:
opt/stack/swift/swift/proxy/controllers/account.py:50(GETorHEAD)
A list of URL examples:
http://localhost:8080/__profile__ (proxy server)
http://localhost:6200/__profile__/all (object server)
http://localhost:6201/__profile__/current (container server)
http://localhost:6202/__profile__/12345?format=json (account server)
The profiling middleware can be configured in the paste file for WSGI servers
such as the proxy, account, container and object servers. Please refer to the
sample configuration files in the etc directory.
The profiling data is provided in four formats: binary (the default), json,
csv and ods spreadsheet; the spreadsheet format requires the odfpy library:
sudo pip install odfpy
There is also a simple visualization capability built on the matplotlib
toolkit, which must be installed if you want to visualize the statistics:
sudo apt-get install python-matplotlib
"""
import os
import sys
import time
from eventlet import greenthread, GreenPool, patcher
import eventlet.green.profile as eprofile
import six
from six.moves import urllib
from swift import gettext_ as _
from swift.common.utils import get_logger, config_true_value
from swift.common.swob import Request
from x_profile.exceptions import NotFoundException, MethodNotAllowed,\
ProfileException
from x_profile.html_viewer import HTMLViewer
from x_profile.profile_model import ProfileLog
DEFAULT_PROFILE_PREFIX = '/tmp/log/swift/profile/default.profile'
# unwind the iterator; it may call start_response, do lots of work, etc
PROFILE_EXEC_EAGER = """
app_iter = self.app(environ, start_response)
app_iter_ = list(app_iter)
if hasattr(app_iter, 'close'):
app_iter.close()
"""
# don't unwind the iterator (don't consume resources)
PROFILE_EXEC_LAZY = """
app_iter_ = self.app(environ, start_response)
"""
thread = patcher.original('thread') # non-monkeypatched module needed
# This monkey patch fixes a problem with the eventlet profile tool,
# which cannot accumulate profiling results across multiple calls
# of runcall and runctx.
def new_setup(self):
self._has_setup = True
self.cur = None
self.timings = {}
self.current_tasklet = greenthread.getcurrent()
self.thread_id = thread.get_ident()
self.simulate_call("profiler")
def new_runctx(self, cmd, globals, locals):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runctx(self, cmd, globals, locals)
finally:
self.TallyTimings()
def new_runcall(self, func, *args, **kw):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runcall(self, func, *args, **kw)
finally:
self.TallyTimings()
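# Illustration only (not part of the original module): once the three methods
# above are bound onto the eventlet Profile class (see get_profiler() near the
# bottom of this file), timings from successive runcall()/runctx() invocations
# accumulate in a single profile instead of being reset on every call, e.g.:
#
#   profiler = get_profiler('eventlet.green.profile')
#   profiler.runcall(sorted, range(1000))
#   profiler.runcall(sum, range(1000))
#   # both calls are now reflected in profiler.timings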
class ProfileMiddleware(object):
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route='profile')
self.log_filename_prefix = conf.get('log_filename_prefix',
DEFAULT_PROFILE_PREFIX)
dirname = os.path.dirname(self.log_filename_prefix)
# Note: creating this directory may fail with a permission error.
# It is better to create it and grant access to the current
# user in advance.
if not os.path.exists(dirname):
os.makedirs(dirname)
self.dump_interval = float(conf.get('dump_interval', 5.0))
self.dump_timestamp = config_true_value(conf.get(
'dump_timestamp', 'no'))
self.flush_at_shutdown = config_true_value(conf.get(
'flush_at_shutdown', 'no'))
self.path = conf.get('path', '__profile__').replace('/', '')
self.unwind = config_true_value(conf.get('unwind', 'no'))
self.profile_module = conf.get('profile_module',
'eventlet.green.profile')
self.profiler = get_profiler(self.profile_module)
self.profile_log = ProfileLog(self.log_filename_prefix,
self.dump_timestamp)
self.viewer = HTMLViewer(self.path, self.profile_module,
self.profile_log)
self.dump_pool = GreenPool(1000)
self.last_dump_at = None
def __del__(self):
if self.flush_at_shutdown:
self.profile_log.clear(str(os.getpid()))
def _combine_body_qs(self, request):
wsgi_input = request.environ['wsgi.input']
query_dict = request.params
qs_in_body = wsgi_input.read()
query_dict.update(urllib.parse.parse_qs(qs_in_body,
keep_blank_values=True,
strict_parsing=False))
return query_dict
def dump_checkpoint(self):
current_time = time.time()
if self.last_dump_at is None or self.last_dump_at +\
self.dump_interval < current_time:
self.dump_pool.spawn_n(self.profile_log.dump_profile,
self.profiler, os.getpid())
self.last_dump_at = current_time
def __call__(self, environ, start_response):
request = Request(environ)
path_entry = request.path_info.split('/')
# hijack the favicon request sent by the browser so that it doesn't
# invoke the profiling hook and contaminate the data.
if path_entry[1] == 'favicon.ico':
start_response('200 OK', [])
return ''
elif path_entry[1] == self.path:
try:
self.dump_checkpoint()
query_dict = self._combine_body_qs(request)
content, headers = self.viewer.render(request.url,
request.method,
path_entry,
query_dict,
self.renew_profile)
start_response('200 OK', headers)
if isinstance(content, six.text_type):
content = content.encode('utf-8')
return [content]
except MethodNotAllowed as mx:
start_response('405 Method Not Allowed', [])
return '%s' % mx
except NotFoundException as nx:
start_response('404 Not Found', [])
return '%s' % nx
except ProfileException as pf:
start_response('500 Internal Server Error', [])
return '%s' % pf
except Exception as ex:
start_response('500 Internal Server Error', [])
return _('Error on render profiling results: %s') % ex
else:
_locals = locals()
code = self.unwind and PROFILE_EXEC_EAGER or\
PROFILE_EXEC_LAZY
self.profiler.runctx(code, globals(), _locals)
app_iter = _locals['app_iter_']
self.dump_checkpoint()
return app_iter
def renew_profile(self):
self.profiler = get_profiler(self.profile_module)
def get_profiler(profile_module):
if profile_module == 'eventlet.green.profile':
eprofile.Profile._setup = new_setup
eprofile.Profile.runctx = new_runctx
eprofile.Profile.runcall = new_runcall
# workaround to import the profile module by name (supported in Python 2.6)
__import__(profile_module)
return sys.modules[profile_module].Profile()
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def profile_filter(app):
return ProfileMiddleware(app, conf)
return profile_filter
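# Minimal usage sketch (not part of the original module; the demo app and conf
# values are hypothetical, the conf keys mirror ProfileMiddleware.__init__):
#
#   def demo_app(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [b'hello']
#
#   make_filter = filter_factory({}, dump_interval='10', unwind='no')
#   profiled_app = make_filter(demo_app)
#   # profiled_app can be served by any WSGI server; the collected data is
#   # then browsable under /__profile__ as described in the module docstring.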
| apache-2.0 |
mojoboss/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
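# For orientation (illustration only, not executed by the benchmark):
# benchmark() returns a plain dict, e.g.
#   stats = benchmark(PCA(n_components=2), np.random.RandomState(0).randn(50, 5))
#   stats['time']   # seconds spent in fit()
#   stats['error']  # mean absolute reconstruction error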
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
sinhrks/pandas-ml | pandas_ml/skaccessors/test/test_grid_search.py | 1 | 2125 | #!/usr/bin/env python
import numpy as np
import pandas as pd
import sklearn.datasets as datasets
import sklearn.grid_search as gs
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestGridSearch(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.grid_search.GridSearchCV, gs.GridSearchCV)
self.assertIs(df.grid_search.ParameterGrid, gs.ParameterGrid)
self.assertIs(df.grid_search.ParameterSampler, gs.ParameterSampler)
self.assertIs(df.grid_search.RandomizedSearchCV, gs.RandomizedSearchCV)
def test_grid_search(self):
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100]},
{'kernel': ['linear'], 'C': [1, 10, 100]}]
df = pdml.ModelFrame(datasets.load_digits())
cv = df.grid_search.GridSearchCV(df.svm.SVC(C=1), tuned_parameters, cv=5)
with tm.RNGContext(1):
df.fit(cv)
result = df.grid_search.describe(cv)
expected = pd.DataFrame({'mean': [0.97161937, 0.9476906, 0.97273233, 0.95937674, 0.97273233,
0.96271564, 0.94936004, 0.94936004, 0.94936004],
'std': [0.01546977, 0.0221161, 0.01406514, 0.02295168, 0.01406514,
0.01779749, 0.01911084, 0.01911084, 0.01911084],
'C': [1, 1, 10, 10, 100, 100, 1, 10, 100],
'gamma': [0.001, 0.0001, 0.001, 0.0001, 0.001, 0.0001,
np.nan, np.nan, np.nan],
'kernel': ['rbf'] * 6 + ['linear'] * 3},
columns=['mean', 'std', 'C', 'gamma', 'kernel'])
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_frame_equal(result, expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| bsd-3-clause |
Tong-Chen/scikit-learn | examples/randomized_search.py | 57 | 3208 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
iris = load_digits()
X, y = iris.data, iris.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
q1ang/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return  # DPGMM does not support setting the means and
# covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed to trigger a ``LinAlgError``
exception when computing a ``GMM`` with full covariance matrices and fixed
mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with two 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
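def _naive_vs_centered_covariance_sketch():
    # Not called by the test suite; a small numerical sketch (added for
    # illustration, not scikit-learn code) of the round-off issue described in
    # the docstring of check_positive_definite_covars above: with a large mean
    # and float32 precision, E[x^2] - mean(x)^2 can lose all significant digits
    # and even turn negative, while the centered form stays non-negative.
    x = np.random.RandomState(1).randn(1000).astype(np.float32) * 1e-3 + 1e4
    naive_var = np.mean(x * x) - np.mean(x) ** 2     # cancellation-prone
    centered_var = np.mean((x - np.mean(x)) ** 2)    # numerically stable
    return naive_var, centered_var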
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
linebp/pandas | pandas/io/date_converters.py | 10 | 1827 | """This module is designed for community-supported date conversion functions"""
from pandas.compat import range, map
import numpy as np
import pandas._libs.lib as lib
def parse_date_time(date_col, time_col):
date_col = _maybe_cast(date_col)
time_col = _maybe_cast(time_col)
return lib.try_parse_date_and_time(date_col, time_col)
def parse_date_fields(year_col, month_col, day_col):
year_col = _maybe_cast(year_col)
month_col = _maybe_cast(month_col)
day_col = _maybe_cast(day_col)
return lib.try_parse_year_month_day(year_col, month_col, day_col)
def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,
second_col):
year_col = _maybe_cast(year_col)
month_col = _maybe_cast(month_col)
day_col = _maybe_cast(day_col)
hour_col = _maybe_cast(hour_col)
minute_col = _maybe_cast(minute_col)
second_col = _maybe_cast(second_col)
return lib.try_parse_datetime_components(year_col, month_col, day_col,
hour_col, minute_col, second_col)
def generic_parser(parse_func, *cols):
N = _check_columns(cols)
results = np.empty(N, dtype=object)
for i in range(N):
args = [c[i] for c in cols]
results[i] = parse_func(*args)
return results
def _maybe_cast(arr):
if not arr.dtype.type == np.object_:
arr = np.array(arr, dtype=object)
return arr
def _check_columns(cols):
if not len(cols):
raise AssertionError("There must be at least 1 column")
head, tail = cols[0], cols[1:]
N = len(head)
for i, n in enumerate(map(len, tail)):
if n != N:
raise AssertionError('All columns must have the same length: {0}; '
'column {1} has length {2}'.format(N, i, n))
return N
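# Illustrative usage (not part of this module; file and column names are
# hypothetical): these converters are meant to be handed to read_csv's
# date_parser together with parse_dates, e.g.
#
#   import pandas as pd
#   df = pd.read_csv('data.csv',
#                    parse_dates={'timestamp': ['date', 'time']},
#                    date_parser=parse_date_time)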
| bsd-3-clause |
likithkailas/StreamingSystems | python/pyspark/sql/dataframe.py | 1 | 61492 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
else:
from itertools import imap as map
from pyspark import copy_func, since
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import BatchedSerializer, PickleSerializer, UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import *
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(object):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SQLContext`::
people = sqlContext.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the data frame, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SQLContext
people = sqlContext.read.parquet("...")
department = sqlContext.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this RDD as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SQLContext`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this DataFrame.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
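# Example (a sketch, not from the original docstring; the output path
# is hypothetical):
#   df.write.mode("overwrite").parquet("/tmp/people.parquet")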
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Experimental.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
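# Example (a sketch, not from the original docstring; assumes ``df`` reads from
# a streaming source and the paths are hypothetical):
#   query = (df.writeStream.format("parquet")
#            .option("checkpointLocation", "/tmp/ckpt")
#            .start("/tmp/out"))
#   query.stop()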
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> df.explain()
== Physical Plan ==
Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
if extended:
print(self._jdf.queryExecution().toString())
else:
print(self._jdf.queryExecution().simpleString())
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns true if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Experimental
"""
return self._jdf.isStreaming()
@since(1.3)
def show(self, n=20, truncate=True):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: If set to True, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and aligns cells right.
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20))
else:
print(self._jdf.showString(n, int(truncate)))
def __repr__(self):
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.collectToPython()
return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this DataFrame.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.toPythonIterator()
return _load_from_socket(port, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
return self.limit(num).collect()
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (C{MEMORY_AND_DISK}).
.. note:: the default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified, it defaults to (C{MEMORY_AND_DISK}).
.. note:: the default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
@since(2.1)
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to False to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions.
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
``numPartitions`` can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement, fraction, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
>>> df.sample(False, 0.5, 42).count()
2
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd = self._jdf.sample(withReplacement, fraction, long(seed))
return DataFrame(rdd, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new DataFrame that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 5|
| 1| 9|
+---+-----+
"""
if not isinstance(col, str):
raise ValueError("col must be a string, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the DataFrame. Weights will
be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
1
>>> splits[1].count()
3
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
@ignore_unicode_prefix
@since(2.1)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default 'inner'.
One of `inner`, `outer`, `left_outer`, `right_outer`, `leftsemi`.
The following performs a full outer join between ``df1`` and ``df2``.
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()
[Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name=u'Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name=u'Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], basestring):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes statistics for numeric and string columns.
        This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no \
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
Note that this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
@ignore_unicode_prefix
@since(1.3)
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name=u'Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name=u'Bob')]
"""
if isinstance(item, basestring):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
@since(1.3)
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
            or a SQL expression given as a string.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy.agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this
frame and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by a distinct.
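        A minimal illustrative sketch, reusing the doctest ``df`` set up in this
        module's ``_test`` helper (skipped, so the shown output is not asserted):
        >>> df.union(df).distinct().count()  # doctest: +SKIP
        2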
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this
frame and another frame.
.. note:: Deprecated in 2.0, use union instead.
"""
return self.union(other)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this frame
but not in another frame.
This is equivalent to `EXCEPT` in SQL.
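        A minimal illustrative sketch with the doctest ``df`` (skipped; subtracting
        a frame from itself leaves no rows):
        >>> df.subtract(df).count()  # doctest: +SKIP
        0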
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
            If specified, drop rows that have fewer than `thresh` non-null values.
            This overrides the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, dict)):
raise ValueError("value should be a float, int, long, string, or dict")
if isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
@since(1.4)
def replace(self, to_replace, value, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
:param to_replace: int, long, float, string, or list.
Value to be replaced.
            If `to_replace` is a dict, then `value` is ignored and `to_replace` must be a
mapping from column name (string) to replacement value. The value to be
replaced must be an int, long, float, or string.
:param value: int, long, float, string, or list.
Value to use to replace holes.
The replacement value must be an int, long, float, or string. If `value` is a
            list or tuple, `value` should be of the same length as `to_replace`.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if not isinstance(to_replace, (float, int, long, basestring, list, tuple, dict)):
raise ValueError(
"to_replace should be a float, int, long, string, list, tuple, or dict")
if not isinstance(value, (float, int, long, basestring, list, tuple)):
raise ValueError("value should be a float, int, long, string, list, or tuple")
rep_dict = dict()
if isinstance(to_replace, (float, int, long, basestring)):
to_replace = [to_replace]
if isinstance(to_replace, tuple):
to_replace = list(to_replace)
if isinstance(value, tuple):
value = list(value)
if isinstance(to_replace, list) and isinstance(value, list):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length")
rep_dict = dict(zip(to_replace, value))
elif isinstance(to_replace, list) and isinstance(value, (float, int, long, basestring)):
rep_dict = dict([(tr, value) for tr in to_replace])
elif isinstance(to_replace, dict):
rep_dict = to_replace
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
elif isinstance(subset, basestring):
subset = [subset]
if not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of a numerical column of a
DataFrame.
The result of this algorithm has the following deterministic bound:
If the DataFrame has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the DataFrame so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
        presented in [[http://dx.doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
:param col: the name of the numerical column
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities
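        A minimal illustrative call on the doctest ``df`` (skipped; the exact value
        returned depends on the data and on the requested precision):
        >>> df.approxQuantile("age", [0.5], 0.25)  # doctest: +SKIP
        [2.0]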
"""
if not isinstance(col, str):
raise ValueError("col should be a string.")
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
return list(jaq)
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a DataFrame as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
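        A minimal illustrative call (skipped; a numeric column is always perfectly
        correlated with itself, so the sketch reuses ``age`` twice):
        >>> df.corr("age", "age")  # doctest: +SKIP
        1.0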
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
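        A minimal illustrative call on the doctest ``df`` (skipped; 4.5 is the
        sample variance of the two ``age`` values 2 and 5):
        >>> df.cov("age", "age")  # doctest: +SKIP
        4.5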
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the DataFrame.
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"http://dx.doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no \
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
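        A minimal illustrative call on the doctest ``df`` (skipped; the result column
        name and the ordering of the items shown here are only a sketch):
        >>> df.freqItems(["age"], support=0.5).collect()  # doctest: +SKIP
        [Row(age_freqItems=[2, 5])]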
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
        :param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param cols: a string name of the column to drop, or a
:class:`Column` to drop, or a list of string name of the columns to drop.
>>> df.drop('age').collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.drop(df.age).collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name=u'Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name=u'Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, basestring):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
def toDF(self, *cols):
"""Returns a new class:`DataFrame` that with new specified column names
:param cols: list of new column names (string)
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def toPandas(self):
"""Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
        Note that this method should only be used if the resulting pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
This is only available if Pandas is installed and available.
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
import pandas as pd
return pd.DataFrame.from_records(self.collect(), columns=self.columns)
##########################################################################################
# Pandas compatibility
##########################################################################################
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
Row(name='Bob', age=5)]).toDF()
globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
Row(name='Bob', age=5, height=None),
Row(name='Tom', age=None, height=None),
Row(name=None, age=None, height=None)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
basnijholt/holoviews | holoviews/plotting/plotly/__init__.py | 2 | 3501 | from __future__ import absolute_import, division, unicode_literals
from ...core.options import Store, Cycle, Options
from ...core import (Overlay, NdOverlay, Layout, NdLayout, GridSpace,
GridMatrix, config)
from ...element import * # noqa (Element import for registration)
from .renderer import PlotlyRenderer
from .annotation import * # noqa (API import)
from .element import * # noqa (API import)
from .chart import * # noqa (API import)
from .chart3d import * # noqa (API import)
from .raster import * # noqa (API import)
from .plot import * # noqa (API import)
from .stats import * # noqa (API import)
from .tabular import * # noqa (API import)
from ...core.util import LooseVersion, VersionError
import plotly
if LooseVersion(plotly.__version__) < '3.4.0':
raise VersionError(
"The plotly extension requires a plotly version >=3.4.0, "
"please upgrade from plotly %s to a more recent version."
% plotly.__version__, plotly.__version__, '3.4.0')
Store.renderers['plotly'] = PlotlyRenderer.instance()
if len(Store.renderers) == 1:
Store.set_current_backend('plotly')
Store.register({Points: ScatterPlot,
Scatter: ScatterPlot,
Curve: CurvePlot,
Area: AreaPlot,
Spread: SpreadPlot,
ErrorBars: ErrorBarsPlot,
# Statistics elements
Bivariate: BivariatePlot,
Distribution: DistributionPlot,
Bars: BarPlot,
BoxWhisker: BoxWhiskerPlot,
Violin: ViolinPlot,
# Raster plots
Raster: RasterPlot,
Image: RasterPlot,
HeatMap: HeatMapPlot,
QuadMesh: QuadMeshPlot,
# 3D Plot
Scatter3D: Scatter3DPlot,
Surface: SurfacePlot,
Path3D: Path3DPlot,
TriSurface: TriSurfacePlot,
Trisurface: TriSurfacePlot, # Alias, remove in 2.0
# Tabular
Table: TablePlot,
ItemTable: TablePlot,
# Annotations
Labels: LabelPlot,
# Container Plots
Overlay: OverlayPlot,
NdOverlay: OverlayPlot,
Layout: LayoutPlot,
NdLayout: LayoutPlot,
GridSpace: GridPlot,
GridMatrix: GridPlot}, backend='plotly')
options = Store.options(backend='plotly')
dflt_cmap = 'hot' if config.style_17 else 'fire'
point_size = np.sqrt(6) # Matches matplotlib default
Cycle.default_cycles['default_colors'] = ['#30a2da', '#fc4f30', '#e5ae38',
'#6d904f', '#8b8b8b']
# Charts
options.Curve = Options('style', color=Cycle(), line_width=2)
options.ErrorBars = Options('style', color='black')
options.Scatter = Options('style', color=Cycle())
options.Points = Options('style', color=Cycle())
options.Area = Options('style', color=Cycle(), line_width=2)
options.Spread = Options('style', color=Cycle(), line_width=2)
options.TriSurface = Options('style', cmap='viridis')
# Rasters
options.Image = Options('style', cmap=dflt_cmap)
options.Raster = Options('style', cmap=dflt_cmap)
options.QuadMesh = Options('style', cmap=dflt_cmap)
options.HeatMap = Options('style', cmap='RdBu_r')
| bsd-3-clause |
Sharpie/python-kitchensink | ks/time/_pandas.py | 1 | 1754 | # -*- coding: utf-8 -*-
"""
ks.time.util
~~~~~~~~~~~~
Utility functions for dealing with Pandas timestamps.
:copyright: (c) 2010 -- 2012 by Charlie Sharpsteen
:license: BSD
"""
from pandas import date_range, Timestamp
from pandas.tseries.frequencies import to_offset
def split_to_ranges(start, end, freq = 'D', tz = None, round = True):
"""
Generates a list of intervals that spans a given time period.
Parameters
----------
start : string or datetime-like,
Left bound for generating dates
end : string or datetime-like,
Right bound for generating dates
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
round: boolean, default True
        Specifies whether the end date is rounded up so that the last
        interval has length 'freq', or left as-is, thus generating an
        irregular final interval but allowing the set of intervals to exactly
        represent the timespan between start and end.
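    Examples
    --------
    A sketch only; the exact ``Timestamp`` repr (and whether ``zip`` returns a
    list or an iterator) may differ between Python and pandas versions:
    >>> split_to_ranges('2012-01-01', '2012-01-03', freq='D')  # doctest: +SKIP
    [(Timestamp('2012-01-01 00:00:00'), Timestamp('2012-01-02 00:00:00')),
     (Timestamp('2012-01-02 00:00:00'), Timestamp('2012-01-03 00:00:00'))]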
"""
# Coerce start and end to Timestamp objects and freq to a DateOffset object
start = Timestamp(start)
end = Timestamp(end)
freq = to_offset(freq)
# Calculate timestamps that bracket time periods of length freq.
breaks = [d for d in date_range(start, end, freq = freq, tz = tz)]
# Extend if necessary so that the end timestamp is included.
if breaks[-1] < end:
if round:
breaks.append(breaks[-1] + freq)
else:
breaks.append(end)
# Generate the list of intervals.
return zip(breaks[:-1], breaks[1:])
| mit |
raghavrv/scikit-learn | sklearn/cluster/birch.py | 11 | 23640 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True; used when retrieving
        the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norms as views. In this way, updating
        # init_centroids_ and init_sq_norm_ is sufficient to keep them in
        # sync.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
            # The child node needs to be split: we redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
        Centroid of the subcluster. Prevents recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor, then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model, default 3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- `sklearn.cluster` Estimator : If a model is provided, the model is
fit treating the subclusters as new samples and the initial data is
mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e155.py | 2 | 6834 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd LSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using LSTM not BLSTM
e151
* Max pooling
"""
def set_subsample_target(net, epoch):
net.source.subsample_target = 5
net.source.input_padding = 4
net.source.seq_length = 1500
net.generate_validation_data_and_set_shapes()
def exp_a(name):
# 151d but training for much longer and skip prob = 0.7
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1504,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
include_diff=True
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=2000,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=.1, clip_range=(-1, 1)),
layers_config=[
{
'type': BLSTMLayer,
'num_units': 60,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
],
layer_changes={
10001: {
'remove_from': -3,
'callback': set_subsample_target,
'new_layers':
[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 80,
'filter_length': 5,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': FeaturePoolLayer,
'ds': 5, # number of feature maps to be pooled together
'axis': 1 # pool over the time axis
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
},
20001: {
'remove_from': -3,
'new_layers':
[
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
}
}
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| mit |
Python4AstronomersAndParticlePhysicists/PythonWorkshop-ICE | examples/use_system_latex/plot.py | 1 | 1122 | import matplotlib.pyplot as plt
import numpy as np
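# NOTE (assumption, not stated in this file): the strings below use \SI, \si and
# \symup, which plain matplotlib mathtext cannot render. The example presumably
# relies on matplotlib being configured to use the system LaTeX (e.g. text.usetex
# / pgf with a siunitx + unicode-math preamble in an accompanying matplotlibrc),
# and on the 'build/' output directory already existing.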
formula = (
r'$\displaystyle '
r'N = \int_{E_\text{min}}^{E_\text{max}} '
r'\int_0^A'
r'\int_{t_\text{min}}^{t_\text{max}} '
r' Φ_0 \left(\frac{E}{\SI{1}{\GeV}}\right)^{\!\!-γ}'
r' \, \symup{d}A \, \symup{d}t \, \symup{d}E'
r'$'
)
def power_law_spectrum(energy, normalisation, spectral_index):
return normalisation * energy**(-spectral_index)
bin_edges = np.logspace(2, 5, 15)
bin_centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])
plt.errorbar(
np.log10(bin_centers),
power_law_spectrum(bin_centers, 1e-12, 2.5),
xerr=[np.log10(bin_centers) - np.log10(bin_edges[:-1]), np.log10(bin_edges[1:]) - np.log10(bin_centers)],
yerr=0.2 * power_law_spectrum(bin_centers, 1e-12, 2.5),
linestyle='',
)
plt.xlabel(r'$\log_{10}\bigl(E \mathbin{/} \si{\giga\electronvolt}\bigr)$')
plt.ylabel(
r'$Φ'
r'\mathbin{/}'
r'\si{\per\GeV\per\second\per\steradian\per\meter\squared}$'
)
plt.text(0.1, 0.1, formula, transform=plt.gca().transAxes)
plt.yscale('log')
plt.tight_layout(pad=0)
plt.savefig('build/plot.pdf')
| mit |
furgerf/kaggle-projects | cancer/mxlearn.py | 1 | 3281 | import numpy as np
import dicom
import glob
# from matplotlib import pyplot as plt
import os
import cv2
import mxnet as mx
import pandas as pd
from sklearn import cross_validation
import xgboost as xgb
def get_extractor():
model = mx.model.FeedForward.load('model/resnet-50', 0, ctx=mx.cpu(), numpy_batch_size=1)
fea_symbol = model.symbol.get_internals()["flatten0_output"]
feature_extractor = mx.model.FeedForward(ctx=mx.cpu(), symbol=fea_symbol, numpy_batch_size=64,
arg_params=model.arg_params, aux_params=model.aux_params,
allow_extra_params=True)
return feature_extractor
def get_3d_data(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key=lambda x: int(x.InstanceNumber))
return np.stack([s.pixel_array for s in slices])
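# Build the ResNet-50 input batch for one scan: every 3 consecutive axial slices
# are intensity-scaled, histogram-equalised, centre-cropped and resized to
# 224x224, then stacked so they act as the three colour channels of one image.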
def get_data_id(path):
sample_image = get_3d_data(path)
sample_image[sample_image == -2000] = 0
# f, plots = plt.subplots(4, 5, sharex='col', sharey='row', figsize=(10, 8))
batch = []
cnt = 0
dx = 40
ds = 512
for i in range(0, sample_image.shape[0] - 3, 3):
tmp = []
for j in range(3):
img = sample_image[i + j]
img = 255.0 / np.amax(img) * img
img = cv2.equalizeHist(img.astype(np.uint8))
img = img[dx: ds - dx, dx: ds - dx]
img = cv2.resize(img, (224, 224))
tmp.append(img)
tmp = np.array(tmp)
batch.append(np.array(tmp))
# if cnt < 20:
# plots[cnt // 5, cnt % 5].axis('off')
# plots[cnt // 5, cnt % 5].imshow(np.swapaxes(tmp, 0, 2))
# cnt += 1
# plt.show()
batch = np.array(batch)
return batch
def calc_features():
net = get_extractor()
for folder in glob.glob('stage1/*'):
batch = get_data_id(folder)
feats = net.predict(batch)
print(feats.shape)
np.save(folder, feats)
def train_xgboost():
df = pd.read_csv('data/stage1_labels.csv')
print(df.head())
x = np.array([np.mean(np.load('stage1/%s.npy' % str(id)), axis=0) for id in df['id'].tolist()])
y = df['cancer'].as_matrix()
trn_x, val_x, trn_y, val_y = cross_validation.train_test_split(x, y, random_state=42, stratify=y,
test_size=0.20)
clf = xgb.XGBRegressor(max_depth=10,
n_estimators=1500,
min_child_weight=9,
learning_rate=0.05,
nthread=8,
subsample=0.80,
colsample_bytree=0.80,
seed=4242)
clf.fit(trn_x, trn_y, eval_set=[(val_x, val_y)], verbose=True, eval_metric='logloss', early_stopping_rounds=50)
return clf
def make_submit():
clf = train_xgboost()
df = pd.read_csv('data/stage1_sample_submission.csv')
x = np.array([np.mean(np.load('stage1/%s.npy' % str(id)), axis=0) for id in df['id'].tolist()])
pred = clf.predict(x)
df['cancer'] = pred
df.to_csv('subm1.csv', index=False)
print(df.head())
if __name__ == '__main__':
calc_features()
make_submit()
| apache-2.0 |
PG-TUe/tpot | tutorials/MAGIC Gamma Telescope/tpot_MAGIC_Gamma_Telescope_pipeline.py | 1 | 1041 | import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.tree import DecisionTreeClassifier
from tpot.builtins import StackingEstimator
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'].values, random_state=42)
# Score on the training set was:0.853347788745
exported_pipeline = make_pipeline(
StackingEstimator(estimator=LogisticRegression(C=10.0, dual=False, penalty="l2")),
DecisionTreeClassifier(criterion="gini", max_depth=7, min_samples_leaf=5, min_samples_split=7)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| lgpl-3.0 |
roxyboy/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
ilo10/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
martinwicke/tensorflow | tensorflow/examples/learn/iris_val_based_early_stopping.py | 25 | 2816 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with early stopping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
from tensorflow.contrib import learn
def clean_folder(folder):
"""Cleans the given folder if it exists."""
try:
shutil.rmtree(folder)
except OSError:
pass
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.2, random_state=42)
val_monitor = learn.monitors.ValidationMonitor(
x_val, y_val, early_stopping_rounds=200)
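  # The monitor evaluates on (x_val, y_val) during training and requests an
  # early stop once the validation metric has not improved for roughly
  # `early_stopping_rounds` steps.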
model_dir = '/tmp/iris_model'
clean_folder(model_dir)
# classifier with early stopping on training data
classifier1 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10], n_classes=3, model_dir=model_dir)
classifier1.fit(x=x_train, y=y_train, steps=2000)
predictions1 = list(classifier1.predict(x_test, as_iterable=True))
score1 = metrics.accuracy_score(y_test, predictions1)
model_dir = '/tmp/iris_model_val'
clean_folder(model_dir)
# classifier with early stopping on validation data, save frequently for
# monitor to pick up new checkpoints.
classifier2 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10], n_classes=3, model_dir=model_dir,
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
classifier2.fit(x=x_train, y=y_train, steps=2000, monitors=[val_monitor])
predictions2 = list(classifier2.predict(x_test, as_iterable=True))
score2 = metrics.accuracy_score(y_test, predictions2)
# In many applications, the score is improved by using early stopping
print('score1: ', score1)
print('score2: ', score2)
print('score2 > score1: ', score2 > score1)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
GGoussar/scikit-image | doc/examples/transform/plot_swirl.py | 7 | 2712 | """
=====
Swirl
=====
Image swirling is a non-linear image deformation that creates a whirlpool
effect. This example describes the implementation of this transform in
``skimage``, as well as the underlying warp mechanism.
Image warping
-------------
When applying a geometric transformation on an image, we typically make use of
a reverse mapping, i.e., for each pixel in the output image, we compute its
corresponding position in the input. The reason is that, if we were to do it
the other way around (map each input pixel to its new output position), some
pixels in the output may be left empty. On the other hand, each output
coordinate has exactly one corresponding location in (or outside) the input
image, and even if that position is non-integer, we may use interpolation to
compute the corresponding image value.
Performing a reverse mapping
----------------------------
To perform a geometric warp in ``skimage``, you simply need to provide the
reverse mapping to the ``skimage.transform.warp`` function. E.g., consider the
case where we would like to shift an image 50 pixels to the left. The reverse
mapping for such a shift would be::
def shift_left(xy):
xy[:, 0] += 50
return xy
The corresponding call to warp is::
from skimage.transform import warp
warp(image, shift_left)
The swirl transformation
------------------------
Consider the coordinate :math:`(x, y)` in the output image. The reverse
mapping for the swirl transformation first computes, relative to a center
:math:`(x_0, y_0)`, its polar coordinates,
.. math::
\\theta = \\arctan(y/x)
\\rho = \sqrt{(x - x_0)^2 + (y - y_0)^2},
and then transforms them according to
.. math::
r = \ln(2) \, \mathtt{radius} / 5
\phi = \mathtt{rotation}
s = \mathtt{strength}
    \\theta' = \phi + s \, e^{-\\rho / r} + \\theta
where ``strength`` is a parameter for the amount of swirl, ``radius`` indicates
the swirl extent in pixels, and ``rotation`` adds a rotation angle. The
transformation of ``radius`` into :math:`r` is to ensure that the
transformation decays to :math:`\\approx 1/1000^{\mathsf{th}}` within the
specified radius.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.transform import swirl
image = data.checkerboard()
swirled = swirl(image, rotation=0, strength=10, radius=120)
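# For illustration only (not part of the original example): the swirl above can
# also be reproduced by implementing the reverse mapping from the maths section
# and handing it to ``warp``. The centre is assumed to be the image centre;
# rotation, strength and radius mirror the ``swirl`` call above.
import numpy as np
from skimage.transform import warp
def swirl_mapping(xy, center=(100, 100), rotation=0, strength=10, radius=120):
    # xy is an (M, 2) array of (col, row) coordinates in the output image
    x, y = (xy - np.asarray(center)).T
    rho = np.sqrt(x ** 2 + y ** 2)
    r = np.log(2) * radius / 5  # decay length, as in the formula above
    theta = rotation + strength * np.exp(-rho / r) + np.arctan2(y, x)
    return np.column_stack((rho * np.cos(theta), rho * np.sin(theta))) + np.asarray(center)
swirled_manual = warp(image, swirl_mapping)  # visually equivalent to `swirled`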
fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(8, 3),
sharex=True, sharey=True,
subplot_kw={'adjustable':'box-forced'})
ax0.imshow(image, cmap=plt.cm.gray, interpolation='none')
ax0.axis('off')
ax1.imshow(swirled, cmap=plt.cm.gray, interpolation='none')
ax1.axis('off')
plt.show()
| bsd-3-clause |
wmvanvliet/mne-python | examples/visualization/plot_publication_figure.py | 10 | 11215 | """
.. _ex-publication-figure:
===================================
Make figures more publication ready
===================================
In this example, we show several use cases to take MNE plots and
customize them for a more publication-ready look.
"""
# Authors: Eric Larson <[email protected]>
# Daniel McCloy <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
###############################################################################
# Imports
# -------
# We are importing everything we need for this example:
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import (make_axes_locatable, ImageGrid,
inset_locator)
import mne
###############################################################################
# Evoked plot with brain activation
# ---------------------------------
#
# Suppose we want a figure with an evoked plot on top, and the brain activation
# below, with the brain subplot slightly bigger than the evoked plot. Let's
# start by loading some :ref:`example data <sample-dataset>`.
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-eeg-lh.stc')
fname_evoked = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
evoked = mne.read_evokeds(fname_evoked, 'Left Auditory')
evoked.pick_types(meg='grad').apply_baseline((None, 0.))
max_t = evoked.get_peak()[1]
stc = mne.read_source_estimate(fname_stc)
###############################################################################
# During interactive plotting, we might see figures like this:
evoked.plot()
stc.plot(views='lat', hemi='split', size=(800, 400), subject='sample',
subjects_dir=subjects_dir, initial_time=max_t,
time_viewer=False, show_traces=False)
###############################################################################
# To make a publication-ready figure, first we'll re-plot the brain on a white
# background, take a screenshot of it, and then crop out the white margins.
# While we're at it, let's change the colormap, set custom colormap limits and
# remove the default colorbar (so we can add a smaller, vertical one later):
colormap = 'viridis'
clim = dict(kind='value', lims=[4, 8, 12])
# Plot the STC, get the brain image, crop it:
brain = stc.plot(views='lat', hemi='split', size=(800, 400), subject='sample',
subjects_dir=subjects_dir, initial_time=max_t, background='w',
colorbar=False, clim=clim, colormap=colormap,
time_viewer=False, show_traces=False)
screenshot = brain.screenshot()
brain.close()
###############################################################################
# Now let's crop out the white margins and the white gap between hemispheres.
# The screenshot has dimensions ``(h, w, 3)``, with the last axis being R, G, B
# values for each pixel, encoded as integers between ``0`` and ``255``. ``(255,
# 255, 255)`` encodes a white pixel, so we'll detect any pixels that differ
# from that:
nonwhite_pix = (screenshot != 255).any(-1)
nonwhite_row = nonwhite_pix.any(1)
nonwhite_col = nonwhite_pix.any(0)
cropped_screenshot = screenshot[nonwhite_row][:, nonwhite_col]
# before/after results
fig = plt.figure(figsize=(4, 4))
axes = ImageGrid(fig, 111, nrows_ncols=(2, 1), axes_pad=0.5)
for ax, image, title in zip(axes, [screenshot, cropped_screenshot],
['Before', 'After']):
ax.imshow(image)
ax.set_title('{} cropping'.format(title))
###############################################################################
# A lot of figure settings can be adjusted after the figure is created, but
# many can also be adjusted in advance by updating the
# :data:`~matplotlib.rcParams` dictionary. This is especially useful when your
# script generates several figures that you want to all have the same style:
# Tweak the figure style
plt.rcParams.update({
'ytick.labelsize': 'small',
'xtick.labelsize': 'small',
'axes.labelsize': 'small',
'axes.titlesize': 'medium',
'grid.color': '0.75',
'grid.linestyle': ':',
})
###############################################################################
# Now let's create our custom figure. There are lots of ways to do this step.
# Here we'll create the figure and the subplot axes in one step, specifying
# overall figure size, number and arrangement of subplots, and the ratio of
# subplot heights for each row using :mod:`GridSpec keywords
# <matplotlib.gridspec>`. Other approaches (using
# :func:`~matplotlib.pyplot.subplot2grid`, or adding each axes manually) are
# shown commented out, for reference.
# sphinx_gallery_thumbnail_number = 4
# figsize unit is inches
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(4.5, 3.),
gridspec_kw=dict(height_ratios=[3, 4]))
# alternate way #1: using subplot2grid
# fig = plt.figure(figsize=(4.5, 3.))
# axes = [plt.subplot2grid((7, 1), (0, 0), rowspan=3),
# plt.subplot2grid((7, 1), (3, 0), rowspan=4)]
# alternate way #2: using figure-relative coordinates
# fig = plt.figure(figsize=(4.5, 3.))
# axes = [fig.add_axes([0.125, 0.58, 0.775, 0.3]), # left, bot., width, height
# fig.add_axes([0.125, 0.11, 0.775, 0.4])]
# we'll put the evoked plot in the upper axes, and the brain below
evoked_idx = 0
brain_idx = 1
# plot the evoked in the desired subplot, and add a line at peak activation
evoked.plot(axes=axes[evoked_idx])
peak_line = axes[evoked_idx].axvline(max_t, color='#66CCEE', ls='--')
# custom legend
axes[evoked_idx].legend(
[axes[evoked_idx].lines[0], peak_line], ['MEG data', 'Peak time'],
frameon=True, columnspacing=0.1, labelspacing=0.1,
fontsize=8, fancybox=True, handlelength=1.8)
# remove the "N_ave" annotation
axes[evoked_idx].texts = []
# Remove spines and add grid
axes[evoked_idx].grid(True)
axes[evoked_idx].set_axisbelow(True)
for key in ('top', 'right'):
axes[evoked_idx].spines[key].set(visible=False)
# Tweak the ticks and limits
axes[evoked_idx].set(
yticks=np.arange(-200, 201, 100), xticks=np.arange(-0.2, 0.51, 0.1))
axes[evoked_idx].set(
ylim=[-225, 225], xlim=[-0.2, 0.5])
# now add the brain to the lower axes
axes[brain_idx].imshow(cropped_screenshot)
axes[brain_idx].axis('off')
# add a vertical colorbar with the same properties as the 3D one
divider = make_axes_locatable(axes[brain_idx])
cax = divider.append_axes('right', size='5%', pad=0.2)
cbar = mne.viz.plot_brain_colorbar(cax, clim, colormap, label='Activation (F)')
# tweak margins and spacing
fig.subplots_adjust(
left=0.15, right=0.9, bottom=0.01, top=0.9, wspace=0.1, hspace=0.5)
# add subplot labels
for ax, label in zip(axes, 'AB'):
ax.text(0.03, ax.get_position().ymax, label, transform=fig.transFigure,
fontsize=12, fontweight='bold', va='top', ha='left')
###############################################################################
# Custom timecourse with montage inset
# ------------------------------------
#
# Suppose we want a figure with some mean timecourse extracted from a number of
# sensors, and we want a smaller panel within the figure to show a head outline
# with the positions of those sensors clearly marked.
# If you are familiar with MNE, you know that this is something that
# :func:`mne.viz.plot_compare_evokeds` does, see an example output in
# :ref:`ex-hf-sef-data` at the bottom.
#
# In this part of the example, we will show you how to achieve this result on
# your own figure, without having to use :func:`mne.viz.plot_compare_evokeds`!
#
# Let's start by loading some :ref:`example data <sample-dataset>`.
data_path = mne.datasets.sample.data_path()
fname_raw = op.join(data_path, "MEG", "sample", "sample_audvis_raw.fif")
raw = mne.io.read_raw_fif(fname_raw)
# For the sake of the example, we focus on EEG data
raw.pick_types(meg=False, eeg=True)
###############################################################################
# Let's make a plot.
# channels to plot:
to_plot = [f"EEG {i:03}" for i in range(1, 5)]
# get the data for plotting in a short time interval from 10 to 20 seconds
start = int(raw.info['sfreq'] * 10)
stop = int(raw.info['sfreq'] * 20)
data, times = raw.get_data(picks=to_plot,
start=start, stop=stop, return_times=True)
# Scale the data from the MNE internal unit V to µV
data *= 1e6
# Take the mean of the channels
mean = np.mean(data, axis=0)
# make a figure
fig, ax = plt.subplots(figsize=(4.5, 3))
# plot some EEG data
ax.plot(times, mean)
###############################################################################
# So far so good. Now let's add the smaller figure within the figure to show
# exactly, which sensors we used to make the timecourse.
# For that, we use an "inset_axes" that we plot into our existing axes.
# The head outline with the sensor positions can be plotted using the
# `~mne.io.Raw` object that is the source of our data.
# Specifically, that object already contains all the sensor positions,
# and we can plot them using the ``plot_sensors`` method.
# recreate the figure (only necessary for our documentation server)
fig, ax = plt.subplots(figsize=(4.5, 3))
ax.plot(times, mean)
axins = inset_locator.inset_axes(ax, width="30%", height="30%", loc=2)
# pick_channels() edits the raw object in place, so we'll make a copy here
# so that our raw object stays intact for potential later analysis
raw.copy().pick_channels(to_plot).plot_sensors(title="", axes=axins)
###############################################################################
# That looks nice. But the sensor dots are way too big for our taste. Luckily,
# all MNE-Python plots use Matplotlib under the hood and we can customize
# each and every facet of them.
# To make the sensor dots smaller, we need to first get a handle on them to
# then apply a ``*.set_*`` method on them.
# If we inspect our axes we find the objects contained in our plot:
print(axins.get_children())
###############################################################################
# That's quite a lot of objects, but we know that we want to change the
# sensor dots, and those are most certainly a "PathCollection" object.
# So let's have a look at how many "collections" we have in the axes.
print(axins.collections)
###############################################################################
# There is only one! Those must be the sensor dots we were looking for.
# We finally found exactly what we needed. Sometimes this can take a bit of
# experimentation.
sensor_dots = axins.collections[0]
# Recreate the figure once more; shrink the sensor dots; add axis labels
fig, ax = plt.subplots(figsize=(4.5, 3))
ax.plot(times, mean)
axins = inset_locator.inset_axes(ax, width="30%", height="30%", loc=2)
raw.copy().pick_channels(to_plot).plot_sensors(title="", axes=axins)
sensor_dots = axins.collections[0]
sensor_dots.set_sizes([1])
# add axis labels, and adjust bottom figure margin to make room for them
ax.set(xlabel="Time (s)", ylabel="Amplitude (µV)")
fig.subplots_adjust(bottom=0.2)
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/spyderlib/widgets/sourcecode/asfsadf.py | 2 | 2207 | import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
from desktopmagic.screengrab_win32 import getScreenAsImage as getMultiScreenAsImage
from PIL import Image
from pytank.Core.Classes import ScriptSettings
import re
###############################################
lastdur = os.getcwd()
atext = "Configurator.media.Click(threshold = .75,xoffset = 100)" # self.get_selected_text()
#atext = self.get_selected_text()
folderpath = "C:\Elan_Tools\ImageScripter\ProgramData\SystemFiles\Python\Lib\site-packages\pytank\User\UserData\Applications\\"
alist = atext.split('.')
theapp = alist[0]
print("App->")
print(theapp)
print('')
theimage = alist[1] + '.png'
print("Image->")
print(theimage)
print('')
theAction = alist[2]
print("Action->")
print(theAction)
print('')
#######################################
print("Threshold->")
if 'threshold' in atext:
spit_on_thresh = atext.split("threshold")
print(spit_on_thresh[1])
#threshold = spit_on_thresh[1].replace('= ','')
#print(threshold)
#threshold = float(threshold.replace(')',''))
#print(threshold)
####################################
'''
imagepath = folderpath + theapp + '\\' + theapp + '_' + theimage
print("Path of Image->")
print(imagepath)
print('')
if os.path.exists(imagepath):
os.chdir(r"C:\Elan_Tools\Data\Playground")
def Save_Multi_Monitor_ScreenShot():
im = getMultiScreenAsImage()
name_Of_Image = 'templet.png'
im.save(name_Of_Image, format='png')
Save_Multi_Monitor_ScreenShot()
########################################################################
img_rgb = cv2.imread("templet.png")
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread(imagepath, 0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
threshold = ScriptSettings.Threshold
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):
cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
cv2.imwrite("res.png", img_rgb)
if os.path.exists("res.png"):
img = Image.open("res.png")
img.show()
else:
print('pass image')
''' | gpl-3.0 |
eladnoor/proteomaps | src/amino_acid_distribution/aa_biosynthesis_pathways.py | 1 | 1764 | import matplotlib.pyplot as plt
import numpy as np
import csv
from download_aa_sequences import calculate_aa_dist_per_proteome as csp
from download_aa_sequences import normalize_aa_dist as nd
AA_LETTERS = sorted("ACEDGFIHKMLNQPSRTWVY")
aa_biosythesis_reader = csv.reader(open('aa_biosynthesis.csv'), delimiter='\t')
for row in aa_biosythesis_reader:
    print row[0]  # each csv row is a plain list, so index it with a single integer
#bio_path_UPID = []
#aa_dist_csv_reader = csv.reader(open('aa_dist_by_UP_ID.csv'), delimiter='\t')
## skip the first header row
#aa_dist_csv_reader.next()
#
#load_UPID_to_aa_dist
#
#
#UPID_aa_dist = {}
#aa_dist_path_genomic = np.zeros((1, len(AA_LETTERS)))
#for row in aa_dist_csv_reader:
# if row[0] in bio_path_UPID:
# UPID_aa_dist[row[0]] = np.array([float(x) for x in row[1:]])
# distribution = [float(x) for x in row[1:len(AA_LETTERS)+1]]
# aa_dist_path_genomic += distribution
#aa_dist_path_genomic_normed = aa_dist_path_genomic/sum(aa_dist_path_genomic)
#a = csp('Ecoli_19_Conditions_Proteomics.csv', UPID_aa_dist)
##print a
#a_normed = nd(a,20)
#
#AA_PATH = sorted(['P_'+ x for x in AA_LETTERS], reverse=True)
#plt.clf()
#plt.imshow(a_normed, interpolation="nearest")
#plt.colorbar()
#plt.show()
#N = len(AA_LETTERS)
#ind = np.arange(N) # the x locations for the groups
#width = 0.1 # the width of the bars
#ax = plt.axes()
#ax.set_ylabel('Relative abundance')
#ax.set_title('Lysine (K) biosynthesis pathway')
#ax.set_xticks(ind+width)
#ax.set_xticklabels((AA_LETTERS))
#ax.set_yticks(ind+width)
#ax.set_yticklabels((AA_PATH))
#fig = plt.figure()
#rects1 = ax.bar(ind, aa_dist_path_genomic_normed.T, width, color='b')
##rects2 = ax.bar(ind+width,aa_dist_path_proteomic_normed.T , width, color='g')
## add some
##ax.legend( (rects1[0]),('Proteome')) | mit |
cactusbin/nyt | matplotlib/examples/pylab_examples/line_collection.py | 12 | 1511 | import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
import numpy as np
# In order to efficiently plot many lines in a single set of axes,
# Matplotlib has the ability to add the lines all at once. Here is a
# simple example showing how it is done.
x = np.arange(100)
# Here are many sets of y to plot vs x
ys = x[:50, np.newaxis] + x[np.newaxis, :]
segs = np.zeros((50, 100, 2), float)
segs[:,:,1] = ys
segs[:,:,0] = x
# Mask some values to test masked array support:
segs = np.ma.masked_where((segs > 50) & (segs < 60), segs)
# We need to set the plot limits.
ax = plt.axes()
ax.set_xlim(x.min(), x.max())
ax.set_ylim(ys.min(), ys.max())
# colors is sequence of rgba tuples
# linestyle is a string or dash tuple. Legal string values are
# solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq)
# where onoffseq is an even length tuple of on and off ink in points.
# If linestyle is omitted, 'solid' is used
# See matplotlib.collections.LineCollection for more information
line_segments = LineCollection(segs,
linewidths = (0.5,1,1.5,2),
colors = [colorConverter.to_rgba(i) \
for i in ('b','g','r','c','m','y','k')],
linestyle = 'solid')
ax.add_collection(line_segments)
ax.set_title('Line collection with masked arrays')
plt.show()
| unlicense |
nansencenter/DAPPER | examples/basic_3b.py | 1 | 2827 | # ## Present the results generated in example basic_3a
#
import matplotlib.pyplot as plt
import numpy as np
import dapper as dpr
save_as = dpr.rc.dirs.data / "basic_3"
# save_as /= "run_2020-11-11__20-36-36"
save_as /= dpr.find_latest_run(save_as)
# +
# Load
xps = dpr.load_xps(save_as)
# Prints all
# dpr.xpList(xps).print_avrgs(statkeys=["rmse.a","rmv.a"])
# -
# Associate each control variable with a coordinate/dimension
xp_dict = dpr.xpSpace.from_list(xps)
# Single out (highlight) particular settings.
# Note: Must use infl=1.01 (not 1) to reproduce "no infl" scores in Ref[1],
# as well as rot=True (better scores can be obtained without rot).
highlight = xp_dict.label_xSection
highlight('NO-infl' , ('infl'), da_method='LETKF', infl=1.01, rot=True)
highlight('NO-infl/loc', ('infl'), da_method='EnKF' , infl=1.01, rot=True)
# Print, with columns: `inner`. Also try setting `outer=None`.
tunable = {'loc_rad', 'infl', 'xB', 'rot'}
axes = dict(outer="F", inner="N", mean="seed", optim=tunable)
xp_dict.print("rmse.a", axes, subcols=False)
def get_style(coord):
"""Quick and dirty styling."""
S = dpr.default_styles(coord, True)
if coord.da_method == "EnKF":
upd_a = getattr(coord, "upd_a", None)
if upd_a == "PertObs":
S.c = "C2"
elif upd_a == "Sqrt":
S.c = "C1"
elif coord.da_method == "LETKF":
S.c = "C3"
if getattr(coord, "rot", False):
S.marker = "+"
Const = getattr(coord, "Const", False)
if str(Const).startswith("NO-"):
S.ls = "--"
S.marker = None
S.label = Const
return S
# Plot
tables = xp_dict.plot('rmse.a', axes, get_style, title2=save_as)
dpr.default_fig_adjustments(tables)
plt.pause(.1)
# #### Plot with color gradient
# Remove experiments we don't want to plot here
xps = [xp for xp in xps if getattr(xp, "Const", None) == None]
xp_dict = dpr.xpSpace.from_list(xps)
# Setup mapping: loc_rad --> color gradient
graded = "loc_rad"
axes["optim"] -= {graded}
grades = xp_dict.tickz(graded)
# cmap, sm = dpr.discretize_cmap(cm.Reds, len(grades), .2)
cmap, sm = dpr.discretize_cmap(plt.cm.rainbow, len(grades))
def get_style_with_gradient(coord):
S = get_style(coord)
if coord.da_method == "LETKF":
grade = dpr.rel_index(getattr(coord, graded), grades, 1)
S.c = cmap(grade)
S.marker = None
S.label = dpr.make_label(coord, exclude=[graded])
return S
# +
# Plot
tables = xp_dict.plot('rmse.a', axes, get_style_with_gradient, title2=save_as)
dpr.default_fig_adjustments(tables)
# Colorbar
cb = tables.fig.colorbar(sm, ax=tables[-1].panels[0], label=graded)
cb.set_ticks(np.arange(len(grades)))
cb.set_ticklabels(grades)
plt.pause(.1)
# -
# #### Excercise:
# Make a `get_style()` that works well with `graded = "infl"`.
| mit |
juliandewit/kaggle_ndsb2017 | step3_predict_nodules.py | 2 | 18258 | import settings
import helpers
import sys
import os
import glob
import random
import pandas
import ntpath
import cv2
import numpy
from typing import List, Tuple
from keras.optimizers import Adam, SGD
from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, merge, Convolution3D, MaxPooling3D, UpSampling3D, LeakyReLU, BatchNormalization, Flatten, Dense, Dropout, ZeroPadding3D, AveragePooling3D, Activation
from keras.models import Model, load_model, model_from_json
from keras.metrics import binary_accuracy, binary_crossentropy, mean_squared_error, mean_absolute_error
from keras import backend as K
from keras.callbacks import ModelCheckpoint, Callback, LearningRateScheduler
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
import math
# limit memory usage..
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
import step2_train_nodule_detector
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
# without augmentation, 10:1 99 train, 97 test, 0.27 cross entropy, before commit 573
# 3 pools instead of 4 (bigger end layer) gives much worse validation accuracy + logloss .. strange ?
# 32 x 32 x 32 seems to do better than 48 x 48 x 48..
K.set_image_dim_ordering("tf")
CUBE_SIZE = step2_train_nodule_detector.CUBE_SIZE
MEAN_PIXEL_VALUE = settings.MEAN_PIXEL_VALUE_NODULE
NEGS_PER_POS = 20
P_TH = 0.6
PREDICT_STEP = 12
USE_DROPOUT = False
def prepare_image_for_net3D(img):
img = img.astype(numpy.float32)
img -= MEAN_PIXEL_VALUE
img /= 255.
img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2], 1)
return img
def filter_patient_nodules_predictions(df_nodule_predictions: pandas.DataFrame, patient_id, view_size, luna16=False):
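    # Sanity-check predicted nodule positions against the patient's lung mask;
    # implausible detections (not inside the mask, or too low in the scan) are
    # flagged by flipping the sign of their predicted diameter so that later
    # stages can ignore them.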
src_dir = settings.LUNA_16_TRAIN_DIR2D2 if luna16 else settings.NDSB3_EXTRACTED_IMAGE_DIR
patient_mask = helpers.load_patient_images(patient_id, src_dir, "*_m.png")
delete_indices = []
for index, row in df_nodule_predictions.iterrows():
z_perc = row["coord_z"]
y_perc = row["coord_y"]
center_x = int(round(row["coord_x"] * patient_mask.shape[2]))
center_y = int(round(y_perc * patient_mask.shape[1]))
center_z = int(round(z_perc * patient_mask.shape[0]))
mal_score = row["diameter_mm"]
start_y = center_y - view_size / 2
start_x = center_x - view_size / 2
nodule_in_mask = False
for z_index in [-1, 0, 1]:
img = patient_mask[z_index + center_z]
start_x = int(start_x)
start_y = int(start_y)
view_size = int(view_size)
img_roi = img[start_y:start_y+view_size, start_x:start_x + view_size]
if img_roi.sum() > 255: # more than 1 pixel of mask.
nodule_in_mask = True
if not nodule_in_mask:
print("Nodule not in mask: ", (center_x, center_y, center_z))
if mal_score > 0:
mal_score *= -1
df_nodule_predictions.loc[index, "diameter_mm"] = mal_score
else:
if center_z < 30:
print("Z < 30: ", patient_id, " center z:", center_z, " y_perc: ", y_perc)
if mal_score > 0:
mal_score *= -1
df_nodule_predictions.loc[index, "diameter_mm"] = mal_score
if (z_perc > 0.75 or z_perc < 0.25) and y_perc > 0.85:
print("SUSPICIOUS FALSEPOSITIVE: ", patient_id, " center z:", center_z, " y_perc: ", y_perc)
if center_z < 50 and y_perc < 0.30:
print("SUSPICIOUS FALSEPOSITIVE OUT OF RANGE: ", patient_id, " center z:", center_z, " y_perc: ", y_perc)
df_nodule_predictions.drop(df_nodule_predictions.index[delete_indices], inplace=True)
return df_nodule_predictions
def filter_nodule_predictions(only_patient_id=None):
src_dir = settings.NDSB3_NODULE_DETECTION_DIR
for csv_index, csv_path in enumerate(glob.glob(src_dir + "*.csv")):
file_name = ntpath.basename(csv_path)
patient_id = file_name.replace(".csv", "")
print(csv_index, ": ", patient_id)
if only_patient_id is not None and patient_id != only_patient_id:
continue
df_nodule_predictions = pandas.read_csv(csv_path)
filter_patient_nodules_predictions(df_nodule_predictions, patient_id, CUBE_SIZE)
df_nodule_predictions.to_csv(csv_path, index=False)
def make_negative_train_data_based_on_predicted_luna_nodules():
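    # For every LUNA16 patient, compare the predicted nodules with the positive
    # LIDC / manual annotations: predictions close to a real nodule get a
    # negative diameter, and the remaining unmatched predictions are written to
    # *_candidates_falsepos.csv as hard-negative training examples.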
src_dir = settings.LUNA_NODULE_DETECTION_DIR
pos_labels_dir = settings.LUNA_NODULE_LABELS_DIR
keep_dist = CUBE_SIZE + CUBE_SIZE / 2
total_false_pos = 0
for csv_index, csv_path in enumerate(glob.glob(src_dir + "*.csv")):
file_name = ntpath.basename(csv_path)
patient_id = file_name.replace(".csv", "")
# if not "273525289046256012743471155680" in patient_id:
# continue
df_nodule_predictions = pandas.read_csv(csv_path)
pos_annos_manual = None
manual_path = settings.MANUAL_ANNOTATIONS_LABELS_DIR + patient_id + ".csv"
if os.path.exists(manual_path):
pos_annos_manual = pandas.read_csv(manual_path)
filter_patient_nodules_predictions(df_nodule_predictions, patient_id, CUBE_SIZE, luna16=True)
pos_labels = pandas.read_csv(pos_labels_dir + patient_id + "_annos_pos_lidc.csv")
print(csv_index, ": ", patient_id, ", pos", len(pos_labels))
patient_imgs = helpers.load_patient_images(patient_id, settings.LUNA_16_TRAIN_DIR2D2, "*_m.png")
for nod_pred_index, nod_pred_row in df_nodule_predictions.iterrows():
if nod_pred_row["diameter_mm"] < 0:
continue
nx, ny, nz = helpers.percentage_to_pixels(nod_pred_row["coord_x"], nod_pred_row["coord_y"], nod_pred_row["coord_z"], patient_imgs)
diam_mm = nod_pred_row["diameter_mm"]
for label_index, label_row in pos_labels.iterrows():
px, py, pz = helpers.percentage_to_pixels(label_row["coord_x"], label_row["coord_y"], label_row["coord_z"], patient_imgs)
dist = math.sqrt(math.pow(nx - px, 2) + math.pow(ny - py, 2) + math.pow(nz- pz, 2))
if dist < keep_dist:
if diam_mm >= 0:
diam_mm *= -1
df_nodule_predictions.loc[nod_pred_index, "diameter_mm"] = diam_mm
break
if pos_annos_manual is not None:
for index, label_row in pos_annos_manual.iterrows():
px, py, pz = helpers.percentage_to_pixels(label_row["x"], label_row["y"], label_row["z"], patient_imgs)
diameter = label_row["d"] * patient_imgs[0].shape[1]
# print((pos_coord_x, pos_coord_y, pos_coord_z))
# print(center_float_rescaled)
dist = math.sqrt(math.pow(px - nx, 2) + math.pow(py - ny, 2) + math.pow(pz - nz, 2))
if dist < (diameter + 72): # make sure we have a big margin
if diam_mm >= 0:
diam_mm *= -1
df_nodule_predictions.loc[nod_pred_index, "diameter_mm"] = diam_mm
print("#Too close", (nx, ny, nz))
break
df_nodule_predictions.to_csv(csv_path, index=False)
df_nodule_predictions = df_nodule_predictions[df_nodule_predictions["diameter_mm"] >= 0]
df_nodule_predictions.to_csv(pos_labels_dir + patient_id + "_candidates_falsepos.csv", index=False)
total_false_pos += len(df_nodule_predictions)
print("Total false pos:", total_false_pos)
def predict_cubes(model_path, continue_job, only_patient_id=None, luna16=False, magnification=1, flip=False, train_data=True, holdout_no=-1, ext_name="", fold_count=2):
if luna16:
dst_dir = settings.LUNA_NODULE_DETECTION_DIR
else:
dst_dir = settings.NDSB3_NODULE_DETECTION_DIR
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
holdout_ext = ""
# if holdout_no is not None:
# holdout_ext = "_h" + str(holdout_no) if holdout_no >= 0 else ""
flip_ext = ""
if flip:
flip_ext = "_flip"
dst_dir += "predictions" + str(int(magnification * 10)) + holdout_ext + flip_ext + "_" + ext_name + "/"
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
sw = helpers.Stopwatch.start_new()
model = step2_train_nodule_detector.get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=model_path)
if not luna16:
if train_data:
labels_df = pandas.read_csv("resources/stage1_labels.csv")
labels_df.set_index(["id"], inplace=True)
else:
labels_df = pandas.read_csv("resources/stage2_sample_submission.csv")
labels_df.set_index(["id"], inplace=True)
patient_ids = []
for file_name in os.listdir(settings.NDSB3_EXTRACTED_IMAGE_DIR):
if not os.path.isdir(settings.NDSB3_EXTRACTED_IMAGE_DIR + file_name):
continue
patient_ids.append(file_name)
all_predictions_csv = []
for patient_index, patient_id in enumerate(reversed(patient_ids)):
if not luna16:
if patient_id not in labels_df.index:
continue
if "metadata" in patient_id:
continue
if only_patient_id is not None and only_patient_id != patient_id:
continue
if holdout_no is not None and train_data:
patient_fold = helpers.get_patient_fold(patient_id)
patient_fold %= fold_count
if patient_fold != holdout_no:
continue
print(patient_index, ": ", patient_id)
csv_target_path = dst_dir + patient_id + ".csv"
if continue_job and only_patient_id is None:
if os.path.exists(csv_target_path):
continue
patient_img = helpers.load_patient_images(patient_id, settings.NDSB3_EXTRACTED_IMAGE_DIR, "*_i.png", [])
if magnification != 1:
patient_img = helpers.rescale_patient_images(patient_img, (1, 1, 1), magnification)
patient_mask = helpers.load_patient_images(patient_id, settings.NDSB3_EXTRACTED_IMAGE_DIR, "*_m.png", [])
if magnification != 1:
patient_mask = helpers.rescale_patient_images(patient_mask, (1, 1, 1), magnification, is_mask_image=True)
# patient_img = patient_img[:, ::-1, :]
# patient_mask = patient_mask[:, ::-1, :]
step = PREDICT_STEP
CROP_SIZE = CUBE_SIZE
# CROP_SIZE = 48
predict_volume_shape_list = [0, 0, 0]
for dim in range(3):
dim_indent = 0
while dim_indent + CROP_SIZE < patient_img.shape[dim]:
predict_volume_shape_list[dim] += 1
dim_indent += step
predict_volume_shape = (predict_volume_shape_list[0], predict_volume_shape_list[1], predict_volume_shape_list[2])
predict_volume = numpy.zeros(shape=predict_volume_shape, dtype=float)
print("Predict volume shape: ", predict_volume.shape)
done_count = 0
skipped_count = 0
batch_size = 128
batch_list = []
batch_list_coords = []
patient_predictions_csv = []
cube_img = None
annotation_index = 0
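        # Slide a CROP_SIZE cube over the scan with stride `step`; cubes with
        # (almost) no lung-mask coverage are skipped, the rest are batched and
        # fed to the 3D nodule detector.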
for z in range(0, predict_volume_shape[0]):
for y in range(0, predict_volume_shape[1]):
for x in range(0, predict_volume_shape[2]):
#if cube_img is None:
cube_img = patient_img[z * step:z * step+CROP_SIZE, y * step:y * step + CROP_SIZE, x * step:x * step+CROP_SIZE]
cube_mask = patient_mask[z * step:z * step+CROP_SIZE, y * step:y * step + CROP_SIZE, x * step:x * step+CROP_SIZE]
if cube_mask.sum() < 2000:
skipped_count += 1
else:
if flip:
cube_img = cube_img[:, :, ::-1]
if CROP_SIZE != CUBE_SIZE:
cube_img = helpers.rescale_patient_images2(cube_img, (CUBE_SIZE, CUBE_SIZE, CUBE_SIZE))
# helpers.save_cube_img("c:/tmp/cube.png", cube_img, 8, 4)
# cube_mask = helpers.rescale_patient_images2(cube_mask, (CUBE_SIZE, CUBE_SIZE, CUBE_SIZE))
img_prep = prepare_image_for_net3D(cube_img)
batch_list.append(img_prep)
batch_list_coords.append((z, y, x))
if len(batch_list) % batch_size == 0:
batch_data = numpy.vstack(batch_list)
p = model.predict(batch_data, batch_size=batch_size)
for i in range(len(p[0])):
p_z = batch_list_coords[i][0]
p_y = batch_list_coords[i][1]
p_x = batch_list_coords[i][2]
nodule_chance = p[0][i][0]
predict_volume[p_z, p_y, p_x] = nodule_chance
if nodule_chance > P_TH:
p_z = p_z * step + CROP_SIZE / 2
p_y = p_y * step + CROP_SIZE / 2
p_x = p_x * step + CROP_SIZE / 2
p_z_perc = round(p_z / patient_img.shape[0], 4)
p_y_perc = round(p_y / patient_img.shape[1], 4)
p_x_perc = round(p_x / patient_img.shape[2], 4)
diameter_mm = round(p[1][i][0], 4)
# diameter_perc = round(2 * step / patient_img.shape[2], 4)
diameter_perc = round(2 * step / patient_img.shape[2], 4)
diameter_perc = round(diameter_mm / patient_img.shape[2], 4)
nodule_chance = round(nodule_chance, 4)
patient_predictions_csv_line = [annotation_index, p_x_perc, p_y_perc, p_z_perc, diameter_perc, nodule_chance, diameter_mm]
patient_predictions_csv.append(patient_predictions_csv_line)
all_predictions_csv.append([patient_id] + patient_predictions_csv_line)
annotation_index += 1
batch_list = []
batch_list_coords = []
done_count += 1
if done_count % 10000 == 0:
print("Done: ", done_count, " skipped:", skipped_count)
df = pandas.DataFrame(patient_predictions_csv, columns=["anno_index", "coord_x", "coord_y", "coord_z", "diameter", "nodule_chance", "diameter_mm"])
filter_patient_nodules_predictions(df, patient_id, CROP_SIZE * magnification)
df.to_csv(csv_target_path, index=False)
# cols = ["anno_index", "nodule_chance", "diamete_mm"] + ["f" + str(i) for i in range(64)]
# df_features = pandas.DataFrame(patient_features_csv, columns=cols)
# for index, row in df.iterrows():
# if row["diameter_mm"] < 0:
# print("Dropping")
# anno_index = row["anno_index"]
# df_features.drop(df_features[df_features["anno_index"] == anno_index].index, inplace=True)
#
# df_features.to_csv(csv_target_path_features, index=False)
# df = pandas.DataFrame(all_predictions_csv, columns=["patient_id", "anno_index", "coord_x", "coord_y", "coord_z", "diameter", "nodule_chance", "diameter_mm"])
# df.to_csv("c:/tmp/tmp2.csv", index=False)
print(predict_volume.mean())
print("Done in : ", sw.get_elapsed_seconds(), " seconds")
if __name__ == "__main__":
CONTINUE_JOB = True
only_patient_id = None # "ebd601d40a18634b100c92e7db39f585"
if not CONTINUE_JOB or only_patient_id is not None:
for file_path in glob.glob("c:/tmp/*.*"):
if not os.path.isdir(file_path):
remove_file = True
if only_patient_id is not None:
if only_patient_id not in file_path:
remove_file = False
remove_file = False
if remove_file:
os.remove(file_path)
if True:
for magnification in [1, 1.5, 2]: #
predict_cubes("models/model_luna16_full__fs_best.hd5", CONTINUE_JOB, only_patient_id=only_patient_id, magnification=magnification, flip=False, train_data=True, holdout_no=None, ext_name="luna16_fs")
predict_cubes("models/model_luna16_full__fs_best.hd5", CONTINUE_JOB, only_patient_id=only_patient_id, magnification=magnification, flip=False, train_data=False, holdout_no=None, ext_name="luna16_fs")
if True:
for version in [2, 1]:
for holdout in [0, 1]:
for magnification in [1, 1.5, 2]: #
predict_cubes("models/model_luna_posnegndsb_v" + str(version) + "__fs_h" + str(holdout) + "_end.hd5", CONTINUE_JOB, only_patient_id=only_patient_id, magnification=magnification, flip=False, train_data=True, holdout_no=holdout, ext_name="luna_posnegndsb_v" + str(version), fold_count=2)
if holdout == 0:
predict_cubes("models/model_luna_posnegndsb_v" + str(version) + "__fs_h" + str(holdout) + "_end.hd5", CONTINUE_JOB, only_patient_id=only_patient_id, magnification=magnification, flip=False, train_data=False, holdout_no=holdout, ext_name="luna_posnegndsb_v" + str(version), fold_count=2)
| mit |
mklauser/tardis | tardis/io/config_reader.py | 1 | 44501 | # Module to read the rather complex config data
# Currently the configuration file is documented in
# tardis/data/example_configuration.ini
import logging
import os
import pprint
import copy
from astropy import constants, units as u
import astropy.utils
import numpy as np
import pandas as pd
import yaml
from model_reader import read_density_file, calculate_density_after_time, read_abundances_file
from tardis import atomic
from tardis.util import species_string_to_tuple, parse_quantity, element_symbol2atomic_number
pp = pprint.PrettyPrinter(indent=4)
logger = logging.getLogger(__name__)
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../data'))
#File parsers for different file formats:
density_structure_fileparser = {}
inv_ni56_efolding_time = 1 / (8.8 * u.day)
inv_co56_efolding_time = 1 / (113.7 * u.day)
inv_cr48_efolding_time = 1 / (1.29602 * u.day)
inv_v48_efolding_time = 1 / (23.0442 * u.day)
inv_fe52_efolding_time = 1 / (0.497429 * u.day)
inv_mn52_efolding_time = 1 / (0.0211395 * u.day)
class ConfigurationError(ValueError):
pass
def parse_quantity_linspace(quantity_linspace_dictionary, add_one=True):
"""
parse a dictionary of the following kind
{'start': 5000 km/s,
'stop': 10000 km/s,
'num': 1000}
Parameters:
-----------
quantity_linspace_dictionary: ~dict
add_one: boolean, default: True
Returns:
--------
~np.array
"""
start = parse_quantity(quantity_linspace_dictionary['start'])
stop = parse_quantity(quantity_linspace_dictionary['stop'])
try:
stop = stop.to(start.unit)
except u.UnitsError:
raise ConfigurationError('"start" and "stop" keyword must be compatible quantities')
num = quantity_linspace_dictionary['num']
if add_one:
num += 1
return np.linspace(start.value, stop.value, num=num) * start.unit
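# Example (illustrative values): parse_quantity_linspace(
#     {'start': '1.1e4 km/s', 'stop': '2e4 km/s', 'num': 20})
# returns 21 equally spaced velocities (the boundary grid for 20 shells) as an
# astropy Quantity in km/s.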
def parse_spectral_bin(spectral_bin_boundary_1, spectral_bin_boundary_2):
spectral_bin_boundary_1 = parse_quantity(spectral_bin_boundary_1).to('Angstrom', u.spectral())
spectral_bin_boundary_2 = parse_quantity(spectral_bin_boundary_2).to('Angstrom', u.spectral())
spectrum_start_wavelength = min(spectral_bin_boundary_1, spectral_bin_boundary_2)
spectrum_end_wavelength = max(spectral_bin_boundary_1, spectral_bin_boundary_2)
return spectrum_start_wavelength, spectrum_end_wavelength
def calc_exponential_density(velocities, v_0, rho0):
"""
This function computes the exponential density profile.
:param velocities: Array like velocity profile.
:param rho0: rho at v0
:param velocity_0: the velocity at the inner shell
:param a: proportionality constant
:return: Array like density profile
"""
densities = rho0 * np.exp(-(velocities / v_0))
return densities
def calc_power_law_density(velocities, velocity_0, rho_0, exponent):
"""
    This function computes a discrete power-law density profile.
:math:`\\rho = \\rho_0 \\times \\left( \\frac{v_0}{v} \\right)^n`
Parameters
----------
velocities : Array like list
velocities in km/s
velocity_0 : ~float
Velocity at the inner boundary
rho_0 : ~float
density at velocity_0
exponent : ~float
exponent used in the powerlaw
Returns
-------
Array like density structure
"""
densities = rho_0 * np.power((velocity_0 / velocities), exponent)
return densities
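# Worked example (hypothetical numbers): with rho_0 = 1e-13 g/cm^3 at
# v_0 = 8000 km/s and exponent n = 7, a shell at 16000 km/s gets
# rho = 1e-13 * (8000 / 16000)**7 ~= 7.8e-16 g/cm^3.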
def parse_model_file_section(model_setup_file_dict, time_explosion):
def parse_artis_model_setup_files(model_file_section_dict, time_explosion):
###### Reading the structure part of the ARTIS file pair
structure_fname = model_file_section_dict['structure_fname']
for i, line in enumerate(file(structure_fname)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
artis_model_columns = ['velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
'cr48_fraction']
artis_model = np.recfromtxt(structure_fname, skip_header=2, usecols=(1, 2, 4, 5, 6, 7), unpack=True,
dtype=[(item, np.float64) for item in artis_model_columns])
        #converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current time (time_explosion)
velocities = u.Quantity(np.append([0], artis_model['velocities']), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS model structure file %s (no_of_shells=length of dataset)', structure_fname)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % structure_fname)
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', structure_fname,
no_of_shells, sum(masses.value))
if 'v_lowest' in model_file_section_dict:
v_lowest = parse_quantity(model_file_section_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in model_file_section_dict:
v_highest = parse_quantity(model_file_section_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
artis_model = artis_model[min_shell:max_shell]
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
###### Reading the abundance part of the ARTIS file pair
abundances_fname = model_file_section_dict['abundances_fname']
abundances = pd.DataFrame(np.loadtxt(abundances_fname)[min_shell:max_shell, 1:].transpose(), index=np.arange(1, 31))
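        # Evolve the radioactive species analytically to time_explosion: the
        # Ni56 -> Co56 -> Fe56, Fe52 -> Mn52 -> Cr52 and Cr48 -> V48 -> Ti48
        # decay chains are applied on top of the stable abundances using the
        # e-folding times defined at module level.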
ni_stable = abundances.ix[28] - artis_model['ni56_fraction']
co_stable = abundances.ix[27] - artis_model['co56_fraction']
fe_stable = abundances.ix[26] - artis_model['fe52_fraction']
mn_stable = abundances.ix[25] - 0.0
cr_stable = abundances.ix[24] - artis_model['cr48_fraction']
v_stable = abundances.ix[23] - 0.0
ti_stable = abundances.ix[22] - 0.0
abundances.ix[28] = ni_stable
abundances.ix[28] += artis_model['ni56_fraction'] * np.exp(-(time_explosion* inv_ni56_efolding_time).to(1).value)
abundances.ix[27] = co_stable
abundances.ix[27] += artis_model['co56_fraction'] * np.exp(-(time_explosion* inv_co56_efolding_time).to(1).value)
abundances.ix[27] += (inv_ni56_efolding_time * artis_model['ni56_fraction'] /
(inv_ni56_efolding_time - inv_co56_efolding_time)) * \
(np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) - np.exp(-(inv_ni56_efolding_time * time_explosion).to(1).value))
abundances.ix[26] = fe_stable
abundances.ix[26] += artis_model['fe52_fraction'] * np.exp(-(time_explosion * inv_fe52_efolding_time).to(1).value)
abundances.ix[26] += ((artis_model['co56_fraction'] * inv_ni56_efolding_time
- artis_model['co56_fraction'] * inv_co56_efolding_time
+ artis_model['ni56_fraction'] * inv_ni56_efolding_time
- artis_model['ni56_fraction'] * inv_co56_efolding_time
- artis_model['co56_fraction'] * inv_ni56_efolding_time * np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['co56_fraction'] * inv_co56_efolding_time * np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value)
- artis_model['ni56_fraction'] * inv_ni56_efolding_time * np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['ni56_fraction'] * inv_co56_efolding_time * np.exp(-(inv_ni56_efolding_time * time_explosion).to(1).value))
/ (inv_ni56_efolding_time - inv_co56_efolding_time))
abundances.ix[25] = mn_stable
abundances.ix[25] += (inv_fe52_efolding_time * artis_model['fe52_fraction'] /
(inv_fe52_efolding_time - inv_mn52_efolding_time)) * \
(np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value) - np.exp(-(inv_fe52_efolding_time * time_explosion).to(1).value))
abundances.ix[24] = cr_stable
abundances.ix[24] += artis_model['cr48_fraction'] * np.exp(-(time_explosion* inv_cr48_efolding_time).to(1).value)
abundances.ix[24] += ((artis_model['fe52_fraction'] * inv_fe52_efolding_time
- artis_model['fe52_fraction'] * inv_mn52_efolding_time
- artis_model['fe52_fraction'] * inv_fe52_efolding_time * np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value)
+ artis_model['fe52_fraction'] * inv_mn52_efolding_time * np.exp(-(inv_fe52_efolding_time * time_explosion).to(1).value))
/ (inv_fe52_efolding_time - inv_mn52_efolding_time))
abundances.ix[23] = v_stable
abundances.ix[23] += (inv_cr48_efolding_time * artis_model['cr48_fraction'] /
(inv_cr48_efolding_time - inv_v48_efolding_time)) * \
(np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value) - np.exp(-(inv_cr48_efolding_time * time_explosion).to(1).value))
abundances.ix[22] = ti_stable
abundances.ix[22] += ((artis_model['cr48_fraction'] * inv_cr48_efolding_time
- artis_model['cr48_fraction'] * inv_v48_efolding_time
- artis_model['cr48_fraction'] * inv_cr48_efolding_time * np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value)
+ artis_model['cr48_fraction'] * inv_v48_efolding_time * np.exp(-(inv_cr48_efolding_time * time_explosion).to(1).value))
/ (inv_cr48_efolding_time - inv_v48_efolding_time))
if 'split_shells' in model_file_section_dict:
split_shells = int(model_file_section_dict['split_shells'])
else:
split_shells = 1
if split_shells > 1:
logger.info('Increasing the number of shells by a factor of %s' % split_shells)
no_of_shells = len(v_inner)
velocities = np.linspace(v_inner[0], v_outer[-1], no_of_shells * split_shells + 1)
v_inner = velocities[:-1]
v_outer = velocities[1:]
old_mean_densities = mean_densities
mean_densities = np.empty(no_of_shells*split_shells) * old_mean_densities.unit
new_abundance_data = np.empty((abundances.values.shape[0], no_of_shells * split_shells))
for i in xrange(split_shells):
mean_densities[i::split_shells] = old_mean_densities
new_abundance_data[:,i::split_shells] = abundances.values
abundances = pd.DataFrame(new_abundance_data, index=abundances.index)
#def parser_simple_ascii_model
return v_inner, v_outer, mean_densities, abundances
model_file_section_parser = {}
model_file_section_parser['artis'] = parse_artis_model_setup_files
try:
parser = model_file_section_parser[model_setup_file_dict['type']]
except KeyError:
        raise ConfigurationError('In model file section only types %s are allowed (supplied %s) ' %
                                 (model_file_section_parser.keys(), model_setup_file_dict['type']))
return parser(model_setup_file_dict, time_explosion)
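# For reference, a model file section dict of the kind parse_model_file_section
# expects for the 'artis' parser might look like the sketch below (file names and
# velocity limits are placeholders; only 'type', 'structure_fname' and
# 'abundances_fname' are required, the remaining keys are optional):
# model_file_section_dict = {'type': 'artis',
#                            'structure_fname': 'artis_model.dat',
#                            'abundances_fname': 'artis_abundances.dat',
#                            'v_lowest': '9000 km/s',
#                            'v_highest': '20000 km/s',
#                            'split_shells': 2}
# The parser then returns (v_inner, v_outer, mean_densities, abundances).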
def parse_density_file_section(density_file_dict, time_explosion):
density_file_parser = {}
def parse_artis_density(density_file_dict, time_explosion):
density_file = density_file_dict['name']
for i, line in enumerate(file(density_file)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
velocities, mean_densities_0 = np.recfromtxt(density_file, skip_header=2, usecols=(1, 2), unpack=True)
#converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current ti
velocities = u.Quantity(np.append([0], velocities), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** mean_densities_0, 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS file %s (no_of_shells=length of dataset)', density_file)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % density_file)
min_shell = 1
max_shell = no_of_shells
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', density_file,
no_of_shells, sum(masses.value))
if 'v_lowest' in density_file_dict:
v_lowest = parse_quantity(density_file_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in density_file_dict:
v_highest = parse_quantity(density_file_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
return v_inner, v_outer, mean_densities, min_shell, max_shell
density_file_parser['artis'] = parse_artis_density
try:
parser = density_file_parser[density_file_dict['type']]
except KeyError:
        raise ConfigurationError('In density file section only types %s are allowed (supplied %s) ' %
(density_file_parser.keys(), density_file_dict['type']))
return parser(density_file_dict, time_explosion)
def parse_density_section(density_dict, v_inner, v_outer, time_explosion):
density_parser = {}
#Parse density uniform
def parse_uniform(density_dict, v_inner, v_outer, time_explosion):
no_of_shells = len(v_inner)
return parse_quantity(density_dict['value']).to('g cm^-3') * np.ones(no_of_shells)
density_parser['uniform'] = parse_uniform
#Parse density branch85 w7
def parse_branch85(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0', 19.9999584)
if isinstance(time_0, basestring):
time_0 = parse_quantity(time_0).to('s')
else:
time_0 *= u.s
logger.debug('time_0 not supplied for density branch85 - using sensible default %g', time_0)
density_coefficient = density_dict.pop('density_coefficient', None)
if density_coefficient is None:
density_coefficient = 3e29 * u.Unit('g/cm^3')
logger.debug('density_coefficient not supplied for density type branch85 - using sensible default %g',
density_coefficient)
else:
density_coefficient = parse_quantity(density_coefficient)
velocities = 0.5 * (v_inner + v_outer)
densities = density_coefficient * (velocities.value * 1e-5) ** -7
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['branch85_w7'] = parse_branch85
def parse_power_law(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0', 19.9999584)
if isinstance(time_0, basestring):
time_0 = parse_quantity(time_0).to('s').value
else:
            logger.debug('time_0 not supplied for density power_law - using sensible default %g', time_0)
try:
rho_0 = density_dict.pop('rho_0')
if isinstance(rho_0, basestring):
rho_0 = parse_quantity(rho_0).to('g/cm^3').value
else:
raise KeyError
except KeyError:
rho_0 = 1e-2
            logger.warning('rho_0 was not given in the config! Using %g', rho_0)
try:
exponent = density_dict.pop('exponent')
except KeyError:
exponent = 2
logger.warning('exponent was not given in the config file! Using %f', exponent)
velocities = 0.5 * (v_inner + v_outer)
densities = calc_power_law_density(velocities, v_inner[0], rho_0, exponent)
densities = u.Quantity(densities, 'g/cm^3')
return densities
density_parser['power_law'] = parse_power_law
def parse_exponential(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0', 19.9999584)
if isinstance(time_0, basestring):
time_0 = parse_quantity(time_0).to('s').value
else:
            logger.debug('time_0 not supplied for density exponential - using sensible default %g', time_0)
try:
rho_0 = density_dict.pop('rho_0')
if isinstance(rho_0, basestring):
rho_0 = parse_quantity(rho_0).to('g/cm^3').value
else:
raise KeyError
except KeyError:
rho_0 = 1e-2
            logger.warning('rho_0 was not given in the config! Using %g', rho_0)
try:
v_0 = density_dict.pop('v_0')
if isinstance(v_0, basestring):
v_0 = parse_quantity(v_0).to('km/s').value
except KeyError:
v_0 = 1
logger.warning('v_0 was not given in the config file! Using %f km/s', v_0)
velocities = 0.5 * (v_inner + v_outer)
densities = calc_exponential_density(velocities, v_0, rho_0)
densities = u.Quantity(densities, 'g/cm^3')
return densities
density_parser['exponential'] = parse_exponential
try:
parser = density_parser[density_dict['type']]
except KeyError:
raise ConfigurationError('In density section only types %s are allowed (supplied %s) ' %
(density_parser.keys(), density_dict['type']))
return parser(density_dict, v_inner, v_outer, time_explosion)
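# A sketch of the density dicts parse_density_section accepts (all values below
# are placeholders, not recommended settings):
# parse_density_section({'type': 'uniform', 'value': '1e-13 g/cm^3'}, v_inner, v_outer, t_exp)
# parse_density_section({'type': 'power_law', 'rho_0': '1e-13 g/cm^3', 'exponent': 2}, v_inner, v_outer, t_exp)
# parse_density_section({'type': 'exponential', 'rho_0': '1e-13 g/cm^3', 'v_0': '8000 km/s'}, v_inner, v_outer, t_exp)
# Each branch returns an astropy Quantity array with one density per shell.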
def parse_abundance_file_section(abundance_file_dict, abundances, min_shell, max_shell):
abundance_file_parser = {}
def parse_artis(abundance_file_dict, abundances, min_shell, max_shell):
#### ---- debug ----
time_of_model = 0.0
####
fname = abundance_file_dict['name']
max_atom = 30
logger.info("Parsing ARTIS Abundance section from shell %d to %d", min_shell, max_shell)
abundances.values[:max_atom, :] = np.loadtxt(fname)[min_shell:max_shell, 1:].transpose()
return abundances
abundance_file_parser['artis'] = parse_artis
try:
parser = abundance_file_parser[abundance_file_dict['type']]
except KeyError:
raise ConfigurationError('In abundance file section only types %s are allowed (supplied %s) ' %
(abundance_file_parser.keys(), abundance_file_dict['type']))
return parser(abundance_file_dict, abundances, min_shell, max_shell)
def parse_supernova_section(supernova_dict):
"""
Parse the supernova section
Parameters
----------
supernova_dict: dict
YAML parsed supernova dict
Returns
-------
config_dict: dict
"""
config_dict = {}
#parse luminosity
luminosity_value, luminosity_unit = supernova_dict['luminosity_requested'].strip().split()
if luminosity_unit == 'log_lsun':
config_dict['luminosity_requested'] = 10 ** (float(luminosity_value) + np.log10(constants.L_sun.cgs.value)) * u.erg / u.s
else:
config_dict['luminosity_requested'] = (float(luminosity_value) * u.Unit(luminosity_unit)).to('erg/s')
config_dict['time_explosion'] = parse_quantity(supernova_dict['time_explosion']).to('s')
if 'distance' in supernova_dict:
config_dict['distance'] = parse_quantity(supernova_dict['distance'])
else:
config_dict['distance'] = None
if 'luminosity_wavelength_start' in supernova_dict:
config_dict['luminosity_nu_end'] = parse_quantity(supernova_dict['luminosity_wavelength_start']).\
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_end'] = np.inf * u.Hz
if 'luminosity_wavelength_end' in supernova_dict:
config_dict['luminosity_nu_start'] = parse_quantity(supernova_dict['luminosity_wavelength_end']).\
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_start'] = 0.0 * u.Hz
return config_dict
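# A minimal supernova section as it would arrive from the YAML parser (values are
# placeholders only):
# >>> parse_supernova_section({'luminosity_requested': '9.44 log_lsun',
# ...                          'time_explosion': '13 day'})
# The returned dict holds 'luminosity_requested' converted to erg/s and
# 'time_explosion' converted to seconds; 'distance' defaults to None and the
# luminosity frequency limits default to the full range (0 Hz to inf Hz).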
def calculate_w7_branch85_densities(velocities, time_explosion, time_0=19.9999584, density_coefficient=3e29):
"""
Generated densities from the fit to W7 in Branch 85 page 620 (citation missing)
Parameters
----------
velocities : `~numpy.ndarray`
velocities in cm/s
time_explosion : `float`
time since explosion needed to descale density with expansion
time_0 : `float`
time in seconds of the w7 model - default 19.999, no reason to change
density_coefficient : `float`
coefficient for the polynomial - obtained by fitting to W7, no reason to change
"""
densities = density_coefficient * (velocities * 1e-5) ** -7
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities[1:]
class TARDISConfigurationNameSpace(object):
def __init__(self, config_dict):
self.config_dict = config_dict
def __getattr__(self, item):
if item in self.config_dict:
config_item = self.config_dict[item]
if isinstance(config_item, dict):
setattr(self, item, TARDISConfigurationNameSpace(config_item))
return getattr(self, item)
else:
return self.config_dict[item]
else:
return super(TARDISConfigurationNameSpace, self).__getattribute__(item)
def __getitem__(self, item):
return self.config_dict.__getitem__(item)
def get(self, k, d=None):
return self.config_dict.get(k, d)
def __repr__(self):
return pp.pformat(self.config_dict)
def __dir__(self):
return self.__dict__.keys() + self.config_dict.keys()
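# The namespace wrapper above simply exposes nested dictionaries as attributes,
# for example (illustrative values only):
# >>> ns = TARDISConfigurationNameSpace({'plasma': {'ionization': 'lte'}})
# >>> ns.plasma.ionization
# 'lte'
# >>> ns['plasma']
# {'ionization': 'lte'}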
class TARDISConfiguration(TARDISConfigurationNameSpace):
"""
Tardis configuration class
"""
@classmethod
def from_yaml(cls, fname, test_parser=False):
try:
yaml_dict = yaml.load(file(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
tardis_config_version = yaml_dict.get('tardis_config_version', None)
if tardis_config_version != 'v1.0':
raise ConfigurationError('Currently only tardis_config_version v1.0 supported')
return cls.from_config_dict(yaml_dict, test_parser=test_parser)
@classmethod
def from_config_dict(cls, raw_dict, atom_data=None, test_parser=False):
"""
Reading in from a YAML file and commandline args. Preferring commandline args when given
Parameters
----------
fname : filename for the yaml file
args : namespace object
Not implemented Yet
Returns
-------
`tardis.config_reader.TARDISConfiguration`
"""
config_dict = {}
raw_dict = copy.deepcopy(raw_dict)
#First let's see if we can find an atom_db anywhere:
if test_parser:
atom_data = None
elif 'atom_data' in raw_dict.keys():
atom_data_fname = raw_dict['atom_data']
config_dict['atom_data_fname'] = atom_data_fname
else:
raise ConfigurationError('No atom_data key found in config or command line')
if atom_data is None and not test_parser:
logger.info('Reading Atomic Data from %s', atom_data_fname)
atom_data = atomic.AtomData.from_hdf5(atom_data_fname)
else:
atom_data = atom_data
#Parsing supernova dictionary
config_dict['supernova'] = parse_supernova_section(raw_dict['supernova'])
#Parsing the model section
model_section = raw_dict.pop('model')
v_inner = None
v_outer = None
mean_densities = None
abundances = None
if 'file' in model_section:
v_inner, v_outer, mean_densities, abundances = parse_model_file_section(model_section.pop('file'),
config_dict['supernova']['time_explosion'])
no_of_shells = len(v_inner)
structure_config_dict = {}
if 'structure' in model_section:
#Trying to figure out the structure (number of shells)
structure_section = model_section.pop('structure')
inner_boundary_index, outer_boundary_index = None, None
try:
structure_section_type = structure_section['type']
except KeyError:
raise ConfigurationError('Structure section requires "type" keyword')
if structure_section_type == 'specific':
velocities = parse_quantity_linspace(structure_section['velocity']).to('cm/s')
v_inner, v_outer = velocities[:-1], velocities[1:]
mean_densities = parse_density_section(structure_section['density'], v_inner, v_outer,
config_dict['supernova']['time_explosion'])
elif structure_section_type == 'file':
v_inner_boundary, v_outer_boundary = structure_section.get('v_inner_boundary', 0 * u.km/u.s), \
structure_section.get('v_outer_boundary', np.inf * u.km/u.s)
if not hasattr(v_inner_boundary, 'unit'):
v_inner_boundary = parse_quantity(v_inner_boundary)
if not hasattr(v_outer_boundary, 'unit'):
v_outer_boundary = parse_quantity(v_outer_boundary)
v_inner, v_outer, mean_densities, inner_boundary_index, outer_boundary_index =\
read_density_file(structure_section['filename'], structure_section['filetype'],
config_dict['supernova']['time_explosion'], v_inner_boundary, v_outer_boundary)
else:
raise ConfigurationError('structure section required in configuration file')
r_inner = config_dict['supernova']['time_explosion'] * v_inner
r_outer = config_dict['supernova']['time_explosion'] * v_outer
r_middle = 0.5 * (r_inner + r_outer)
structure_config_dict['v_inner'] = v_inner
structure_config_dict['v_outer'] = v_outer
structure_config_dict['mean_densities'] = mean_densities
no_of_shells = len(v_inner)
structure_config_dict['no_of_shells'] = no_of_shells
structure_config_dict['r_inner'] = r_inner
structure_config_dict['r_outer'] = r_outer
structure_config_dict['r_middle'] = r_middle
structure_config_dict['volumes'] = (4. / 3) * np.pi * (r_outer ** 3 - r_inner ** 3)
config_dict['structure'] = structure_config_dict
#Now that the structure section is parsed we move on to the abundances
abundances_section = model_section.pop('abundances')
abundances_type = abundances_section.pop('type')
if abundances_type == 'uniform':
abundances = pd.DataFrame(columns=np.arange(no_of_shells),
index=pd.Index(np.arange(1, 120), name='atomic_number'), dtype=np.float64)
for element_symbol_string in abundances_section:
z = element_symbol2atomic_number(element_symbol_string)
abundances.ix[z] = float(abundances_section[element_symbol_string])
elif abundances_type == 'file':
index, abundances = read_abundances_file(abundances_section['filename'], abundances_section['filetype'],
inner_boundary_index, outer_boundary_index)
if len(index) != no_of_shells:
                raise ConfigurationError('The abundance file specified does not have the same number of cells '
                                         'as the specified density profile')
abundances = abundances.replace(np.nan, 0.0)
abundances = abundances[abundances.sum(axis=1) > 0]
norm_factor = abundances.sum(axis=0)
if np.any(np.abs(norm_factor - 1) > 1e-12):
logger.warning("Abundances have not been normalized to 1. - normalizing")
abundances /= norm_factor
config_dict['abundances'] = abundances
########### DOING PLASMA SECTION ###############
plasma_section = raw_dict.pop('plasma')
plasma_config_dict = {}
if plasma_section['ionization'] not in ('nebular', 'lte'):
raise ConfigurationError('plasma_type only allowed to be "nebular" or "lte"')
plasma_config_dict['ionization'] = plasma_section['ionization']
if plasma_section['excitation'] not in ('dilute-lte', 'lte'):
            raise ConfigurationError('plasma "excitation" only allowed to be "dilute-lte" or "lte"')
plasma_config_dict['excitation'] = plasma_section['excitation']
if plasma_section['radiative_rates_type'] not in ('dilute-blackbody', 'detailed'):
raise ConfigurationError('radiative_rates_types must be either "dilute-blackbody" or "detailed"')
plasma_config_dict['radiative_rates_type'] = plasma_section['radiative_rates_type']
if plasma_section['line_interaction_type'] not in ('scatter', 'downbranch', 'macroatom'):
            raise ConfigurationError('line_interaction_type must be either "scatter", "downbranch", or "macroatom"')
plasma_config_dict['line_interaction_type'] = plasma_section['line_interaction_type']
if 'w_epsilon' in plasma_section:
plasma_config_dict['w_epsilon'] = plasma_section['w_epsilon']
else:
logger.warn('"w_epsilon" not specified in plasma section - setting it to 1e-10')
plasma_config_dict['w_epsilon'] = 1e-10
if 'delta_treatment' in plasma_section:
plasma_config_dict['delta_treatment'] = plasma_section['delta_treatment']
else:
logger.warn('"delta_treatment" not specified in plasma section - defaulting to None')
plasma_config_dict['delta_treatment'] = None
if 'initial_t_inner' in plasma_section:
plasma_config_dict['t_inner'] = parse_quantity(plasma_section['initial_t_inner']).to('K')
else:
plasma_config_dict['t_inner'] = (((config_dict['supernova']['luminosity_requested'] / \
(4 * np.pi * r_inner[0]**2 * constants.sigma_sb))**.5)**.5).to('K')
logger.info('"initial_t_inner" is not specified in the plasma section - '
'initializing to %s with given luminosity', plasma_config_dict['t_inner'])
if 'initial_t_rads' in plasma_section:
            if isinstance(plasma_section['initial_t_rads'], basestring):
uniform_t_rads = parse_quantity(plasma_section['initial_t_rads'])
plasma_config_dict['t_rads'] = u.Quantity(np.ones(no_of_shells) * uniform_t_rads.value, u.K)
elif astropy.utils.isiterable(plasma_section['initial_t_rads']):
assert len(plasma_section['initial_t_rads']) == no_of_shells
plasma_config_dict['t_rads'] = u.Quantity(plasma_section['initial_t_rads'], u.K)
else:
logger.info('No "initial_t_rads" specified - initializing with 10000 K')
plasma_config_dict['t_rads'] = u.Quantity(np.ones(no_of_shells) * 10000., u.K)
##### NLTE subsection of Plasma start
nlte_config_dict = {}
nlte_species = []
if 'nlte' in plasma_section:
nlte_section = plasma_section['nlte']
if 'species' in nlte_section:
nlte_species_list = nlte_section.pop('species')
for species_string in nlte_species_list:
nlte_species.append(species_string_to_tuple(species_string))
nlte_config_dict['species'] = nlte_species
nlte_config_dict['species_string'] = nlte_species_list
nlte_config_dict.update(nlte_section)
if 'coronal_approximation' not in nlte_section:
logger.debug('NLTE "coronal_approximation" not specified in NLTE section - defaulting to False')
nlte_config_dict['coronal_approximation'] = False
if 'classical_nebular' not in nlte_section:
logger.debug('NLTE "classical_nebular" not specified in NLTE section - defaulting to False')
nlte_config_dict['classical_nebular'] = False
elif nlte_section: #checks that the dictionary is not empty
logger.warn('No "species" given - ignoring other NLTE options given:\n%s',
pp.pformat(nlte_section))
if not nlte_config_dict:
nlte_config_dict['species'] = []
plasma_config_dict['nlte'] = nlte_config_dict
#^^^^^^^ NLTE subsection of Plasma end
config_dict['plasma'] = plasma_config_dict
#^^^^^^^^^^^^^^ End of Plasma Section
##### Monte Carlo Section
montecarlo_section = raw_dict.pop('montecarlo')
montecarlo_config_dict = {}
#PARSING convergence section
convergence_variables = ['t_inner', 't_rad', 'w']
convergence_config_dict = {}
if 'convergence_strategy' in montecarlo_section:
convergence_section = montecarlo_section.pop('convergence_strategy')
if 'lock_t_inner_cycles' in convergence_section:
lock_t_inner_cycles = convergence_section['lock_t_inner_cycles']
logger.info('lock_t_inner_cycles set to %d cycles', lock_t_inner_cycles)
else:
lock_t_inner_cycles = None
if 't_inner_update_exponent' in convergence_section:
t_inner_update_exponent = convergence_section['t_inner_update_exponent']
logger.info('t_inner update exponent set to %g', t_inner_update_exponent)
else:
t_inner_update_exponent = None
if convergence_section['type'] == 'damped':
                convergence_config_dict['type'] = 'damped'
global_damping_constant = convergence_section['damping_constant']
for convergence_variable in convergence_variables:
convergence_parameter_name = convergence_variable
current_convergence_parameters = {}
convergence_config_dict[convergence_parameter_name] = current_convergence_parameters
if convergence_variable in convergence_section:
current_convergence_parameters['damping_constant'] \
= convergence_section[convergence_variable]['damping_constant']
else:
current_convergence_parameters['damping_constant'] = global_damping_constant
elif convergence_section['type'] == 'specific':
convergence_config_dict['type'] = 'specific'
global_convergence_parameters = {}
global_convergence_parameters['damping_constant'] = convergence_section['damping_constant']
global_convergence_parameters['threshold'] = convergence_section['threshold']
global_convergence_parameters['fraction'] = convergence_section['fraction']
for convergence_variable in convergence_variables:
convergence_parameter_name = convergence_variable
current_convergence_parameters = {}
convergence_config_dict[convergence_parameter_name] = current_convergence_parameters
if convergence_variable in convergence_section:
for param in global_convergence_parameters.keys():
if param == 'fraction' and convergence_variable == 't_inner':
continue
if param in convergence_section[convergence_variable]:
current_convergence_parameters[param] = convergence_section[convergence_variable][param]
else:
current_convergence_parameters[param] = global_convergence_parameters[param]
else:
convergence_config_dict[convergence_parameter_name] = global_convergence_parameters.copy()
global_convergence_parameters['hold'] = convergence_section['hold']
convergence_config_dict['global_convergence_parameters'] = global_convergence_parameters
else:
raise ValueError("convergence criteria unclear %s", convergence_section['type'])
else:
lock_t_inner_cycles = None
t_inner_update_exponent = None
logger.warning('No convergence criteria selected - just damping by 0.5 for w, t_rad and t_inner')
convergence_config_dict['type'] = 'damped'
for convergence_variable in convergence_variables:
convergence_parameter_name = convergence_variable
convergence_config_dict[convergence_parameter_name] = dict(damping_constant=0.5)
if lock_t_inner_cycles is None:
logger.warning('t_inner update lock cycles not set - defaulting to 1')
lock_t_inner_cycles = 1
if t_inner_update_exponent is None:
logger.warning('t_inner update exponent not set - defaulting to -0.5')
t_inner_update_exponent = -0.5
convergence_config_dict['lock_t_inner_cycles'] = lock_t_inner_cycles
convergence_config_dict['t_inner_update_exponent'] = t_inner_update_exponent
montecarlo_config_dict['convergence'] = convergence_config_dict
###### END of convergence section reading
if 'last_no_of_packets' not in montecarlo_section:
montecarlo_section['last_no_of_packets'] = None
if 'no_of_virtual_packets' not in montecarlo_section:
montecarlo_section['no_of_virtual_packets'] = 0
montecarlo_config_dict.update(montecarlo_section)
disable_electron_scattering = plasma_section.get('disable_electron_scattering', False)
if disable_electron_scattering is False:
logger.info("Electron scattering switched on")
montecarlo_config_dict['sigma_thomson'] =6.652486e-25 / (u.cm**2)
else:
logger.warn('Disabling electron scattering - this is not physical')
montecarlo_config_dict['sigma_thomson'] = 1e-200 / (u.cm**2)
montecarlo_config_dict['enable_reflective_inner_boundary'] = False
montecarlo_config_dict['inner_boundary_albedo'] = 0.0
if 'inner_boundary_albedo' in montecarlo_section:
montecarlo_config_dict['inner_boundary_albedo'] = montecarlo_section['inner_boundary_albedo']
if 'enable_reflective_inner_boundary' not in montecarlo_section:
logger.warn('inner_boundary_albedo set, however enable_reflective_inner_boundary option not specified '
'- defaulting to reflective inner boundary')
montecarlo_config_dict['enable_reflective_inner_boundary'] = True
if 'enable_reflective_inner_boundary' in montecarlo_section:
montecarlo_config_dict['enable_reflective_inner_boundary'] = montecarlo_section['enable_reflective_inner_boundary']
if montecarlo_section['enable_reflective_inner_boundary'] == True and 'inner_boundary_albedo' not in montecarlo_section:
logger.warn('enabled reflective inner boundary, but "inner_boundary_albedo" not set - defaulting to 0.5')
montecarlo_config_dict['inner_boundary_albedo'] = 0.5
if 'black_body_sampling' in montecarlo_section:
black_body_sampling_section = montecarlo_section.pop('black_body_sampling')
sampling_start, sampling_end = parse_spectral_bin(black_body_sampling_section['start'],
black_body_sampling_section['stop'])
montecarlo_config_dict['black_body_sampling']['start'] = sampling_start
montecarlo_config_dict['black_body_sampling']['end'] = sampling_end
montecarlo_config_dict['black_body_sampling']['samples'] = np.int64(black_body_sampling_section['num'])
else:
logger.warn('No "black_body_sampling" section in config file - using defaults of '
'50 - 200000 Angstrom (1e6 samples)')
montecarlo_config_dict['black_body_sampling'] = {}
montecarlo_config_dict['black_body_sampling']['start'] = 50 * u.angstrom
montecarlo_config_dict['black_body_sampling']['end'] = 200000 * u.angstrom
montecarlo_config_dict['black_body_sampling']['samples'] = np.int64(1e6)
config_dict['montecarlo'] = montecarlo_config_dict
##### End of MonteCarlo section
##### spectrum section ######
spectrum_section = raw_dict.pop('spectrum')
spectrum_config_dict = {}
spectrum_frequency = parse_quantity_linspace(spectrum_section).to('Hz', u.spectral())
if spectrum_frequency[0] > spectrum_frequency[1]:
spectrum_frequency = spectrum_frequency[::-1]
spectrum_config_dict['start'] = parse_quantity(spectrum_section['start'])
spectrum_config_dict['end'] = parse_quantity(spectrum_section['stop'])
spectrum_config_dict['bins'] = spectrum_section['num']
spectrum_frequency = np.linspace(spectrum_config_dict['end'].to('Hz', u.spectral()).value,
spectrum_config_dict['start'].to('Hz', u.spectral()).value,
num=spectrum_config_dict['bins'] + 1) * u.Hz
spectrum_config_dict['frequency'] = spectrum_frequency.to('Hz')
config_dict['spectrum'] = spectrum_config_dict
return cls(config_dict, atom_data)
def __init__(self, config_dict, atom_data):
super(TARDISConfiguration, self).__init__(config_dict)
self.atom_data = atom_data
selected_atomic_numbers = self.abundances.index
if atom_data is not None:
self.number_densities = (self.abundances * self.structure.mean_densities.to('g/cm^3').value)
self.number_densities = self.number_densities.div(self.atom_data.atom_data.mass.ix[selected_atomic_numbers],
axis=0)
else:
logger.critical('atom_data is None, only sensible for testing the parser')
| bsd-3-clause |
Juanlu001/PyFME | examples/example_005_stationary_turn_during_ascent.py | 1 | 4221 | # -*- coding: utf-8 -*-
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Example
-------
Cessna 172, ISA1976 integrated with Flat Earth (Euler angles).
Example with trimmed aircraft: stationary turn during ascent.
The main purpose of this example is to check if the aircraft trimmed in a given
state maintains the trimmed flight condition.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pyfme.aircrafts import Cessna172
from pyfme.environment.environment import Environment
from pyfme.environment.atmosphere import ISA1976
from pyfme.environment.gravity import VerticalConstant
from pyfme.environment.wind import NoWind
from pyfme.models.systems import EulerFlatEarth
from pyfme.simulator import BatchSimulation
from pyfme.utils.trimmer import steady_state_flight_trimmer
aircraft = Cessna172()
atmosphere = ISA1976()
gravity = VerticalConstant()
wind = NoWind()
environment = Environment(atmosphere, gravity, wind)
# Initial conditions.
TAS = 45 # m/s
h0 = 3000 # m
psi0 = 1.0 # rad
x0, y0 = 0, 0 # m
turn_rate = 0.25 # rad/s
gamma0 = 0.05 # rad
system = EulerFlatEarth(lat=0, lon=0, h=h0, psi=psi0, x_earth=x0, y_earth=y0)
not_trimmed_controls = {'delta_elevator': 0.05,
'delta_aileron': 0.01 * np.sign(turn_rate),
'delta_rudder': 0.01 * np.sign(turn_rate),
'delta_t': 0.5}
controls2trim = ['delta_elevator', 'delta_aileron', 'delta_rudder', 'delta_t']
trimmed_ac, trimmed_sys, trimmed_env, results = steady_state_flight_trimmer(
aircraft, system, environment, TAS=TAS, controls_0=not_trimmed_controls,
controls2trim=controls2trim, gamma=gamma0, turn_rate=turn_rate, verbose=1)
#print(results)
print()
print('delta_elevator = ',"%8.4f" % np.rad2deg(results['delta_elevator']), 'deg')
print('delta_aileron = ', "%8.4f" % np.rad2deg(results['delta_aileron']), 'deg')
print('delta_rudder = ', "%8.4f" % np.rad2deg(results['delta_rudder']), 'deg')
print('delta_t = ', "%8.4f" % results['delta_t'], '%')
print()
print('alpha = ', "%8.4f" % np.rad2deg(results['alpha']), 'deg')
print('beta = ', "%8.4f" % np.rad2deg(results['beta']), 'deg')
print()
print('u = ', "%8.4f" % results['u'], 'm/s')
print('v = ', "%8.4f" % results['v'], 'm/s')
print('w = ', "%8.4f" % results['w'], 'm/s')
print()
print('psi = ', "%8.4f" % np.rad2deg(psi0), 'deg')
print('theta = ', "%8.4f" % np.rad2deg(results['theta']), 'deg')
print('phi = ', "%8.4f" % np.rad2deg(results['phi']), 'deg')
print()
print('p =', "%8.4f" % results['p'], 'rad/s')
print('q =', "%8.4f" % results['q'], 'rad/s')
print('r =', "%8.4f" % results['r'], 'rad/s')
print()
my_simulation = BatchSimulation(trimmed_ac, trimmed_sys, trimmed_env)
tfin = 80 # seconds
N = tfin * 100 + 1
time = np.linspace(0, tfin, N)
initial_controls = trimmed_ac.controls
controls = {}
for control_name, control_value in initial_controls.items():
controls[control_name] = np.ones_like(time) * control_value
my_simulation.set_controls(time, controls)
par_list = ['x_earth', 'y_earth', 'height',
'psi', 'theta', 'phi',
'u', 'v', 'w',
'v_north', 'v_east', 'v_down',
'p', 'q', 'r',
'alpha', 'beta', 'TAS',
'F_xb', 'F_yb', 'F_zb',
'M_xb', 'M_yb', 'M_zb']
my_simulation.set_par_dict(par_list)
my_simulation.run_simulation()
# print(my_simulation.par_dict)
plt.style.use('ggplot')
for ii in range(len(par_list) // 3):
three_params = par_list[3 * ii:3 * ii + 3]
fig, ax = plt.subplots(3, 1, sharex=True)
for jj, par in enumerate(three_params):
ax[jj].plot(time, my_simulation.par_dict[par])
ax[jj].set_ylabel(par)
ax[jj].set_xlabel('time (s)')
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'])
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'] * 0)
ax.set_xlabel('x_earth')
ax.set_ylabel('y_earth')
ax.set_zlabel('z_earth')
plt.show()
| mit |
mlhhu2017/identifyDigit | r.weishaupt/mnist_utils.py | 1 | 4405 | # Import required packages
import numpy as np;
import idx2numpy as idx;
import matplotlib.pyplot as plt;
def loadset(data, labels):
""" Loads data and labels from given paths.
Arguments:
data [string] -- Path to data file
labels [string] -- Path to labels file
Return:
[2-tuple:np.array] -- Returns tuple of numpy-arrays,
first one is the data set, second one is the label set
"""
return tuple(map(idx.convert_from_file, [data, labels]));
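# Example usage, assuming the standard MNIST idx files are available locally under
# these (hypothetical) file names:
# >>> data, labels = loadset("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
# >>> data.shape, labels.shape
# ((60000, 28, 28), (60000,))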
def showimg(img, plotsize=[3,3]):
""" Plots a single image as grayscaled pixel map through matplotlib.pyplot.
Arguments:
img [np.array] -- Matrix of integers to be interpreted as
pixels of an image
plotsize [list(integer)] -- Sets the size of the plot for the image.
Return:
[void]
"""
# Create new canvas and plot image without axes
fig = plt.figure(figsize=plotsize);
plt.imshow(img, cmap="gray");
plt.axis("off");
def showimgset(imgs, y = None, dim = 28, plotsize=[10,5]):
""" Plots a set of n images positioned in a y*x-grid through matplotlib.pyplot.
Arguments:
imgs [np.array] -- List of n images
y [int] -- Number of rows, defaults to ceil(sqrt(n))
dim [int] -- The dimension of a single image is dim x dim, defaults to 28
plotsize [list(integer)] -- Sets the size of the plot for the set of images.
Return:
[void]
"""
    # Number of images received
k = len(imgs);
# At least one image is required
if k < 1:
raise ValueError("No image given!");
    # Did we receive a value for y?
if y == None:
# Calculate default value
y = int(np.ceil(np.sqrt(k)));
# Calculate x value based on y
x = int(np.ceil(k / y));
# Are there enough images given?
if k != x*y:
# Fill up with black images
imgs = np.concatenate((imgs, np.zeros((x*y-k, dim, dim))));
# Reshape to (y*28)x(x*28)-array
imgmap = np.vstack([np.hstack(imgs[j*x:(j+1)*x]) for j in range(y)]);
# Create new canvas, plot image map
fig = plt.figure(figsize=plotsize);
plt.imshow(imgmap, cmap="gray");
plt.axis("off");
def getdigit(digit, data, labels):
""" Returns the first image representing a digit of type num.
Data and labels must be in the same order!
Arguments:
digit [integer] -- Type of digit to be returned
data [np.array] -- List of all digits
labels [np.array] -- List of all labels
Return:
[np.array] -- Returns a single image as np.array
"""
# Search for first occurence of digit in labels
i = 0;
while True:
# Did we find the right one?
if labels[i] == digit:
break;
# Check next one
i += 1;
# Return corresponding image
return data[i];
def getalldigits(digit, data, labels, n=None):
""" Returns list of n images all representing the searched digit.
Arguments:
digit [integer] -- Type of digit to be returned
data [np.array] -- List of all digits
labels [np.array] -- List of all labels
n [integer] -- Amount of digits to be returned, defaults to all
Return:
[np.array] - List of images representing given digit
"""
    # Return all entries in data where the corresponding labels entry is digit.
return data[np.where(labels == digit)[0][:n]]
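# Example: collect the first 25 images of the digit 3 and show them in a 5x5 grid
# (assumes data and labels were loaded with loadset as sketched above):
# >>> threes = getalldigits(3, data, labels, n=25);
# >>> showimgset(threes, y=5);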
def showconfmatrix(matrix, labels, plotsize=[10,10]):
""" Plots a confusion matrix based on the given input matrix.
Arguments:
matrix [np.array] -- A nxn array of frequency counts
labels [np.array] -- An array of n labels
plotsize [list(integer)] -- Sets the size of the plot for the image.
Return:
[void]
"""
# Save matrix shape
x, y = matrix.shape;
# Create new canvas
fig = plt.figure(figsize=plotsize);
# Add suplot to canvas
ax = fig.add_subplot(111);
# Display matrix
img = ax.imshow(matrix, cmap=plt.cm.Pastel2);
# Add labels to fields
for i in range(x):
for j in range(y):
ax.annotate(str(matrix[i][j]), xy=(j,i),
horizontalalignment="center",
verticalalignment="center");
# Add color bar to the right
cb = fig.colorbar(img);
# Add labels to the axes
plt.xticks(range(x), labels[:x]);
plt.yticks(range(y), labels[:y]);
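# Example: a toy 2x2 confusion matrix for the labels "0" and "1"
# (illustrative numbers only):
# >>> showconfmatrix(np.array([[5, 1], [2, 7]]), np.array(["0", "1"]));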
| mit |
soylentdeen/BlurryApple | Alignment/grid_finder.py | 1 | 1380 | import pyfits
import numpy
import scipy
import matplotlib.pyplot as pyplot
#Calculates the nominal x and y positions for the images of the lenslet array
def generate_model(npeaks, height, fwhm, spacing):
feature_width = int(npeaks*spacing+4*fwhm)
feature_x = numpy.arange(feature_width)
feature_y = numpy.zeros(feature_width)
for i in range(npeaks):
c = (i+1)*spacing
feature_y += height*numpy.exp(-(feature_x-c)**2.0/(2.0*fwhm/2.4)**2.0)
return feature_x, feature_y
def find_centers(n, collapse, h, fwhm, spacing):
x, y = generate_model(n, h, fwhm, spacing)
y_corr = scipy.correlate(collapse, y)
x_corr = scipy.linspace(0, len(y_corr)-1, num=len(y_corr))
peak = x_corr[numpy.argsort(y_corr)[-1]]
centers = []
for i in range(n):
centers.append((i+1)*spacing+peak)
return numpy.array(centers)
nx = 20
ny = 21
df = 'clocking_image.fits'
data = pyfits.getdata(df)
xcollapse = data.sum(axis=0)
ycollapse = data.sum(axis=1)
fwhm = 2.5
spacing = 8.1
height = 0.75*numpy.max(xcollapse)
xcenters = find_centers(nx, xcollapse, height, fwhm, spacing)
ycenters = find_centers(ny, ycollapse, height, fwhm, spacing)
outfile = open('subaperture_positions.dat', 'w')
for xc in xcenters:
outfile.write("%.1f " % xc)
outfile.write("\n")
for yc in ycenters:
outfile.write("%.1f " % yc)
outfile.close()
| gpl-2.0 |
LilithWittmann/airflow | airflow/hooks/presto_hook.py | 37 | 2626 | from builtins import str
from pyhive import presto
from pyhive.exc import DatabaseError
from airflow.hooks.dbapi_hook import DbApiHook
import logging
logging.getLogger("pyhive").setLevel(logging.INFO)
class PrestoException(Exception):
pass
class PrestoHook(DbApiHook):
"""
Interact with Presto through PyHive!
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = 'presto_conn_id'
default_conn_name = 'presto_default'
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(self.presto_conn_id)
return presto.connect(
host=db.host,
port=db.port,
username=db.login,
catalog=db.extra_dejson.get('catalog', 'hive'),
schema=db.schema)
@staticmethod
def _strip_sql(sql):
return sql.strip().rstrip(';')
def get_records(self, hql, parameters=None):
"""
Get a set of records from Presto
"""
try:
return super(PrestoHook, self).get_records(
self._strip_sql(hql), parameters)
except DatabaseError as e:
obj = eval(str(e))
raise PrestoException(obj['message'])
def get_first(self, hql, parameters=None):
"""
Returns only the first row, regardless of how many rows the query
returns.
"""
try:
return super(PrestoHook, self).get_first(
self._strip_sql(hql), parameters)
except DatabaseError as e:
obj = eval(str(e))
raise PrestoException(obj['message'])
def get_pandas_df(self, hql, parameters=None):
"""
Get a pandas dataframe from a sql query.
"""
import pandas
cursor = self.get_cursor()
cursor.execute(self._strip_sql(hql), parameters)
try:
data = cursor.fetchall()
except DatabaseError as e:
obj = eval(str(e))
raise PrestoException(obj['message'])
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame()
return df
def run(self, hql, parameters=None):
"""
Execute the statement against Presto. Can be used to create views.
"""
return super(PrestoHook, self).run(self._strip_sql(hql), parameters)
def insert_rows(self):
        raise NotImplementedError()
| apache-2.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/misopy/sashimi_plot/Sashimi.py | 1 | 3552 | ##
## Class for representing figures
##
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
import misopy.sashimi_plot.plot_utils.plot_settings as plot_settings
import misopy.sashimi_plot.plot_utils.plotting as plotting
class Sashimi:
"""
Representation of a figure.
"""
def __init__(self, label, output_dir, dimensions=None, png=False,
output_filename=None, settings_filename=None,
event=None, chrom=None, no_posteriors=False):
"""
Initialize image settings.
"""
self.output_ext = ".pdf"
if png:
self.output_ext = ".png"
# Plot label, will be used in creating the plot
# output filename
self.label = label
# Set output directory
self.set_output_dir(output_dir)
# Plot settings
self.settings_filename = settings_filename
if self.settings_filename != None:
self.settings = plot_settings.parse_plot_settings(settings_filename,
event=event,
chrom=chrom,
no_posteriors=no_posteriors)
else:
# Load default settings if no settings filename was given
self.settings = plot_settings.get_default_settings()
if output_filename != None:
# If explicit output filename is given to us, use it
self.output_filename = output_filename
else:
# Otherwise, use the label and the output directory
self.set_output_filename()
if dimensions != None:
self.dimensions = dimensions
else:
fig_height = self.settings["fig_height"]
fig_width = self.settings["fig_width"]
print "Reading dimensions from settings..."
print " - Height: %.2f" %(float(fig_height))
print " - Width: %.2f" %(float(fig_width))
self.dimensions = [fig_width, fig_height]
def set_output_dir(self, output_dir):
self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
def set_output_filename(self):
plot_basename = "%s%s" %(self.label, self.output_ext)
self.output_filename = os.path.join(self.output_dir, plot_basename)
def setup_figure(self):
print "Setting up plot using dimensions: ", self.dimensions
plt.figure(figsize=self.dimensions)
# If asked, use sans serif fonts
font_size = self.settings["font_size"]
if self.settings["sans_serif"]:
print "Using sans serif fonts."
plotting.make_sans_serif(font_size=font_size)
def save_plot(self, plot_label=None):
"""
Save plot to the output directory. Determine
the file type.
"""
if self.output_filename == None:
raise Exception, "sashimi_plot does not know where to save the plot."
output_fname = None
if plot_label is not None:
# Use custom plot label if given
            ext = self.output_filename.rsplit(".", 1)[-1]
            dirname = os.path.dirname(self.output_filename)
            output_fname = \
                os.path.join(dirname, "%s.%s" % (plot_label, ext))
else:
output_fname = self.output_filename
print "Saving plot to: %s" %(output_fname)
plt.savefig(output_fname)
| apache-2.0 |
acbecker/solar | regress6.py | 1 | 9889 | import sys
import os
import numpy as np
import h5py
import multiprocessing
import cPickle
import ephem
import matplotlib.pyplot as plt
import types
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import train_test_split
from sklearn import metrics, linear_model, tree, ensemble
# NOTE: endless empehm warnings
# DeprecationWarning: PyOS_ascii_strtod and PyOS_ascii_atof are deprecated. Use PyOS_string_to_double instead.
# https://github.com/brandon-rhodes/pyephem/issues/18
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# NOTE, this one does cross-validation of the last 1000 points.
# Instead of a random selection. Duh...
fMapper = {
"apcp_sfc" : "Total_precipitation",
"dlwrf_sfc" : "Downward_Long-Wave_Rad_Flux",
"dswrf_sfc" : "Downward_Short-Wave_Rad_Flux",
"pres_msl" : "Pressure",
"pwat_eatm" : "Precipitable_water",
"spfh_2m" : "Specific_humidity_height_above_ground",
"tcdc_eatm" : "Total_cloud_cover",
"tcolc_eatm" : "Total_Column-Integrated_Condensate",
"tmax_2m" : "Maximum_temperature",
"tmin_2m" : "Minimum_temperature",
"tmp_2m" : "Temperature_height_above_ground",
"tmp_sfc" : "Temperature_surface",
"ulwrf_sfc" : "Upward_Long-Wave_Rad_Flux_surface",
"ulwrf_tatm" : "Upward_Long-Wave_Rad_Flux",
"uswrf_sfc" : "Upward_Short-Wave_Rad_Flux"
}
fKeys = ("apcp_sfc", "dlwrf_sfc", "dswrf_sfc", "pres_msl", "pwat_eatm",
"spfh_2m", "tcdc_eatm", "tcolc_eatm", "tmax_2m", "tmin_2m",
"tmp_2m", "tmp_sfc", "ulwrf_sfc", "ulwrf_tatm", "uswrf_sfc")
NPTSt = 5113 # Train
NPTSp = 1796 # Predict
# Minimal script for gaussian process estimation
class Mesonet(object):
dtimet = np.recarray((NPTSt,), dtype={"names": ("time",),
"formats": ("datetime64[D]",)})
dtimep = np.recarray((NPTSp,), dtype={"names": ("time",),
"formats": ("datetime64[D]",)})
def __init__(self, stid, nlat, elon, elev):
self.stid = stid
self.nlat = nlat
self.elon = elon
self.elev = elev
# Measured data
self.datat = np.recarray((NPTSt,), dtype={"names": ("flux", "sun_alt", "moon_phase"),
"formats": (np.int64, np.float64, np.float64)})
self.datap = np.recarray((NPTSp,), dtype={"names": ("flux", "sun_alt", "moon_phase"),
"formats": (np.int64, np.float64, np.float64)})
def setAstro(self, time, data):
sun = ephem.Sun()
moon = ephem.Moon()
obs = ephem.Observer()
obs.lon = (self.elon * np.pi / 180) # need radians
obs.lat = (self.nlat * np.pi / 180) # need radians
obs.elevation = self.elev # meters
for i in range(len(time)):
obs.date = str(time[i])
sun.compute(obs)
moon.compute(obs)
# LOGIT ASTRO TERMS
# Sun Alt goes from 0 to 90
# Moon phase goes from 0 to 1
salt = float(180 / np.pi * sun.transit_alt)
salt /= 90.0
mphase = moon.moon_phase
data["sun_alt"][i] = np.log(salt / (1.0 - salt))
data["moon_phase"][i] = np.log(mphase / (1.0 - mphase))
def regressTest(feattr, featcv, fluxtr, fluxcv):
alphas = np.logspace(-5, 1, 6, base=10)
models = []
for alpha in alphas:
models.append(linear_model.Ridge(normalize=True, fit_intercept=True, alpha=alpha))
models.append(linear_model.Lasso(normalize=True, fit_intercept=True, alpha=alpha))
models.append(linear_model.LassoLars(normalize=True, fit_intercept=True, alpha=alpha))
models.append(ensemble.RandomForestRegressor())
models.append(ensemble.ExtraTreesRegressor())
models.append(ensemble.AdaBoostRegressor())
models.append(ensemble.GradientBoostingRegressor(loss="lad", n_estimators=100))
models.append(ensemble.GradientBoostingRegressor(loss="lad", n_estimators=1000))
models.append(tree.DecisionTreeRegressor())
models.append(tree.ExtraTreeRegressor())
maes = []
for m in range(len(models)):
model = models[m]
fit = model.fit(feattr, fluxtr)
preds = fit.predict(featcv)
mae = metrics.mean_absolute_error(fluxcv, preds)
print " MAE %d: %.1f" % (m, mae)
maes.append(mae)
idx = np.argsort(maes)
model = models[idx[0]]
print "BEST", maes[idx[0]], model
return model.fit(np.vstack((feattr, featcv)),
np.hstack((fluxtr, fluxcv))
) # fit all data
def sigclip(data, switch):
mean = np.mean(data, axis=1)
std = np.std(data, axis=1)
idx = np.where(std == 0.0)
std[idx] = 1e10
if switch:
nsig = np.abs(data - mean[:,np.newaxis,:]) / std[:,np.newaxis,:]
else:
nsig = np.abs(data - mean[:,np.newaxis]) / std[:,np.newaxis]
idx = np.where(nsig > 3.0)
ma = np.ma.array(data)
ma[idx] = np.ma.masked
return ma.mean(axis=1).data
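# sigclip is applied twice in the main loop below: first across axis 1 of the
# (NPTSt, 11, 5) blocks (presumably the 11 GEFS ensemble members), then across the
# remaining 5 forecast hours, e.g. (sketch only):
# raw = train[mKey].pdata[fKey].reshape((NPTSt, 11, 5))
# daily = sigclip(sigclip(raw, True), False)   # -> shape (NPTSt,)
# Values more than 3 sigma from the per-row mean are masked before averaging.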
if __name__ == "__main__":
suffix = sys.argv[1]
trainFile = "gp2_train_%s.pickle" % (suffix)
predFile = "gp2_pred_%s.pickle" % (suffix)
if suffix.find("logit") > -1:
buff = open(trainFile, "rb")
train, fmin, fmax = cPickle.load(buff)
buff.close()
buff = open(predFile, "rb")
pred, fmin, fmax = cPickle.load(buff)
buff.close()
else:
buff = open(trainFile, "rb")
train = cPickle.load(buff)
buff.close()
buff = open(predFile, "rb")
pred = cPickle.load(buff)
buff.close()
# QUESTION: do we logit the flux? Not sure, might screw up CV interpretation
#pool = multiprocessing.Pool(multiprocessing.cpu_count())
#pool.map(int, range(multiprocessing.cpu_count())) # Trick to "warm up" the Pool
# Need to load the positions and times of training data
sdata = np.loadtxt("../station_info.csv", delimiter=",", skiprows=1,
dtype = [("stid", np.str_, 4),
("nlat", np.float64),
("elon", np.float64),
("elev", np.float64)])
fields = np.loadtxt("../train.csv", skiprows=1, delimiter=",", dtype=np.int64)
dates = [np.datetime64(str(x)[:4]+"-"+str(x)[4:6]+"-"+str(x)[6:8]) for x in fields[:,0]]
Mesonet.dtimet = dates
mesonets = {}
for sidx in range(len(sdata)):
s = sdata[sidx]
station = Mesonet(s[0], s[1], s[2], s[3])
station.datat["flux"] = fields[:,sidx+1]
mesonets[s[0]] = station
# Dates of prediction data
fields = np.loadtxt("../sampleSubmission.csv", skiprows=1, delimiter=",", unpack=True).astype(np.int)
dates = [np.datetime64(str(x)[:4]+"-"+str(x)[4:6]+"-"+str(x)[6:8]) for x in fields[0]]
Mesonet.dtimep = dates
sdates = [np.str(x) for x in fields[0]]
# Do we do Astro terms?
useAstro = 0
if useAstro:
for mesonet in mesonets.values():
mesonet.setAstro(mesonet.dtimet, mesonet.datat)
mesonet.setAstro(mesonet.dtimep, mesonet.datap)
nCv = 1000
nTr = NPTSt-nCv
# Regress each Mesonet site on its own
for mKey in mesonets.keys():
print "%s" % (mKey)
feattr = np.empty((nTr, len(fKeys) + 2 * useAstro))
featcv = np.empty((nCv, len(fKeys) + 2 * useAstro))
for f in range(len(fKeys)):
fKey = fKeys[f]
data1 = sigclip(train[mKey].pdata[fKey].reshape((NPTSt, 11, 5)), True)
data2 = sigclip(data1, False)
feattr[:,f] = data2[:-nCv]
featcv[:,f] = data2[-nCv:]
if useAstro:
feattr[:,len(fKeys)] = mesonets[mKey].datat["sun_alt"][:-nCv]
feattr[:,len(fKeys)+1] = mesonets[mKey].datat["moon_phase"][:-nCv]
featcv[:,len(fKeys)] = mesonets[mKey].datat["sun_alt"][-nCv:]
featcv[:,len(fKeys)+1] = mesonets[mKey].datat["moon_phase"][-nCv:]
fluxtr = mesonets[mKey].datat["flux"][:-nCv]
fluxcv = mesonets[mKey].datat["flux"][-nCv:]
regressTest(feattr, featcv, fluxtr, fluxcv)
##########3
##########3
##########3
##########3
# Now regress all sites at once
print "ALL"
feattr = np.empty((nTr * len(mesonets.keys()), len(fKeys) + 2 * useAstro))
fluxtr = np.empty((nTr * len(mesonets.keys())))
featcv = np.empty((nCv * len(mesonets.keys()), len(fKeys) + 2 * useAstro))
fluxcv = np.empty((nCv * len(mesonets.keys())))
fIdx = 0
for mKey in mesonets.keys():
for f in range(len(fKeys)):
fKey = fKeys[f]
data1 = sigclip(train[mKey].pdata[fKey].reshape((NPTSt, 11, 5)), True)
data2 = sigclip(data1, False)
feattr[fIdx*nTr:(fIdx*nTr + nTr),f] = data2[:-nCv]
featcv[fIdx*nCv:(fIdx*nCv + nCv),f] = data2[-nCv:]
if useAstro:
feattr[fIdx*nTr:(fIdx*nTr + nTr),len(fKeys)] = mesonets[mKey].datat["sun_alt"][:-nCv]
feattr[fIdx*nTr:(fIdx*nTr + nTr),len(fKeys)+1] = mesonets[mKey].datat["moon_phase"][:-nCv]
featcv[fIdx*nCv:(fIdx*nCv + nCv),len(fKeys)] = mesonets[mKey].datat["sun_alt"][-nCv:]
featcv[fIdx*nCv:(fIdx*nCv + nCv),len(fKeys)+1] = mesonets[mKey].datat["moon_phase"][-nCv:]
fluxtr[fIdx*nTr:(fIdx*nTr + nTr)] = mesonets[mKey].datat["flux"][:-nCv]
fluxcv[fIdx*nCv:(fIdx*nCv + nCv)] = mesonets[mKey].datat["flux"][-nCv:]
fIdx += 1
regressTest(feattr, featcv, fluxtr, fluxcv)
| mit |
nesterione/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/container.py | 3 | 2832 | import matplotlib.cbook as cbook
class Container(tuple):
"""
Base class for containers.
"""
def __repr__(self):
return "<Container object of %d artists>" % (len(self))
def __new__(cls, *kl, **kwargs):
return tuple.__new__(cls, kl[0])
def __init__(self, kl, label=None):
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self._remove_method = None
self.set_label(label)
def set_remove_method(self, f):
self._remove_method = f
def remove(self):
for c in self:
c.remove()
if self._remove_method:
self._remove_method()
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: any string
"""
self._label = s
self.pchanged()
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try: del self._propobservers[oid]
except KeyError: pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in self._propobservers.items():
func(self)
def get_children(self):
return list(cbook.flatten(self))
class BarContainer(Container):
def __init__(self, patches, errorbar=None, **kwargs):
self.patches = patches
self.errorbar = errorbar
Container.__init__(self, patches, **kwargs)
class ErrorbarContainer(Container):
def __init__(self, lines, has_xerr=False, has_yerr=False, **kwargs):
self.lines = lines
self.has_xerr = has_xerr
self.has_yerr = has_yerr
Container.__init__(self, lines, **kwargs)
class StemContainer(Container):
def __init__(self, markerline_stemlines_baseline, **kwargs):
markerline, stemlines, baseline = markerline_stemlines_baseline
self.markerline = markerline
self.stemlines = stemlines
self.baseline = baseline
Container.__init__(self, markerline_stemlines_baseline, **kwargs)
| gpl-2.0 |
hofmannedv/training-python | usecases/drivers-log/distanz-m.py | 1 | 3460 | #!/usr/bin/python
import fileinput
import re
from datetime import datetime
# import external NumPy module
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
init = True
# preparing the data
for line in fileinput.input():
line = re.sub('\n', '', line)
columns = re.split('\t+', line)
if init:
data = np.array([columns])
init = False
else:
columns[0] = datetime.strptime(columns[0], '%Y-%m-%d %H:%M:%S')
columns[1] = datetime.strptime(columns[1], '%Y-%m-%d %H:%M:%S')
data2 = np.array([columns])
data = np.vstack((data, data2))
monthlyRanges = np.array([
['Ganzes Jahr', '2017-01-01 00:00:00', '2017-12-31 23:59:59'],
['Januar', '2017-01-01 00:00:00', '2017-01-31 23:59:59'],
['Februar', '2017-02-01 00:00:00', '2017-02-28 23:59:59'],
['März', '2017-03-01 00:00:00', '2017-03-31 23:59:59'],
['April', '2017-04-01 00:00:00', '2017-04-30 23:59:59'],
['Mai', '2017-05-01 00:00:00', '2017-05-31 23:59:59'],
['Juni', '2017-06-01 00:00:00', '2017-06-30 23:59:59'],
['Juli', '2017-07-01 00:00:00', '2017-07-31 23:59:59'],
['August', '2017-08-01 00:00:00', '2017-08-31 23:59:59'],
['September', '2017-09-01 00:00:00', '2017-09-30 23:59:59'],
['Oktober', '2017-10-01 00:00:00', '2017-10-31 23:59:59'],
['November', '2017-11-01 00:00:00', '2017-11-30 23:59:59'],
['Dezember', '2017-12-01 00:00:00', '2017-12-31 23:59:59']
])
# define travel routes
travelRoutes = np.array([])
for entry in data[1:]:
route = np.array("%s->%s:%s" % (entry[2], entry[3], entry[4]))
travelRoutes = np.hstack((travelRoutes, route))
# remove double entries
travelRoutes = np.unique(travelRoutes)
# print (travelRoutes)
# extract descriptions for every month
month = monthlyRanges[1:, 0:1]
month = np.ravel(month)
# print (month)
# define number of travels per month
# - init with zeros
numberOfTravels = np.zeros((travelRoutes.size, month.size), dtype = np.int32)
# print (numberOfTravels)
for entry in data[1:]:
# find list id for the travel route
route = np.array("%s->%s:%s" % (entry[2], entry[3], entry[4]))
travelRouteId = np.where(travelRoutes == route)[0][0]
# extract travel data
travelRouteDateFrom = entry[0]
travelRouteDateTo = entry[1]
monthId = 0
for entry in monthlyRanges[1:]:
# extract month data range
monthDateFrom = datetime.strptime(entry[1], '%Y-%m-%d %H:%M:%S')
monthDateTo = datetime.strptime(entry[2], '%Y-%m-%d %H:%M:%S')
# validate route for being in month range
if ((travelRouteDateFrom >= monthDateFrom) and (travelRouteDateFrom <= monthDateTo)):
if ((travelRouteDateTo >= monthDateFrom) and (travelRouteDateTo <= monthDateTo)):
numberOfTravels[travelRouteId][monthId] += 1
break
monthId += 1
# print (numberOfTravels)
# define distance per entry
distances = np.array([])
entryId = 0
travelDescription = []
for entry in travelRoutes:
columns = re.split(':', entry)
factor = int(columns[1])
distances = np.append(distances, [numberOfTravels[entryId] * factor])
entryId += 1
travelDescription.append(columns[0])
# re-arrange the distances array
distances = np.reshape(distances, (travelRoutes.size,month.size))
distances = distances.swapaxes(1,0)
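# After the swap the distances array has shape (months, routes), so each row
# holds the per-route distances of one month.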
# create data frame
df = pd.DataFrame(distances,
index=month,
columns=pd.Index(travelDescription))
# plot the data frame as stacked, horizontal bars
df.plot(kind='barh', stacked=True)
# display the data frame
plt.show()
| gpl-2.0 |
hildensia/joint_dependency | joint_dependency/interpret_results.py | 1 | 3372 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 21 10:23:46 2016
"""
import argparse
import cPickle
import seaborn as sns
import matplotlib.pylab as plt
import pandas as pd
import numpy as np
from matplotlib.lines import Line2D
import re
sns.set_style("darkgrid")
lscol_ptn = re.compile("LockingState([0-9]+)")
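# Matches columns such as "LockingState0", "LockingState1", ...; the captured
# group is the joint index.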
def determine_num_joints(df, _=None):
return len([ lscol_ptn.match(c).group(1) for c in df.columns if lscol_ptn.match(c) is not None])
def plot_locking_states(df, meta, num_joints=None):
marker_style = dict(linestyle=':', marker='o', s=100,)
def format_axes(ax):
ax.margins(0.2)
ax.set_axis_off()
if num_joints is None:
num_joints = determine_num_joints(df)
points = np.ones(num_joints)
fig, ax = plt.subplots()
for j in range(num_joints):
ax.text(-1.5, j, "%d" % j)
ax.text(0, -1.5, "time")
for t in df.index:
lock_states = df.loc[t][ [ "LockingState%d" % k for k in range(num_joints) ] ].tolist()
c = ["orange" if l else "k" for l in lock_states]
ax.scatter((t+0.1) * points, range(num_joints), color=c, **marker_style)
format_axes(ax)
ax.set_title('Locking state evolution')
ax.set_xlabel("t")
plt.plot()
def plot_entropy(df, meta, num_joints=None):
if num_joints is None:
num_joints = determine_num_joints(df)
plt.figure()
for j in range(num_joints):
var_name="Entropy%d"%j
plt.plot(df[var_name], label=var_name)
plt.legend()
def plot_dependency_posterior(df, meta, t, num_joints=None):
if num_joints is None:
num_joints = determine_num_joints(df)
plt.figure()
posterior=np.array([df["Posterior%d"%j].iloc[t] for j in range(num_joints)])
plt.matshow(posterior, interpolation='nearest')
plt.show()
def print_actions(df, num_joints=None):
pd.options.display.float_format = '{:,.2f}'.format
pd.set_option('expand_frame_repr', False)
if num_joints is None:
num_joints = determine_num_joints(df, None)
print(df[[u'CheckedJoint'] +
['DesiredPos{}'.format(j) for j in range(num_joints)] +
['LockingState{}'.format(j) for j in range(num_joints)]
])
#Index([u'DesiredPos0', u'DesiredPos1', u'DesiredPos2', u'DesiredPos3',
# u'DesiredPos4', u'CheckedJoint', u'RealPos0', u'RealPos1', u'RealPos2',
# u'RealPos3', u'RealPos4', u'LockingState0', u'LockingState1',
# u'LockingState2', u'LockingState3', u'LockingState4', u'Posterior0',
# u'Entropy0', u'Posterior1', u'Entropy1', u'Posterior2', u'Entropy2',
# u'Posterior3', u'Entropy3', u'Posterior4', u'Entropy4'],
# dtype='object')
def open_pickle_file(pkl_file):
with open(pkl_file) as f:
df, meta = cPickle.load(f)
return df, meta
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", required=True,
help="pickle file")
args = parser.parse_args()
df, meta = open_pickle_file(args.file)
print_actions(df)
plot_locking_states(df, meta, num_joints=determine_num_joints(df, meta))
plot_entropy(df,meta, num_joints=determine_num_joints(df, meta))
plot_dependency_posterior(df,meta,-1, num_joints=determine_num_joints(df, meta))
plt.show()
| mit |
giorgiop/scikit-learn | examples/applications/plot_prediction_latency.py | 85 | 11395 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
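    # Dividing by n_instances converts each bulk timing into a per-instance
    # latency, directly comparable to the atomic runtimes measured above.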
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
cls_names : list of estimator class names that generated the runtimes
pred_type : 'bulk' or 'atomic'
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
multipath-tcp/mptcp-analysis-scripts | scripts_graph/address_stat.py | 1 | 3509 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Quentin De Coninck
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# To install on this machine: matplotlib, numpy
from __future__ import print_function
import argparse
import matplotlib
# Do not use any X11 backend
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# Add root directory in Python path and be at the root
ROOT_DIR = os.path.abspath(os.path.join(".", os.pardir))
os.chdir(ROOT_DIR)
sys.path.append(ROOT_DIR)
import common as co
import common_graph as cog
import mptcp
import tcp
##################################################
## ARGUMENTS ##
##################################################
parser = argparse.ArgumentParser(
description="Summarize stat files generated by analyze")
parser.add_argument("-s",
"--stat", help="directory where the stat files are stored", default=co.DEF_STAT_DIR + '_' + co.DEF_IFACE)
parser.add_argument('-S',
"--sums", help="directory where the summary graphs will be stored", default=co.DEF_SUMS_DIR + '_' + co.DEF_IFACE)
parser.add_argument("-d",
"--dirs", help="list of directories to aggregate", nargs="+")
args = parser.parse_args()
stat_dir_exp = os.path.abspath(os.path.join(ROOT_DIR, args.stat))
sums_dir_exp = os.path.abspath(os.path.join(ROOT_DIR, args.sums))
co.check_directory_exists(sums_dir_exp)
##################################################
## GET THE DATA ##
##################################################
connections = cog.fetch_valid_data(stat_dir_exp, args)
# multiflow_connections, singleflow_connections = cog.get_multiflow_connections(connections)
##################################################
## PLOTTING RESULTS ##
##################################################
ip_addrs = {}
saddrs = {}
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
port = conn.flows[0].attr.get(co.SOCKS_PORT, conn.attr.get(co.SOCKS_PORT, None))
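        # Prefer the per-flow SOCKS port and fall back to the connection-level
        # attribute when the flow does not carry one.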
# Apache JServ Port
if port and port == 8009:
ip_addr = conn.flows[0].attr.get(co.SOCKS_DADDR, conn.attr.get(co.SOCKS_DADDR, None))
if ip_addr not in ip_addrs:
ip_addrs[ip_addr] = 1
else:
ip_addrs[ip_addr] += 1
saddr = conn.flows[0].attr.get(co.SADDR, conn.attr.get(co.SADDR, None))
if saddr not in saddrs:
saddrs[saddr] = 1
else:
saddrs[saddr] += 1
print(fname, conn_id, ip_addr, saddr)
print("IP ADDRS", ip_addrs)
print("SOURCE IP ADDRS", saddrs)
| gpl-3.0 |
spallavolu/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# a different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
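        # softmax() exponentiates the negated OvR decision values and
        # normalizes each row to sum to one, giving the uncalibrated baseline.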
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
massmutual/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
tmerrick1/spack | var/spack/repos/builtin/packages/py-xarray/package.py | 5 | 1699 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyXarray(PythonPackage):
"""N-D labeled arrays and datasets in Python"""
homepage = "https://github.com/pydata/xarray"
url = "https://pypi.io/packages/source/x/xarray/xarray-0.9.1.tar.gz"
version('0.9.1', '24cc99f19da95427604846c9d1e20e70')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| lgpl-2.1 |
DerThorsten/boring_spaghetti | src/interfaces/python/examples/mrf/denoise.py | 6 | 6709 | # FIXMEEEEEEEEEEE
import opengm
import vigra # only to read images
import numpy
#import sys
# to animate the current labeling matplotlib is used
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import animation
class PyCallback(object):
"""
callback functor which will be passed to an inference
visitor.
In that way, pure python code can be injected into the c++ inference.
This functor visualizes the labeling as an image during inference.
Args :
shape : shape of the image
numLabels : number of labels
"""
def __init__(self,shape,numLabels):
self.shape=shape
self.numLabels=numLabels
matplotlib.interactive(True)
def begin(self,inference):
"""
this function is called from c++ when inference is started
Args :
inference : python wrapped c++ solver which is passed from c++
"""
print "begin"
def end(self,inference):
"""
this function is called from c++ when inference ends
Args :
inference : python wrapped c++ solver which is passed from c++
"""
print "end"
def visit(self,inference):
"""
this function is called from c++ each time the visitor is called
Args :
inference : python wrapped c++ solver which is passed from c++
"""
arg = inference.arg()
gm = inference.gm()
print "energy ",gm.evaluate(arg)
arg=arg.reshape(self.shape)*255
plt.imshow(arg.T, cmap='gray',interpolation="nearest")
plt.draw()
def denoiseModel(
img,
norm = 2,
weight = 1.0,
truncate = None,
numLabels = 256,
neighbourhood = 4,
inpaintPixels = None,
randInpaitStartingPoint = False
):
"""
this function is used to set up a graphical model similar to
    **Denoising and inpainting problems:** from the `MRF Benchmark <http://vision.middlebury.edu/MRF/results/>`_
Args :
img : a grayscale image in the range [0,256)
norm : used norm for unaries and 2-order functions (default : 2)
weight : weight of 2-order functions (default : 1.0)
        truncate : Truncate second order function at a given value (default : None)
numLabels : number of labels for each variable in the graphical model,
            set this to a lower number to speed up inference (default : 256)
neighbourhood : neighbourhood for the second order functions, so far only 4 is allowed (default : 4)
inpaintPixels : a tuple of x and y coordinates where no unaries are added
randInpaitStartingPoint : use a random starting point for all pixels without unaries (default : False)
"""
shape = img.shape
if(img.ndim!=2):
raise RuntimeError("image must be gray")
if neighbourhood != 4 :
raise RuntimeError("A neighbourhood other than 4 is not yet implemented")
# normalize and flatten image
iMin = numpy.min(img)
iMax = numpy.max(img)
imgNorm = ((img[:,:]-iMin)/(iMax-iMin))*float(numLabels)
imgFlat = imgNorm.reshape(-1).astype(numpy.uint64)
# Set up Grapical Model:
numVar = int(img.size)
gm = opengm.gm([numLabels]*numVar,operator='adder')
gm.reserveFunctions(numLabels,'explicit')
numberOfPairwiseFactors=shape[0]*(shape[1]-1) + shape[1]*(shape[0]-1)
gm.reserveFactors(numVar-len(inpaintPixels[0]) + numberOfPairwiseFactors )
# Set up unaries:
# - create a range of all possible labels
allPossiblePixelValues=numpy.arange(numLabels)
pixelValueRep = numpy.repeat(allPossiblePixelValues[:,numpy.newaxis],numLabels,1)
    # - repeat [0,1,...,numLabels-1] numLabels times (one row per possible pixel value)
labelRange = numpy.arange(numLabels,dtype=opengm.value_type)
labelRange = numpy.repeat(labelRange[numpy.newaxis,:], numLabels, 0)
unaries = numpy.abs(pixelValueRep - labelRange)**norm
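    # unaries[v, l] = |v - l|**norm : rows index the observed pixel value,
    # columns index the candidate label.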
# - add unaries to the graphical model
fids=gm.addFunctions(unaries.astype(opengm.value_type))
# add unary factors to graphical model
if(inpaintPixels is None):
for l in xrange(numLabels):
whereL=numpy.where(imgFlat==l)
gm.addFactors(fids[l],whereL[0].astype(opengm.index_type))
else:
# get vis of inpaint pixels
ipX = inpaintPixels[0]
ipY = inpaintPixels[1]
ipVi = ipX*shape[1] + ipY
for l in xrange(numLabels):
whereL=numpy.where(imgFlat==l)
notInInpaint=numpy.setdiff1d(whereL[0],ipVi)
gm.addFactors(fids[l],notInInpaint.astype(opengm.index_type))
# add ONE second order function
f=opengm.differenceFunction(shape=[numLabels,numLabels],norm=2,weight=weight)
fid=gm.addFunction(f)
vis2Order=opengm.secondOrderGridVis(shape[0],shape[1],True)
# add all second order factors
gm.addFactors(fid,vis2Order)
# create a starting point
startingPoint = imgFlat.copy()
if randInpaitStartingPoint :
startingPointRandom = numpy.random.randint(0,numLabels,size=numVar).astype(opengm.index_type)
ipVi = inpaintPixels[0]*shape[1] + inpaintPixels[1]
for x in ipVi:
startingPoint[x]=startingPointRandom[x]
startingPoint[startingPoint==numLabels]=numLabels-1
return gm,startingPoint.astype(opengm.index_type)
if __name__ == "__main__":
# setup
imgPath = 'houseM-input.png'
norm = 2
weight = 5.0
numLabels = 50 # use 256 for full-model (slow)
# Read image
img = numpy.array(numpy.squeeze(vigra.impex.readImage(imgPath)),dtype=opengm.value_type)#[0:100,0:40]
shape = img.shape
# get graphical model an starting point
gm,startingPoint=denoiseModel(img,norm=norm,weight=weight,inpaintPixels=numpy.where(img==0),
numLabels=numLabels,randInpaitStartingPoint=True)
    inf=opengm.inference.Icm(gm,parameter=opengm.InfParam())
print "inf"
inf.setStartingPoint(inf.arg())
# set up visitor
callback=PyCallback(shape,numLabels)
visitor=inf.pythonVisitor(callback,visitNth=1)
inf.infer(visitor)
# get the result
arg=inf.arg()
arg=arg.reshape(shape)
# plot final result
matplotlib.interactive(False)
# Two subplots, the axes array is 1-d
f, axarr = plt.subplots(1,2)
axarr[0].imshow(img.T, cmap = cm.Greys_r)
axarr[0].set_title('Input Image')
axarr[1].imshow(arg.T, cmap = cm.Greys_r)
axarr[1].set_title('Solution')
plt.show() | mit |
abhishekkrthakur/scikit-learn | sklearn/utils/validation.py | 2 | 20807 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %r" % x)
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: %s"
% str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse is None:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
sparse_type = spmatrix.format
if dtype is None:
dtype = spmatrix.dtype
if sparse_type in accept_sparse:
# correct type
if dtype == spmatrix.dtype:
# correct dtype
if copy:
spmatrix = spmatrix.copy()
else:
# convert dtype
spmatrix = spmatrix.astype(dtype)
else:
# create new
spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype)
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
if hasattr(spmatrix, "data"):
spmatrix.data = np.array(spmatrix.data, copy=False, order=order)
return spmatrix
def check_array(array, accept_sparse=None, dtype=None, order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
ensure_min_samples=1, ensure_min_features=1):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2d numpy array.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when ``ensure_2d`` is True and
``allow_nd`` is False. Setting to 0 disables this check.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, order,
copy, force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and ensure_2d and not allow_nd:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
return array
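# Illustrative usage (a sketch, not from this module): estimators typically
# call check_array at the top of fit(), e.g.
#
#     X = check_array(X, accept_sparse='csr', dtype=np.float64,
#                     ensure_min_samples=2)
#
# which validates list-like input as a 2d array (or keeps CSR input) and
# raises on NaN/inf because force_all_finite defaults to True.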
def check_X_y(X, y, accept_sparse=None, dtype=None, order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D X has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when ``ensure_2d`` is True and
``allow_nd`` is False.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def warn_if_not_float(X, estimator='This algorithm'):
"""Warning utility function to check that data type is floating point.
Returns True if a warning was raised (i.e. the input is not float) and
False otherwise, for easier input validation.
"""
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
if X.dtype.kind != 'f':
warnings.warn("%s assumes floating point values as input, "
"got %s" % (estimator, X.dtype))
return True
return False
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
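# Illustrative usage (a sketch, not from this module): an estimator's
# predict() would typically start with
#
#     check_is_fitted(self, 'coef_')
#
# which raises NotFittedError when fit() has not yet set that attribute.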
| bsd-3-clause |
SarthakJShetty/MyDonkey | scripts/sdsandbox_drive.py | 1 | 6352 | #!/usr/bin/env python
'''
Predict Server
Create a server to accept image inputs and run them against a trained neural network.
This then sends the steering output back to the client.
Author: Tawn Kramer
Modified: Alan Steremberg
'''
import os
import argparse
import sys
import numpy as np
import h5py
import json
#import matplotlib.pyplot as plt
import time
import asyncore
import json
import socket
from PIL import Image
import struct
import donkey as dk
import cv2
class RemoteSteeringServer(asyncore.dispatcher):
"""Receives connections and establishes handlers for each client.
"""
def __init__(self, address, pilot):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind(address)
self.address = self.socket.getsockname()
print ('binding to', self.address)
self.listen(1)
self.pilot= pilot
return
def handle_accept(self):
# Called when a client connects to our socket
client_info = self.accept()
#self.logger.debug('handle_accept() -> %s', client_info[1])
print ('got a new client', client_info[1])
h = SteeringHandler(sock=client_info[0], chunk_size=8*1024, pilot=self.pilot)
return
def handle_close(self):
#self.logger.debug('handle_close()')
self.close()
return
class SteeringHandler(asyncore.dispatcher):
"""Handles echoing messages from a single client.
"""
IDLE = 1
GETTING_IMG = 2
SENDING_STEERING = 3
def __init__(self, sock, chunk_size=256, pilot=None):
self.pilot= pilot
self.chunk_size = chunk_size
asyncore.dispatcher.__init__(self, sock=sock)
self.data_to_write = []
self.image_byes = []
self.mode = self.IDLE
self.start_time = time.time()
return
def writable(self):
"""We want to write if we have received data."""
response = bool(self.data_to_write)
return response
def handle_write(self):
"""Write as much as possible of the most recent message we have received."""
data = self.data_to_write.pop()
print('data:'+data)
sent = self.send(data[:self.chunk_size].encode('utf-8'))
print('sent:'+str(sent)+' len(data):'+str(len(data)))
if sent < len(data):
            remaining = data[sent:]
            self.data_to_write.append(remaining)
elif self.mode == self.SENDING_STEERING:
self.mode = self.IDLE
def handle_read(self):
"""Read an incoming message from the client and put it into our outgoing queue."""
data = self.recv(self.chunk_size)
view_image = False
#print ('got', len(data), 'bytes')
if len(data) == 0:
self.handle_close()
elif self.mode == self.IDLE:
try:
jsonObj = json.loads(data.decode('utf-8'))
self.num_bytes = jsonObj['num_bytes']
self.width = jsonObj['width']
self.height = jsonObj['height']
self.num_channels = jsonObj['num_channels']
self.format = jsonObj['format']
self.flip_y = jsonObj['flip_y']
self.data_to_write.insert(0, "{ 'response' : 'ready_for_image' }")
self.mode = self.GETTING_IMG
self.image_byes = []
self.num_read = 0
except:
self.mode = self.IDLE
print ('failed to read json from: ', data)
elif self.mode == self.GETTING_IMG:
self.image_byes.append(data)
self.num_read += len(data)
if self.num_read == self.num_bytes:
lin_arr = np.fromstring(b''.join(self.image_byes), dtype=np.uint8)
self.mode = self.SENDING_STEERING
if self.format == 'array_of_pixels':
img = lin_arr.reshape(self.width, self.height, self.num_channels)
if self.flip_y:
img = np.flipud(img)
img = img.transpose()
else: #assumed to be ArrayOfChannels
img = lin_arr.reshape(self.num_channels, self.width, self.height)
if view_image:
#this can be useful when validating that you have your images coming in correctly.
vis_img = Image.fromarray(img.transpose(), 'RGB')
vis_img.show()
#this launches too many windows if we leave it up.
self.handle_close()
# hook up donkey here
now = time.time()
milliseconds = int( (now - self.start_time) * 1000)
# make the image_arr the right size
#print(img.shape)
#vis_img = Image.fromarray(img.transpose(), 'RGB')
#print(vis_img.shape)
img_arr = cv2.resize(img.transpose(),(160,160))
#print(img_arr.shape)
#img_arr = img_arr[:, 00:120]
img_arr = img_arr[ :120, :]
#print(img_arr.shape)
angle=0
throttle=0
angle, throttle = self.pilot.decide( img_arr,
angle,
throttle,
milliseconds)
print('angle: %s throttle: %s' %(angle, throttle) )
#steering = self.model.predict(img[None, :, :, :])
steering = angle
reply = '{ "steering" : "%f", "throttle" : "%f" }' % (steering,throttle)
self.data_to_write.append(reply)
else:
print ("wasn't prepared to recv request!")
def handle_close(self):
self.close()
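# Editor-added protocol sketch (field values below are hypothetical): before the
# raw image bytes, the client is expected to send a JSON header such as
#   {"num_bytes": 57600, "width": 160, "height": 120, "num_channels": 3,
#    "format": "array_of_pixels", "flip_y": true}
# The handler answers with { 'response' : 'ready_for_image' }, reads exactly
# num_bytes of image data, and finally replies with
#   { "steering" : "<angle>", "throttle" : "<throttle>" }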
def go(address,remote_url):
#Get all autopilot signals from remote host
mypilot = dk.remotes.RemoteClient(remote_url, vehicle_id='mycar')
s = RemoteSteeringServer(address, mypilot)
try:
asyncore.loop()
except KeyboardInterrupt:
print ('stopping')
# ***** main loop *****
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='prediction server')
parser.add_argument('--remote', dest='remote', default='http://localhost:8887', help='remote url')
args = parser.parse_args()
#
# Let's initialize the donkey things
#
address = ('0.0.0.0', 9090)
go(address,args.remote)
| mit |
wcmckee/wcmckee | artcontrolme.py | 1 | 5679 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# **artcontrolme**
#
#
# Python script to deal with exported artcontrolme static files. The files are located at https://github.com/artcontrol/artcontrolme. '.wp' files that contain HTML of posts. '.meta' files contain name, permalink, date, cat, tags, id.
#
# The script deletes all tag data from the .meta files
# It performs a nikola build and rsync to server.
#
# <markdowncell>
# **TODO**
#
# sort dict of permalink and time published.
# render index.html that shows recent 10 works
# <codecell>
import os
import pandas as pd
import json
# <codecell>
#artcpost = ('/home/wcmckee/artcontrolme/posts')
# <codecell>
postsdir = (u'/home/wcmckee/artcontrolme/posts')
# <codecell>
os.chdir(postsdir)
# <codecell>
alfilz = []
# <codecell>
for filz in os.listdir(postsdir):
print filz
alfilz.append(filz)
# <codecell>
alfilz
# <codecell>
for bleh in alfilz:
if '.wp' in bleh:
print bleh
# <codecell>
#file = open('alcemy-landscape.wp', 'r')
#print file.read()
# <codecell>
lisblog = []
metablog = []
dictblog = {}
finlis = []
# <codecell>
teop = open('test.txt', 'w')
# <codecell>
#f=open('filename')
#lines=f.readlines()
#print lines[26]
#print lines[30]
# <codecell>
# <codecell>
for bleh in alfilz:
if '.meta' in bleh:
#print bleh
#filez = open('alcemy-landscape.wp', "r")
#filez.read
#filez.close()
file = open(bleh, 'r')
metaf = file.readlines()
#print file.read()
#metablog.append(file.readline())
#metablog.append(file.readline())
#metablog.append(file.readline())
#print metaf[2]
#adrdir = {metaf[0]: metaf[2]}
#chzdir = ({'test': 'test'})
#chzdir.update({metaf[2]: metaf[1]})
chzdir = {metaf[2].rstrip(): metaf[1].rstrip()}#, file.readline()}
#print chzdir
finlis.append(chzdir)
teop.write(file.read())
# <codecell>
chzdir
# <codecell>
chzdir.items()
# <codecell>
tuplis = []
# <codecell>
derplis = []
# <codecell>
import collections
# <codecell>
dez = collections.OrderedDict()
# <codecell>
metaf
# <codecell>
cherdict = dict()
# <codecell>
listag = []
# <codecell>
jslis = []
# <codecell>
for bleh in alfilz:
if '.meta' in bleh:
#print bleh
#filez = open('alcemy-landscape.wp', "r")
#filez.read
#filez.close()
file = open(bleh, 'r')
metaf = file.readlines()
#print file.read()
#metablog.append(file.readline())
#metablog.append(file.readline())
#metablog.append(file.readline())
#print metaf[2]
#adrdir = {metaf[0]: metaf[2]}
#chzdir = ({'name': metaf[0].rstrip()})#, file.readline()}
#cherdict.update({metaf[2].rstrip(): metaf[1].rstrip()})
#cherdict.update({metaf[1]: metaf[3].rstrip()})
#listag.append(metaf[3].rstrip())
#chzdir.update({'title': metaf[1].rstrip()})
#chzdir.update({'date': metaf[2].rstrip()})
#chzdir.update({'tags': metaf[3].rstrip()})
print chzdir
metawnm = open(bleh, 'w')
metawnm.write(metaf[0] + metaf[1] + metaf[2])
print (metaf[0] + metaf[1] + metaf[2])
metawnm.close()
finlis.append(chzdir)
cherjson = json.dumps(chzdir)
jslis.append(cherjson)
teop.write(file.read())
# <codecell>
cherjson = json.dumps(chzdir)
# <codecell>
listag
# <codecell>
cherjson
# <codecell>
savus = open('/home/wcmckee/visignsys/artcontrol.json', 'a')
savus.write(str(jslis))
savus.close()
# <codecell>
chzdir.items()
# <codecell>
chval = chzdir.values()
# <codecell>
itm = len(chval)
# <codecell>
itm
# <codecell>
for chre in range(itm):
print chval[chre]
# <codecell>
for chz in chzdir:
print chz
# <codecell>
for fin in finlis:
#print sorted(fin.keys())
for fez in fin.keys():
#sorted(fez)
print (fez)
derplis.append(fez)
tuplis.append(fin.keys())
# <codecell>
sordep = sorted(derplis)
# <markdowncell>
# I want to add the correct permalink to the sorted list of publish dates.
#
# Take two dict.
# dict1 is original sort time : permalink
# dict2 is sorted time : NEED TO ADD CORRECT PERMALINK HERE
#
# look at the time in first dict from dict2 and add permalink as its value.
#
# if numba ('2011-07etc') in timlist:
# get its value (permalink). write this as value for dict2.
#
# Need to add html content, tags, and cats once this is sorted. should be easy since accessing it with var[num of line]
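# <codecell>
# Editor-added sketch of the permalink lookup described above; dict1/dict2 and
# the sample timestamp/permalink values are hypothetical placeholders.
dict1 = {'2011-07-01 09:00:00': 'alcemy-landscape'} # original time : permalink
dict2 = {}
for tim in sorted(dict1.keys()):
    dict2[tim] = dict1[tim] # sorted time : permalink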
# <codecell>
sordep
# <codecell>
metablog
# <codecell>
for neta in metablog:
print neta
# <codecell>
for bleh in alfilz:
if '.wp' in bleh:
#print bleh
#filez = open('alcemy-landscape.wp', "r")
#filez.read
#filez.close()
file = open(bleh, 'r')
#print file.read()
lisblog.append(file.read())
teop.write(file.read())
# <codecell>
len(lisblog)
# <codecell>
filez = open('alcemy-landscape.wp', "r")
filez.read()
filez.close()
# <codecell>
import dominate
from dominate.tags import *
doc = dominate.document(title='artcontrolme')
with doc.head:
link(rel='stylesheet', href='style.css')
script(type='text/javascript', src='script.js')
h1('artcontrolme')
h3('The Art Of WCMCKEE')
with doc:
with div(id='header').add(ol()):
for chre in range(itm):
p(chval[chre])
#with div():
# attr(cls='body')
# p('Lorem ipsum..')
print doc
# <codecell>
# <codecell>
# <codecell>
| mit |
ubenu/Blits | src/blitspak/blits_data.py | 1 | 5311 | '''
Created on 23 May 2017
@author: schilsm
'''
# -*- coding: utf-8 -*-
import pandas as pd, numpy as np, copy as cp
# pd.options.mode.chained_assignment = None
# suppresses unnecessary warning when creating working data
class BlitsData():
def __init__(self, max_points=1000):
# attributes
self.file_name = ""
self.raw_data = None
self.series_names = None # same as self.series_dict.keys, but in order of input
self.axis_names = None
self.series_dict = {}
self.max_points = max_points
if max_points < 1:
self.max_points = np.inf
def has_data(self):
return len(self.series_dict) > 0
def get_axes_names(self):
return cp.deepcopy(self.axis_names)
def get_series_names(self):
return cp.deepcopy(self.series_names)
def get_series_copy(self, name):
if name in self.series_dict:
return cp.deepcopy(self.series_dict[name])
else:
return None
def import_data(self, file_path):
self.raw_data = pd.read_csv(file_path)
self.create_working_data_from_file()
def export_results(self, file_path):
r = self.results.to_csv()
p = self.get_fractional_saturation_params_dataframe().to_csv()
f = self.get_fractional_saturation_curve().to_csv()
with open(file_path, 'w') as file:
file.write(r)
file.write('\n')
with open(file_path, 'a') as file:
file.write(p)
file.write('\n')
file.write(f)
def create_working_data_from_file(self):
n_cols = len(self.raw_data.columns)
named_cols = self.raw_data.columns[~self.raw_data.columns.str.contains('unnamed', case=False)].values
self.series_names = named_cols
n_series = len(named_cols)
n_cols_per_series = n_cols // n_series
n_independents = n_cols_per_series - 1
# Split data set in individual series
self.series_dict = {}
axis_names = []
for s in range(0, n_cols , n_cols_per_series):
df = pd.DataFrame(self.raw_data.iloc[:, s:s+n_cols_per_series]).dropna()
s_name = df.columns.tolist()[0]
axis_names = ['x{}'.format(i) for i in range(n_independents)]
cols = cp.deepcopy(axis_names)
cols.append(s_name)
df.columns = cols
df = df.sort_values(by='x0')
step = len(df) // self.max_points
if step > 1:
r = np.arange(len(df))
filt = np.mod(r, step) == 0
df = df[filt]
ix = pd.Index(np.arange(len(df)))
df.set_index(ix, inplace=True)
self.series_dict[s_name] = df
self.axis_names = np.array(axis_names)
def create_working_data_from_template(self, template):
"""
@template:
template for series construction, consisting of two pandas DataFrames,
with template[0] containing the series axes values and a column for the calculated dependent,
template[1] containing the parameter values for each axis, and
template[2] the modelling function
PS: this is for the chop!
"""
n_axes = len(template[2].independents)
splits = np.arange(1, len(template[0].columns)//(n_axes+1)) * (n_axes+1)
all_series = np.split(template[0], splits, axis=1)
self.series_names = []
self.axis_names = []
for s in all_series:
name = s.columns[-1]
self.series_names.append(name)
axes_names = s.columns[:-1]
self.axis_names = cp.deepcopy(axes_names).tolist() # not pretty: overwrites previous; no check is made
s_new = cp.deepcopy(s).dropna()
self.series_dict[name] = s_new
self.series_names = np.array(self.series_names)
def series_extremes(self):
"""
Returns two pandas DataFrame, one with the minimum values for each row in each series
and one with the maximum values. Returned DataFrames have the series names as index, and
the axes names + 'y' (ie the dependent) as columns.
"""
if self.series_names is not None:
if self.axis_names is not None:
index = np.concatenate((self.get_axes_names(), ['y']))
df_mins = pd.DataFrame(index=index)
# last index is called y because the dependents have different names in different series
df_maxs = cp.deepcopy(df_mins)
for s in self.series_names:
series = cp.deepcopy(self.series_dict[s])
cols = series.columns.tolist()
cols[-1] = 'y'
series.columns = cols
mins = series.min(axis=0)
maxs = series.max(axis=0)
df_mins = pd.concat((df_mins, mins), axis=1)
df_maxs = pd.concat((df_maxs, maxs), axis=1)
df_mins.columns = self.series_names
df_maxs.columns = self.series_names
return df_mins.transpose(), df_maxs.transpose()
return None
return None
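# Editor-added usage sketch (the file name below is hypothetical):
#   data = BlitsData(max_points=500)
#   data.import_data('experiment.csv')
#   if data.has_data():
#       mins, maxs = data.series_extremes()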
| gpl-3.0 |
MartinDelzant/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
richrr/scripts | python/merging-python-script.py | 1 | 5595 | import sys
import os
import pandas as pd
import multiprocessing as mp
import csv
# this code is written for the merged file with combined pval & fdr. although it could have been written for the file without comb fisher and fdr,
# it is easier to have the output with the comb pval and fdr and use what we need rather than have to search them in the merged file with comb pval and fdr
# or run the next (create network) command to calc the combined pval and fdr.
# cd /nfs3/PHARM/Morgun_Lab/richrr/Cervical_Cancer/analysis/merged/corr/gexpress/stage-ltest_corr/p1
"""
SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 1 2" -m 150G -F 100G -r log_merge-py_1 -q biomed -M [email protected] -P 8
SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 2 2" -m 150G -F 100G -r log_merge-py_2 -q biomed -M [email protected] -P 8
SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 3 2" -m 150G -F 100G -r log_merge-py_3 -q biomed -M [email protected] -P 8
SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 4 2" -m 150G -F 100G -r log_merge-py_4 -q biomed -M [email protected] -P 8
"""
infile = sys.argv[1]
analysis = "Analys " + sys.argv[2] + " "
numb_datasets = int(sys.argv[3])
# get the header line form the big file and decide which (analysis) columns to use
header_line = ''
with open(infile, 'r') as f:
header_line = f.readline().strip()
selcted_cols = [i for i, s in enumerate(header_line.split(',')) if analysis in s] #[s for s in header_line.split(',') if analysis in s]
# get the lowest and highest and make range out of it
# this way you get the combinedpval and combined fdr cols
selcted_cols = range(min(selcted_cols), max(selcted_cols)+1)
selcted_cols.insert(0, 0) # explicitly adding the row id cols
print selcted_cols
header_for_print = [header_line.split(',')[i] for i in selcted_cols]
print header_for_print
def process(df):
res = list()
for row in df.itertuples():
#print row
corrs = row[1:numb_datasets+1]
corrs_flag = 0
# write some condition to check for NA
pos = sum(float(num) > 0 for num in corrs)
neg = sum(float(num) < 0 for num in corrs)
#print pos, neg
if len(corrs) == pos and not len(corrs) == neg:
#print "pos"
corrs_flag = 1
if len(corrs) == neg and not len(corrs) == pos:
#print "neg"
corrs_flag = 1
if corrs_flag == 1:
res.append(row)
return res
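# Editor-added note (values are illustrative): with numb_datasets = 2, a row whose
# first two correlation columns are e.g. (0.4, 0.7) or (-0.2, -0.5) is kept by
# process(); a mixed-sign row such as (0.4, -0.5) is dropped.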
counter=0
pool = mp.Pool(30) # use 30 processes
funclist = []
# http://gouthamanbalaraman.com/blog/distributed-processing-pandas.html
#for chunck_df in pd.read_csv(infile, chunksize=100, usecols=range(5), index_col=0):
for chunck_df in pd.read_csv(infile, chunksize=100000, usecols=selcted_cols, index_col=0):
counter = counter + 1
print counter
#print chunck_df
# process each data frame
f = pool.apply_async(process,[chunck_df])
funclist.append(f)
#result = list()
OUTfile = infile + analysis.replace(" ", "_") + '-same-dir-corrs.csv'
with open(OUTfile, 'w') as of:
writer = csv.writer(of, delimiter=',', lineterminator='\n')
writer.writerow(header_for_print)
for f in funclist:
csvd = f.get(timeout=10000) # timeout in 10000 seconds
#result.extend(csvd)
writer.writerows(csvd)
#print result
# quick and dirty command to get the first column of the file:
cutcmd = "cut -d, -f 1 " + OUTfile + " > " + OUTfile + "-ids.csv"
os.system(cutcmd)
print "Done"
""" # sequential
corrs_dict = dict() # satisfies corr direction
counter = 0
# with open(in_filename) as in_f, open(out_filename, 'w') as out_f
with open(infile) as f:
for line in f:
counter = counter + 1
line = line.strip()
print line
contents = line.split(",")
corrs = contents[1:numb_datasets+1]
corrs_flag = 0
if counter == 1: # move to next iteration
of.write(line)
continue
# write some condition to check for NA
pos = sum(float(num) > 0 for num in corrs)
neg = sum(float(num) < 0 for num in corrs)
#print pos, neg
if len(corrs) == pos and not len(corrs) == neg:
print "pos"
corrs_flag = 1
if len(corrs) == neg and not len(corrs) == pos:
print "neg"
corrs_flag = 1
if corrs_flag == 1:
corrs_dict[contents[0]] = contents[1:]
'''
if corrs_flag == 0: # no point in analyzing pvals, move to next iteration
continue
pvals = contents[numb_datasets+1:]
print pvals
pvals_flag = 0
# write some condition to check for NA
sig = sum(float(num) < 1 for num in pvals)
#print sig
if len(corrs) == sig:
print "sig"
pvals_flag = 1
if corrs_flag == 1 and pvals_flag == 1:
corrs_dict[contents[0]] = contents[1:]
if counter == 5:
sys.exit(0)
'''
print corrs_dict
""" | gpl-3.0 |
orion-42/numerics-physics-stuff | midpoint_displacement.py | 1 | 2079 | import numpy as np
import matplotlib.pyplot as plt
def make_nth_size(n):
return 2 ** n + 1
def midpoint_displacement(size, init_min_val=0.0, init_max_val=10.0, max_error=1.0):
noise = np.zeros((size, size))
noise[0, 0] = np.random.uniform(init_min_val, init_max_val)
noise[0, -1] = np.random.uniform(init_min_val, init_max_val)
noise[-1, 0] = np.random.uniform(init_min_val, init_max_val)
noise[-1, -1] = np.random.uniform(init_min_val, init_max_val)
rec_midpoint_displacement(noise, max_error)
return noise
def rec_midpoint_displacement(noise, max_error):
if noise.shape == (2, 2):
return
midpoint_index = noise.shape[0] // 2
noise[0, midpoint_index] = (
(noise[0, 0] + noise[0, -1]) / 2.0 +
np.random.uniform(-max_error, max_error)
)
noise[midpoint_index, 0] = (
(noise[0, 0] + noise[-1, 0]) / 2.0 +
np.random.uniform(-max_error, max_error)
)
noise[midpoint_index, -1] = (
(noise[0, -1] + noise[-1, -1]) / 2.0 +
np.random.uniform(-max_error, max_error)
)
noise[-1, midpoint_index] = (
(noise[-1, 0] + noise[-1, -1]) / 2.0 +
np.random.uniform(-max_error, max_error)
)
noise[midpoint_index, midpoint_index] = (
noise[0, midpoint_index] +
noise[midpoint_index, 0] +
noise[midpoint_index, -1] +
noise[-1, midpoint_index]
) / 4.0 + np.random.uniform(-max_error, max_error)
new_max_error = max_error / 2.0
rec_midpoint_displacement(noise[:midpoint_index + 1, :midpoint_index + 1],
new_max_error)
rec_midpoint_displacement(noise[:midpoint_index + 1, midpoint_index:],
new_max_error)
rec_midpoint_displacement(noise[midpoint_index:, :midpoint_index + 1],
new_max_error)
rec_midpoint_displacement(noise[midpoint_index:, midpoint_index:],
new_max_error)
if __name__ == "__main__":
plt.pcolormesh(midpoint_displacement(make_nth_size(6), max_error=2.0))
| mit |
jzt5132/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Show below is a logistic-regression classifiers decision boundaries on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
markusrenepae/DLTA | Version 1.2/Branches/Mean_comparison/Automation.py | 1 | 2805 | import datetime
import time
import matplotlib.pyplot as plt
import numpy as np
import pyautogui
import Calvar as var
import Methods as met
def move_cursor():
pyautogui.moveTo(var.rand)
def open_IQ():
pyautogui.click(var.chrome)
move_cursor()
time.sleep(1)
def put():
pyautogui.click(var.put)
move_cursor()
def call():
pyautogui.click(var.call)
move_cursor()
def new_position():
pyautogui.click(var.newpos)
move_cursor()
def get_price_data(screen):
price_data = np.array([])
for x in range(var.xstart, var.xend):
for y in range(var.ystart, var.yend):
pixel_color = screen.getpixel((x, y))
if pixel_color == (43, 171, 63):
price_data = np.append(price_data, -y)
return price_data
elif pixel_color == (255, 167, 77):
price_data = np.append(price_data, -y)
break
def plot_state(price_data_x, price_data, fit, std_limit):
plt.plot(price_data_x, price_data, "orange")
plt.scatter(price_data_x[-1], price_data[-1], color="green", s=5)
plt.plot(price_data_x, fit, "red")
plt.plot(price_data_x, fit + std_limit, "black")
plt.plot(price_data_x, fit - std_limit, "black")
plt.show()
def decide_bet(screen):
price_data = get_price_data(screen)
if price_data is not None:
mean1 = np.mean(price_data)
mean2 = np.mean(price_data[int(6 * len(price_data) / 7):len(price_data)])
price_data_x = np.array(range(price_data.size))
fit_func = np.polyfit(price_data_x, price_data, 1)
fit = np.polyval(fit_func, price_data_x)
error = price_data - fit
currentsec = datetime.datetime.now().second
if var.timeupper - 9 <= currentsec < var.timeupper:
factor = var.stdfactor
else:
factor = var.stdfactor * (1 + 0.75 * (11 - currentsec) / 11) # dynamic limit
std_limit = factor * np.std(error)
dot_y = price_data[-1]
last_fit = fit[-1]
if dot_y > last_fit + std_limit and (
mean2 - mean1 > 140 or (price_data[-1] - mean2 > 160 and currentsec > var.timelower + 10)):
put()
return True
elif dot_y < last_fit - std_limit and (
mean1 - mean2 > 170 or (mean2 - price_data[-1] > 160 and currentsec > var.timelower + 10)):
call()
return True
else:
return False
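# Editor-added note (numbers are illustrative): inside the last 9 seconds before
# var.timeupper the threshold factor is simply var.stdfactor; otherwise it is
# scaled by (1 + 0.75 * (11 - currentsec) / 11), e.g. var.stdfactor = 2 and
# currentsec = 0 give factor = 2 * (1 + 0.75 * 11 / 11) = 3.5.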
open_IQ()
bet_done = False
while True:
screen = pyautogui.screenshot()
seconds = datetime.datetime.now().second
if var.timelower <= seconds < var.timeupper:
bet_done = decide_bet(screen)
if bet_done:
time.sleep(25)
else:
pass
new_position()
else:
pass
| apache-2.0 |
linebp/pandas | pandas/io/clipboards.py | 14 | 3793 | """ io on the clipboard """
from pandas import compat, get_option, option_context, DataFrame
from pandas.compat import StringIO, PY2
def read_clipboard(sep='\s+', **kwargs): # pragma: no cover
r"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
Parameters
----------
sep : str, default '\s+'.
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
Returns
-------
parsed : DataFrame
"""
encoding = kwargs.pop('encoding', 'utf-8')
# only utf-8 is valid for passed value because that's what clipboard
# supports
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise NotImplementedError(
'reading from clipboard only supports utf-8 encoding')
from pandas.io.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text, encoding=(kwargs.get('encoding') or
get_option('display.encoding'))
)
except:
pass
# Excel copies into clipboard with \t separation
    # inspect no more than the first 10 lines; if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split('\n')[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count('\t') for x in lines])
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
sep = '\t'
if sep is None and kwargs.get('delim_whitespace') is None:
sep = '\s+'
return read_table(StringIO(text), sep=sep, **kwargs)
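# Editor-added usage sketch (assumes tab/whitespace separated text, e.g. copied
# from a spreadsheet, is already on the system clipboard):
#   import pandas as pd
#   df = pd.read_clipboard()                  # wraps the function above
#   df.to_clipboard(excel=True, sep=',')      # round-trips back to the clipboard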
def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows:
- OS X:
"""
encoding = kwargs.pop('encoding', 'utf-8')
# testing if an invalid encoding is passed to clipboard
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise ValueError('clipboard only supports utf-8 encoding')
from pandas.io.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = '\t'
buf = StringIO()
# clipboard_set (pyperclip) expects unicode
obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs)
text = buf.getvalue()
if PY2:
text = text.decode('utf-8')
clipboard_set(text)
return
except:
pass
if isinstance(obj, DataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context('display.max_colwidth', 999999):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
| bsd-3-clause |
hantsik/dissertation | struct_func.py | 1 | 25669 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 9 21:30:04 2017
@author: hanna-stinasonts
"""
'''
INTRO
This is just a long list of functions that I have used in the code
The functions can be divided into:
a) Basic - functions that use predefined parameters and built-in functions.
These are all based on textbook structural engineering formulas.
b) Advanced - functions that use SOME predefined parameters but calculate
most by using the user-written and then imported Basic functions.
'''
# not really sure if these need to be here but just kindof left it like that
import numpy as np
import copy
np.set_printoptions(precision=1) #set working precision to .1
import matplotlib.pyplot as plt
import pandas
##################################
####---BASIC FUNCTIONS---------###
##################################
def geometry(m, b, h):
"Return the geometrical properties of cross-section, including the area, location of neutral axis, and second moment of area"
#LOCATION OF NEUTRAL AXIS
A = 0.0 #Area
Ay = 0.0 #First moment of area
i = 0
j = 0
while ( i<h ):
while ( j<b ):
if ( (m[i,j]) != 0 ) : #If 0, then considered as empty and not counted
A=A+1
                Ay = Ay + (i+ 0.5) #i+0.5 to account for the centroid of the element (element height 1)
j= j + 1
i=i+1
j=0
ybary= Ay/A #Location of neutral axis
# CALCULATING SECOND MOMENT OF AREA
Iy=0 #Second moment of area
i=0 #Again, i and j are elements to help counting
j=0
while ( i<h ):
while ( j<b ):
            if ( (m[i,j]) != 0 ): #As 0 would mean an element with no area
Iy = Iy + ( i+0.5 - ybary )**2
j= j + 1
i=i+1
j=0
return A, ybary, Iy
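def _geometry_demo():
    "Editor-added sketch: a hypothetical 4x3 solid rectangle run through geometry()"
    # Every element counts for a solid block, so A = 12, the neutral axis sits
    # at mid-height (ybary = 2.0) and the discretised Iy sums to 15.0.
    demo = np.ones((4, 3), dtype=np.float)
    return geometry(demo, 3, 4)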
# CALCULATE BENDING STRESS IN EACH ELEMENT - major axis
def moment(m, M, b, h, ybar, I):
"Return the moment matrix"
#Make a copy of the element matrix and substitue values with moment stresses
    moment1 = copy.deepcopy (m) #deepcopy because a normal copy doesn't work properly
i=0
j=0
while ( i<h ):
while ( j<b ):
if ( (moment1[i,j]) != 0) :
moment1 [i,j] = (M*(ybar-i-0.5))/I #Calculates bending stress in each element
j= j + 1
i=i+1
j=0
return moment1
# CALCULATING SHEAR STRESSES IN EACH ELEMENT - major axis
def shear(m, S, b, h, ybary, Iy):
"Return the shear stresses matrix"
shear1 = copy.deepcopy (m)
i=0
j=0
As = 0 # To calculate the area in shear
t = 0 # To calculate the breadth of a slither
Ay=0
'''
The shear stress formula is a bit funny,
    first you need to go through each row to know how many elements in the slither
    are carrying the stress and what the total area above the slither is.
    Then knowing this, you can go through the row again and apply the same
shear stress value to all the elements, hence the double loop.
'''
while ( i<h ): #Starts to read from top first row down
#The loop where you count elements in and up to end of row
        while ( j<b ): # Starts to read elements from left in first row
if ( (shear1[i,j]) != 0) :
t= t+1
As= As+1
Ay = Ay + 1*(ybary-i-0.5)
j=j+1
j=0
#Loop where you assign values to elements in row
        while ( j<b ): # Starts to read elements from left in first row
if ( (shear1[i,j]) != 0 ) :
                shear1[i,j]=(S*(Ay)/(Iy*t)) #NB the ybar-i/2 is a rough simplification of the centroid
j=j+1
j=0
i=i+1
t=0
'''
Another slightly tricky bit here.
Because I have taken cuts through the bottom of the element row it means that
    it will produce results where the top row has some shear in it with the bottom
    row being all elements 0. I'm going to fix the top row value to always be zero because
    that would be most correct in theoretical terms.
    Although practically this doesn't make much difference to the results,
I would prefer to keep top and bottom results symmetrical.
'''
shear1[0, :] = 0 #Turn all first row values to zero
return shear1
# PRINCIPAL STRESSES
def principal (m, b, h, momentx, momenty, shear1):
"Return the principal stresses matrix"
'''
So here comes two more tricky parts
Tricky nr 1
I want to compare the MAX principal stress which could be both pos or neg
Python always takes the pos value of a square root, therefore i need to try out
total principal stress with both the pos or neg value of squareroot
Hence the z with '+' and w with '-'
Then i go on to compare the two values in abs terms, and assign the bigger
one to element.
Tricky (and questionable) nr 2 - NEEDS TO BE REVISED/CONFIRMED
Initially I had just abs(z) > abs(w), and not really thinking about what will
happen if abs(z) = abs (w).
Then I realised if abs(z) = abs (w) it means that (momentx[i,j] + momenty[i,j])/2
value must be 0, meaning the only force acting would be the shear force.
    I have used an arbitrary sign convention currently for the shear force so
not really sure what sign it should be but have left it as pos for now.
'''
principal1 = copy.deepcopy (m)
i=0
j=0
while ( i<h ):
while ( j<b ):
if ( (principal1[i,j]) != 0 ) :
# Tricky nr 1
z = (momentx[i,j] + momenty[i,j])/2 + (( ((momentx[i,j] - momenty[i,j])/2)**2 + (shear1[i,j])**2 )**(0.5))
w = (momentx[i,j] + momenty[i,j])/2 - (( ((momentx[i,j] - momenty[i,j])/2)**2 + (shear1[i,j])**2 )**(0.5))
#Tricky (and questionable) nr 2 - NEEDS TO BE REVISED/CONFIRMED
if (abs(z) >= abs(w)):
principal1 [i,j]=z
else:
principal1 [i,j]=w
j= j + 1
i=i+1
j=0
return principal1
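def _basic_stress_demo():
    "Editor-added sketch chaining the basic functions on a hypothetical section"
    # A 10x4 solid rectangle under an arbitrary major-axis moment and shear force;
    # minor-axis bending is set to zero, so principal() combines major-axis bending
    # and shear stresses only. All numbers are illustrative.
    m = np.ones((10, 4), dtype=np.float)
    A, ybar, Iy = geometry(m, 4, 10)
    bend = moment(m, 1000.0, 4, 10, ybar, Iy)
    shr = shear(m, 100.0, 4, 10, ybar, Iy)
    return principal(m, 4, 10, bend, np.zeros_like(bend), shr)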
##################################
####---ADVANCED FUNCTIONS------###
##################################
def initial (h,b,Mx,My,S,P,maxstress):
"Return the geometrical properties of cross-section"
'''
This function calculates the round 0 principal stresses, meaning in the
cross-section where no material has yet to be removed, utilising the above
written functions.
'''
from struct_func import geometry, moment, shear, principal
import numpy as np
import copy
    np.set_printoptions(precision=1) # Sets the rounding precision to 0.1
    #Creating the matrix of a solid hxb element, filled for now with all values 1
    m = np.ones((h, b), dtype=np.float ) #np.float instead of int in order to get decimals
mx= copy.deepcopy (m.transpose()) #same matrix turned around to work with minor axis
#MAJOR AXIS GEOMETRY
Y= (geometry(m,b,h))
A =Y[0]
ybary = Y[1]
Iy = Y[2]
#MINOR AXIS GEOMETRY
Y= (geometry(mx,h,b)) #notice how you have to swap b and h here to work in minor
ybarx = Y[1]
Ix = Y[2]
# CALCULATE BENDING AND SHEAR IN EACH ELEMENT
momentx= (moment(m,Mx,b,h,ybary,Iy))
momenty= (moment (mx,My,h,b,ybarx,Ix)) #takes the turned around matrix to local axis
    momenty= copy.deepcopy (momenty.transpose()) # turns it back to align with the global axes for summing purposes
shear1 = (shear(m,S, b, h,ybary,Iy))
axial1=P/A
#This is a check to ensure that axial stresses within the stress limits
if (axial1 > maxstress):
print("The maxstress is ", maxstress, " but the axial stress is ", axial1)
print("Section is overstressed, choose different input", "\n")
principal1 = (principal(m, b, h, momentx, momenty, shear1))
return principal1
def iteration (h,b,f,w, Mx,My,My1,S,P,maxstress,step):
"Return the final iterataion of the cross-section"
from struct_func import geometry, moment, shear, principal, initial
# The function produces a set of initial principal stresses results
principal1 = (initial (h,b,Mx,My,S,P,maxstress) )
# Then it checks if the principal stresses are within limits
#print ("Intitial princiapl, ", "\n", principal1)
if ( ((abs(principal1))>(maxstress)).any() ):
print ("The max principal stress is more than allowed")
print("Section is overstressed, choose different input", "\n")
# If there is no error message, it proceeds to the iteration loop
else:
kord=1 #Remembers the number of the iteration
#There may be instances when it's already optimum at first round
#So for these cases
remember= copy.deepcopy (principal1)
absprincipal1= abs(principal1) # You take abs as you have both +&- stresses
'''
        Here I have incorporated a boundary condition for constructability purposes.
        No matter how small the stresses might be, it will always keep a flat top flange.
        To do this, I have only allowed the loop to delete elements that are below
        the predefined flange thickness of f.
Therefore the iteration loop will stop if it has either reached the
'perfect optimum' section (meaning that material stresses are as close to max
as possible)
OR
If all elements below the flange have already been deleted.
        To check the latter I have made a new matrix which contains all the element
        stresses below the flange. If all these are 0 the iteration loop will close.
'''
#Matrix of element stresses below the flange
flange1 = np.delete(principal1, np.s_[0:(f)], axis=0)
#flange for
        #If there are no out of plane reactions, then place the web in the middle
if (My1==0):
flange = np.delete(flange1, np.s_[ int(b/2 - w/2 - 1): int(b/2 + w/2) ] , axis=1)
#If there are, then place web in the outside rows
else:
flange2 = np.delete(flange1, np.s_[ int(b-w-1):int(b) ] , axis=1)
flange = np.delete(flange2, np.s_[ int(0):int(w) ] , axis=1)
        # The flange matrix now also excludes all the web columns
#http://stackoverflow.com/questions/16632568/remove-a-specific-column-in-numpy
'''
        We also want to exclude the web columns from this flange matrix;
        nobody will design an uneven beam width anyway,
        so the web columns are removed symmetrically about b/2.
'''
'''
        The while condition:
        the loop keeps running while the maximum absolute value of all non-zero elements
        is smaller than the max stress limit AND the flange matrix still contains
        non-zero elements; as soon as either condition fails the loop stops.
'''
while (((np.max(absprincipal1[np.nonzero(absprincipal1)]))<maxstress) & np.any(flange) ) :
'''
            This loop sets every understressed element to nought,
            taking into account the step size and the iteration number.
Starting from row f means that no elements within flange will be deleted.
'''
#print (flange)
#print (principal1)
#print ("Max stressxkordxstep ", maxstress*step*kord, "\n")
#print ("Max stress value, ", np.max(absprincipal1[np.nonzero(absprincipal1)]) , "\n")
i=f
j=0
if (My1==0):
while ( i<h ):
#print (i)
while ( j<b ):
if ( absprincipal1[i,j] < maxstress*step*kord ):
#print (absprincipal1[i,j] < maxstress*step*kord)
if ( j < (b/2-w/2) or j > (b/2+w/2-1)):
principal1 [i,j] = 0
j= j + 1
i=i+1
j=0
else:
while ( i<h ):
#print (i)
while ( j<b ):
if ( absprincipal1[i,j] < maxstress*step*kord ):
#print (absprincipal1[i,j] < maxstress*step*kord)
if ( j > (w-1) and j < (b-w)):
principal1 [i,j] = 0
j= j + 1
i=i+1
j=0
#print ("after removing material new matrix is, ", "\n", principal1 )
# Then you remember the new matrix with removed material and work with that instead
m = copy.deepcopy(principal1)
mx= copy.deepcopy (m.transpose())
#MAJOR AXIS GEOMETRY
Y= (geometry(m,b,h))
A =Y[0]
ybary = Y[1]
Iy = Y[2]
#MINOR AXIS GEOMETRY
Y= (geometry(mx,h,b))
ybarx = Y[1]
Ix = Y[2]
# CALCULATE STRESSes IN EACH ELEMENT
momentx= (moment (m,Mx,b,h,ybary,Iy))
momenty= (moment (mx,My,h,b,ybarx,Ix))
momenty= copy.deepcopy (momenty.transpose())
shear1 = (shear(m,S, b, h,ybary,Iy))
# COMBINES TO PRINCIPAL
principal1 = (principal(m, b, h, momentx, momenty, shear1))
#print(principal1)
# REPRODUCES THE NEW FLANGE MATRIC OF ELEMENTS BELOW IT
#The first line only does stuff with top flange
flange1 = np.delete(principal1, np.s_[0:(f)], axis=0)
            #If there are no out of plane reactions, then place the web in the middle
if (My1==0):
flange = np.delete(flange1, np.s_[ int(b/2 - w/2 - 1): int(b/2 + w/2) ] , axis=1)
#If there are, then place web in the outside rows
else:
flange2 = np.delete(flange1, np.s_[ int(b-w-1):int(b) ] , axis=1)
flange = np.delete(flange2, np.s_[ int(0):int(w) ] , axis=1)
axial1 = round(P/A,1)
kord = kord + 1
#print (kord)
absprincipal1= abs(principal1)
'''
The iteration loop will stop if any stress value has gone above limit.
Therefore we need to retrieve the results from before this happened, which is what
the 'remember' bit below does.
'''
#REMEMBERING THE CORRECT VALUES
# No values in principal matrix are greater than maxstress
# AND axial stress is smaller than maxstress
#print (np.max(absprincipal1), "viimane if tingimus" )
if ((np.max(absprincipal1[np.nonzero(absprincipal1)])) < maxstress) and (axial1 < maxstress):
remember= copy.deepcopy (principal1)
#rememberflange = copy.deepcopy(flange)
remembera= A
#print (flange)
#print (remember)
#index = ['Row'+str(i) for i in range(1, len(rememberflange)+1)]
#df = pandas.DataFrame(rememberflange, index=index)
#df.to_csv('flange.csv', split=', ')
#print ("Principal, ", "\n", remember)
print ("The area in mm2 is ", remembera)
return remember
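def _iteration_demo():
    "Editor-added sketch: hypothetical inputs illustrating the iteration() call"
    # Units follow the rest of the module (mm, N and N mm); the values below are
    # arbitrary but low enough that the 275 MPa stress limit is not exceeded in
    # the uncut section, so the optimisation loop can actually start.
    return iteration(h=100, b=50, f=10, w=6, Mx=5.0e6, My=0.0, My1=0.0,
                     S=1.0e4, P=1.0e5, maxstress=275.0, step=0.05)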
def flangemove (flangeremember,b, Mx, My,S,P,maxstress,step):
"This gunction uses a defined web and flance thickness at which it moves the bottom around, not changing any cross-section"
from struct_func import geometry, moment, shear, principal
'''
    You take the section that has already been optimised in height (the moved base result)
    and try to delete more elements from the ends of the bottom flange.
'''
#Measuring flange-actual thickness
# so you take the moved base result and start deleting elements
# from both ends of the bottom flange
    #tricky because the first will be zero anyway
absnewsection1= abs(flangeremember)
#print ("Max in imported stress ", ((np.max(absnewsection1[np.nonzero(absnewsection1)]))) )
flangeremember.shape
h = ( flangeremember.shape[0] ) #This return the nr of rows
print ("Measure the height ",h, "\n")
#Now you start removing the row just below
#While max stresses are within allowable limits
remember = copy.deepcopy (flangeremember)
Y= (geometry(remember,b,h))
remembera =Y[0]
newflange= copy.deepcopy(flangeremember)
absnewsection = abs(newflange)
#print ("Max current stress ", ((np.max(absnewsection[np.nonzero(absnewsection)]))) )
# you start from the two outermost rows of the flange
j1 = 0
j2 = b-1
midpoint=b/2
#print ("The maximums stress is ", (np.max(absnewsection[np.nonzero(absnewsection)])) )
while ( ((np.max(absnewsection[np.nonzero(absnewsection)])) < maxstress) and (j1<midpoint) ):
#print ("Calculating at flange at row ", j1, "\n")
        #h is the number of rows, therefore the last index is h-1
i = h-1 #start at the most bottom row
newflange[i,j1] = 0
newflange[i,j2] =0
        i=i-1 #move up by one row, because the bottom row will be zero at the ends
        #delete all elements in that column of the flange
while ( ( newflange[i,j1] != 0 ) and ( newflange[i,j2] !=0 ) ):
newflange[i,j1] = 0
newflange[i,j2] = 0
i=i-1
#Prep for next round
j1=j1+1
j2=j2-1
#print (newsection,"\n")
'''
then you should recalculate and check the stress levels
        and if they are fine you should then go on and delete the next two columns
'''
m = copy.deepcopy(newflange)
mx= copy.deepcopy (m.transpose())
#MAJOR AXIS GEOMETRY
Y= (geometry(m,b,h))
A =Y[0]
ybary = Y[1]
Iy = Y[2]
#MINOR AXIS GEOMETRY
Y= (geometry(mx,h,b))
ybarx = Y[1]
Ix = Y[2]
# CALCULATE STRESSes IN EACH ELEMENT
momentx= (moment (m,Mx,b,h,ybary,Iy))
momenty= (moment (mx,My,h,b,ybarx,Ix))
momenty= copy.deepcopy (momenty.transpose())
shear1 = (shear(m,S, b, h,ybary,Iy))
axial1=P/A
# COMBINES TO PRINCIPAL
newsection2 = (principal(m, b, h, momentx, momenty, shear1))
#print ("recalculating the principal stresses for new geomter, ", "\n")
#print(newsection,"\n")
absnewsection= abs(newsection2)
'''
The iteration loop will stop if any stress value has gone above limit.
Therefore we need to retrieve the results from before this happened, which is what
the 'remember' bit below does.
if j1==100:
remembercopy2= np.flipud(newsection2)
remembercopy2[remembercopy2 == 0.0] = np.nan
np.flipud(remembercopy2)
# http://stackoverflow.com/questions/10114576/setting-points-with-no-data-to-white-with-matplotlib-imshow
img = plt.imshow(remembercopy2, interpolation='nearest')
img.set_cmap('plasma') #Plasma is pretty http://matplotlib.org/examples/color/colormaps_reference.html
plt.clim(-275,275)
plt.axis('off')
plt.ylim([0,500])
plt.show()
plt.clf()
if j1==50:
remembercopy2= np.flipud(newsection2)
remembercopy2[remembercopy2 == 0.0] = np.nan
np.flipud(remembercopy2)
# http://stackoverflow.com/questions/10114576/setting-points-with-no-data-to-white-with-matplotlib-imshow
img = plt.imshow(remembercopy2, interpolation='nearest')
img.set_cmap('plasma') #Plasma is pretty http://matplotlib.org/examples/color/colormaps_reference.html
plt.clim(-275,275)
plt.axis('off')
plt.ylim([0,500])
plt.show()
plt.clf()
'''
#REMEMBERING THE CORRECT VALUES
# No values in principal matrix are greater than maxstress
# AND axial stress is smaller than maxstress
if ((np.max(absnewsection[np.nonzero(absnewsection)])) < maxstress) and (axial1 < maxstress):
remember= copy.deepcopy (newsection2)
remembera= A
#print ("The area in mm2 is ", remembera)
#print ("The final I value n major axis is, ", Iy)
#print ("And the rotation is ", (Iy*190000/My) )
#print ("Which results in a deflection of (in mm), ", (Iy*190000*700/My) )
#where 700 only applied for length 7000 and 10 cuts
return remember
def basemove (midsection, h,b,f,w, Mx,My1, My,S,P,maxstress,step):
"This gunction uses a defined web and flance thickness at which it moves the bottom around, not changing any cross-section"
from struct_func import geometry, moment, shear, principal
'''
You'll take the optimum mid-section, and start by deleting the row just
below the top flange.
    The top flange is probably thicker now than the initially prescribed one,
    therefore the thickness needs to be measured.
'''
#Measuring flange-actual thickness
fa=0
i=0
#just to make sure there is not web at that place
if (My1==0):
j=0
else:
j= b/2-w/2-5
#print ("j value i", j)
while ( midsection[i,j] != 0 ):
fa=fa+1
i=i+1
#print("Flange thickness in this case is ,", fa, )
#Now you start removing the row just below
#While max stresses are within allowable limits
remember = copy.deepcopy(midsection)
newsection= copy.deepcopy(midsection)
absnewsection = abs(newsection)
while ( (np.max(absnewsection[np.nonzero(absnewsection)])) < maxstress ):
newsection1 = np.delete(newsection, [fa+1], axis=0)
h = h-1 #because the height gets smaller every time
m = copy.deepcopy(newsection1)
mx= copy.deepcopy (m.transpose())
#MAJOR AXIS GEOMETRY
Y= (geometry(m,b,h))
A =Y[0]
ybary = Y[1]
Iy = Y[2]
#MINOR AXIS GEOMETRY
Y= (geometry(mx,h,b))
ybarx = Y[1]
Ix = Y[2]
# CALCULATE STRESSes IN EACH ELEMENT
momentx= (moment (m,Mx,b,h,ybary,Iy))
momenty= (moment (mx,My,h,b,ybarx,Ix))
momenty= copy.deepcopy (momenty.transpose())
shear1 = (shear(m,S, b, h,ybary,Iy))
axial1=P/A
# COMBINES TO PRINCIPAL
newsection2 = (principal(m, b, h, momentx, momenty, shear1))
newsection=copy.deepcopy(newsection2)
absnewsection= abs(newsection2)
'''
The iteration loop will stop if any stress value has gone above the limit.
Therefore we need to retrieve the results from before this happened, which is what
the 'remember' bit below does.
'''
#REMEMBERING THE CORRECT VALUES
# No values in principal matrix are greater than maxstress
# AND axial stress is smaller than maxstress
'''
remembercopy= copy.deepcopy(newsection)
remembercopy[remembercopy == 0.0] = np.nan
# http://stackoverflow.com/questions/10114576/setting-points-with-no-data-to-white-with-matplotlib-imshow
img = plt.imshow(remembercopy, interpolation='nearest')
img.set_cmap('plasma') #Plasma is pretty http://matplotlib.org/examples/color/colormaps_reference.html
plt.clim(-275,275)
plt.axis('off')
plt.ylim([0,500])
plt.show()
plt.clf()
print (h)
'''
if ((np.max(absnewsection[np.nonzero(absnewsection)])) < maxstress) and (axial1 < maxstress):
remember= copy.deepcopy (newsection2)
#remembera= A
#print ("The area in mm2 is ", remembera)
#print ("The actual hight now is, ", h)
#print ("The final I value n major axis is, ", Iy)
#print ("And the rotation is ", (Iy*180000/My) )
#print ("Which results in a deflection of (in mm), ", (Iy*180000*700/My) )
return remember
| mit |
abimannans/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principle component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
nvoron23/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
datapythonista/pandas | pandas/tests/indexing/multiindex/test_sorted.py | 4 | 4461 | import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
class TestMultiIndexSorted:
def test_getitem_multilevel_index_tuple_not_sorted(self):
index_columns = list("abc")
df = DataFrame(
[[0, 1, 0, "x"], [0, 0, 1, "y"]], columns=index_columns + ["data"]
)
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.loc[query_index, "data"]
xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=["a", "b", "c"])
xp = Series(["x"], index=xp_idx, name="data")
tm.assert_series_equal(rs, xp)
def test_getitem_slice_not_sorted(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.sort_index(level=1).T
# buglet with int typechecking
result = df.iloc[:, : np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("key", [None, lambda x: x])
def test_frame_getitem_not_sorted2(self, key):
# 13431
df = DataFrame(
{
"col1": ["b", "d", "b", "a"],
"col2": [3, 1, 1, 2],
"data": ["one", "two", "three", "four"],
}
)
df2 = df.set_index(["col1", "col2"])
df2_original = df2.copy()
with tm.assert_produces_warning(FutureWarning):
return_value = df2.index.set_levels(
["b", "d", "a"], level="col1", inplace=True
)
assert return_value is None
with tm.assert_produces_warning(FutureWarning):
return_value = df2.index.set_codes([0, 1, 0, 2], level="col1", inplace=True)
assert return_value is None
assert not df2.index.is_monotonic
assert df2_original.index.equals(df2.index)
expected = df2.sort_index(key=key)
assert expected.index.is_monotonic
result = df2.sort_index(level=0, key=key)
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_values_key(self, multiindex_dataframe_random_data):
arrays = [
["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = zip(*arrays)
index = MultiIndex.from_tuples(tuples)
index = index.sort_values( # sort by third letter
key=lambda x: x.map(lambda entry: entry[2])
)
result = DataFrame(range(8), index=index)
arrays = [
["foo", "foo", "bar", "bar", "qux", "qux", "baz", "baz"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = zip(*arrays)
index = MultiIndex.from_tuples(tuples)
expected = DataFrame(range(8), index=index)
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T
df["foo", "four"] = "foo"
arrays = [np.array(x) for x in zip(*df.columns.values)]
result = df["foo"]
result2 = df.loc[:, "foo"]
expected = df.reindex(columns=df.columns[arrays[0] == "foo"])
expected.columns = expected.columns.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
df = df.T
result = df.xs("foo")
result2 = df.loc["foo"]
expected = df.reindex(df.index[arrays[0] == "foo"])
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [
["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = zip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(np.random.randn(8), index=index)
arrays = [np.array(x) for x in zip(*index.values)]
result = s["qux"]
result2 = s.loc["qux"]
expected = s[arrays[0] == "qux"]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
| bsd-3-clause |
FayolChang/vnpy | vn.datayes/api.py | 19 | 45371 | #encoding: UTF-8
import os
import json
import time
import requests
import pymongo
import pandas as pd
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread, Timer
from pymongo import MongoClient
from requests.exceptions import ConnectionError
from errors import (VNPAST_ConfigError, VNPAST_RequestError,
VNPAST_DataConstructorError)
class Config(object):
"""
Json-like config object.
The Config contains all kinds of settings and user info that
could be useful in the implementation of the API wrapper.
privates
--------
* head: string; the name of config file.
* token: string; user's token.
* body: dictionary; the main content of config.
- domain: string, api domain.
- ssl: boolean, specifes http or https usage.
- version: string, version of the api. Currently 'v1'.
- header: dictionary; the request header which contains
authorization infomation.
"""
head = 'my config'
token_ = '44ebc0f058981f85382595f9f15f967' + \
'0c7eaf2695de30dd752e8f33e9022baa0'
token = '7c2e59e212dbff90ffd6b382c7afb57' + \
'bc987a99307d382b058af6748f591d723'
body = {
'ssl': False,
'domain': 'api.wmcloud.com/data',
'version': 'v1',
'header': {
'Connection' : 'keep-alive',
'Authorization': 'Bearer ' + token
}
}
def __init__(self, head=None, token=None, body=None):
"""
Reloaded constructor.
parameters
----------
* head: string; the name of config file. Default is None.
* token: string; user's token.
* body: dictionary; the main content of config
"""
if head:
self.head = head
if token:
self.token = token
if body:
self.body = body
def view(self):
""" Prettify printing method. """
config_view = {
'config_head' : self.head,
'config_body' : self.body,
'user_token' : self.token
}
print json.dumps(config_view,
indent=4,
sort_keys=True)
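# A minimal usage sketch for Config (hedged; the token string below is a
# placeholder, not a real credential):
#
#   my_config = Config(token='<your_datayes_token>')
#   my_config.view()
#
# The configured object is then handed to the PyApi class defined further below.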
#----------------------------------------------------------------------
# Data containers.
class BaseDataContainer(object):
"""
Basic data container. The fundamental of all other data
container objects defined within this module.
privates
--------
* head: string; the head(type) of data container.
* body: dictionary; data content. Among all sub-classes that inherit
BaseDataContainer, type(body) varies according to the financial meaning
that the child data container stands for.
- History:
- Bar
"""
head = 'ABSTRACT_DATA'
body = dict()
pass
class History(BaseDataContainer):
"""
Historical data container. The foundation of all other pandas
DataFrame-like two dimensional data containers for this module.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ...],
'retCode': 1,
'retMsg': 'Success'}.
So the body of data is actually in data['data'], which is
our target when constructing the container.
"""
try:
assert 'data' in data
self.body = pd.DataFrame(data['data'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + 'input is not a dataframe.'
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
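# A hedged construction sketch for History (the dictionary below only mimics
# the shape of a Datayes response; field names and values are illustrative):
#
#   fake_resp = {'data': [{'tradeDate': '2015-07-01', 'closePrice': 15.88},
#                         {'tradeDate': '2015-07-02', 'closePrice': 15.99}],
#                'retCode': 1, 'retMsg': 'Success'}
#   hist = History(fake_resp)
#   print hist.body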
class Bar(History):
"""
Historical Bar data container. Inherits from History(); a pandas
DataFrame-like two dimensional data container for Bar data.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY_BAR'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [{
'exchangeCD': 'XSHG',
'utcOffset': '+08:00',
'unit': 1,
'currencyCD': 'CNY',
'barBodys': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ... ],
'ticker': '000001',
'shortNM': u'\u4e0a\u8bc1\u6307\u6570'
}, ...(other tickers) ],
'retCode': 1,
'retMsg': 'Success'}.
When requesting 1 ticker, the json['data'] layer has only one element;
we expect that this layout is meant for collecting data for multiple
tickers, although that is currently not possible.
So we want resp.json()['data'][0]['barBodys'] for Bar data contents,
and that is what we go into when constructing Bar.
"""
try:
assert 'data' in data
assert 'barBodys' in data['data'][0]
self.body = pd.DataFrame(data['data'][0]['barBodys'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + 'input is not a dataframe.'
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
#----------------------------------------------------------------------
# Datayes Api class
class PyApi(object):
"""
Python based Datayes Api object.
PyApi should be initialized with a Config json. The config must be complete,
in that once constructed, the private variables like request headers,
tokens, etc. become constant values (inherited from config), and will be
consistently referred to whenever requests are made.
privates
--------
* _config: Config object; a container of all useful settings when making
requests.
* _ssl, _domain, _domain_stream, _version, _header, _account_id:
boolean, string, string, string, dictionary, integer;
just private references to the items in Config. See the docs of Config().
* _session: requests.session object.
examples
--------
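Hypothetical session (hedged: the ticker and dates below are illustrative,
and a valid user token must already be present in the Config):

    config = Config()
    api = PyApi(config)
    bars = api.get_equity_D1(start='20150101', end='20150701',
                             ticker='000001')
    print bars.body.head()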
"""
_config = Config()
# request stuffs
_ssl = False
_domain = ''
_version = 'v1'
_header = dict()
_token = None
_session = requests.session()
def __init__(self, config):
"""
Constructor.
parameters
----------
* config: Config object; specifies user and connection configs.
"""
if config.body:
try:
self._config = config
self._ssl = config.body['ssl']
self._domain = config.body['domain']
self._version = config.body['version']
self._header = config.body['header']
except KeyError:
msg = '[API]: Unable to configure api; ' + \
'config file is incomplete.'
raise VNPAST_ConfigError(msg)
except Exception,e:
msg = '[API]: Unable to configure api; ' + str(e)
raise VNPAST_ConfigError(msg)
# configure protocol
if self._ssl:
self._domain = 'https://' + self._domain
else:
self._domain = 'http://' + self._domain
def __access(self, url, params, method='GET'):
"""
request specific data from given url with parameters.
parameters
----------
* url: string.
* params: dictionary.
* method: string; 'GET' or 'POST', request method.
"""
try:
assert type(url) == str
assert type(params) == dict
except AssertionError:
raise VNPAST_RequestError('[API]: Invalid url or parameter input.')
if not self._session:
s = requests.session()
else: s = self._session
# prepare and send the request.
try:
req = requests.Request(method,
url = url,
headers = self._header,
params = params)
prepped = s.prepare_request(req) # prepare the request
resp = s.send(prepped, stream=False, verify=True)
if method == 'GET':
assert resp.status_code == 200
elif method == 'POST':
assert resp.status_code == 201
return resp
except AssertionError:
msg = '[API]: Bad request, unexpected response status: ' + \
str(resp.status_code)
raise VNPAST_RequestError(msg)
pass
except Exception,e:
msg = '[API]: Bad request.' + str(e)
raise VNPAST_RequestError(msg)
#----------------------------------------------------------------------
# directly get methods - Market data
def get_equity_M1_one(self,
start='', end='', secID='000001.XSHG'):
"""
Get 1-minute intraday bar data of one security.
parameters
----------
* start, end: string; Time mark formatted in 'HH:MM'. Specifies the
start/end point of bar. Note that the requested date is the
latest trading day (only one day), and the default start/end time is
'09:30' and min(now, '15:00'). Effective minute bars range from
09:30 - 11:30 in the morning and 13:01 - 15:00 in the afternoon.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
"""
url = '{}/{}/api/market/getBarRTIntraDay.json'.format(
self._domain, self._version)
params = {
'startTime': start,
'endTime': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
print resp.json()
data = Bar(resp.json())
return data
except AssertionError: return 0
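# Hedged usage sketch for the intraday getter above (times and secID are
# illustrative; data is only returned for the latest trading day, and `api`
# is assumed to be a configured PyApi instance):
#
#   bar = api.get_equity_M1_one(start='09:30', end='10:30',
#                               secID='000001.XSHG')
#   print bar.body.head()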
def get_equity_M1(self, field='', start='20130701', end='20130730',
secID='000001.XSHG', output='df'):
"""
1-minute bar in a month, currently unavailable.
parameters
----------
* field: string; variables that are to be requested.
* start, end: string; Time mark formatted in 'YYYYMMDD'.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
url = '{}/{}/api/market/getBarHistDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'startDate': start,
'endDate': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = Bar(resp.json())
elif output == 'list':
data = resp.json()['data'][0]['barBodys']
return data
except AssertionError: return 0
def get_equity_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one security.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for securities)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- actPreClosePrice* double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- dealAmount* integer.
- turnoverRate double.
- accumAdjFactor* double.
- negMarketValue* double.
- marketValue* double.
- PE* double.
- PE1* double.
- PB* double.
Field is an optional parameter, default setting returns all fields.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of bar. Start and end are optional parameters. If
start, end and ticker are all specified, default 'one' value will be
abandoned.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange.
* ticker: string; the trading code in the form of '000001'.
* one: string; Date mark formatted in 'YYYYMMDD'.
Specifies one date on which data of all tickers are to be requested.
Note that to get effective json data response, at least one parameter
in {secID, ticker, tradeDate} should be entered.
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktEqud.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
#return resp
except AssertionError: return 0
def get_block_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_repo_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_bond_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one bond instrument.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for bonds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- turnoverRate double.
- dealAmount* integer.
- accrInterest* double.
- YTM(yieldToMaturity)* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktBondd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one future contract.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for future contracts)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- contractObject* string.
- contractMark* string.
- preSettlePrice* double.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol integer.
- turnoverValue integer.
- openInt* integer.
- CHG* double.
- CHG1* double.
- CHGPct* double.
- mainCon* integer (0/1 flag).
- smainCon* integer (0/1 flag).
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFutd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_main_D1(self, field='', start='', end='', mark='',
obj='', main=1, one=20150513):
"""
"""
pass
def get_fund_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one mutual fund.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for funds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
- discount* double.
- discountRatio* double.
- circulationShares* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFundd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_index_D1(self, field='', start='', end='', indexID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one stock index.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for indices)
- indexID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- porgFullName* string.
- exchangeCD string.
- preCloseIndex double.
- openIndex double.
- highestIndex double.
- lowestIndex double.
- closeIndex double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktIdxd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'indexID': indexID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_option_D1(self, field='', start='', end='', secID='',
optID='' ,ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one option contact.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for options)
- secID string.
- optID* string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol double.
- turnoverValue double.
- openInt* integer.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktOptd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'optID': optID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_stockFactor_D1(self, field='', secID='',
ticker='000001', start=20130701, end=20130801):
"""
Get 1-day interday factor data for stocks.
parameters
----------
* field: string; variables that are to be requested.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
url = '{}/{}/api/market/getStockFactorsDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
#----------------------------------------------------------------------
# directly get methods - Fundamental Data
def get_balanceSheet(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtBS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_balanceSheet_bnk(self):
"""
"""
pass
def get_balanceSheet_sec(self):
"""
"""
pass
def get_balanceSheet_ins(self):
"""
"""
pass
def get_balanceSheet_ind(self):
"""
"""
pass
def get_cashFlow(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtCF.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_cashFlow_bnk(self):
"""
"""
pass
def get_cashFlow_sec(self):
"""
"""
pass
def get_cashFlow_ins(self):
"""
"""
pass
def get_cashFlow_ind(self):
"""
"""
pass
def get_incomeStatement(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtIS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_incomeStatement_bnk(self):
"""
"""
pass
def get_incomeStatement_sec(self):
"""
"""
pass
def get_incomeStatement_ins(self):
"""
"""
pass
def get_incomeStatement_ind(self):
"""
"""
pass
#----------------------------------------------------------------------
# multi-threading download for database storage.
def __drudgery(self, id, db, indexType,
start, end, tasks, target):
"""
Basic drudgery function.
This method loops over a list of tasks (tickers) and gets data using the
target api.get_# method for all those tickers.
A new feature 'date' or 'dateTime'(for intraday) will be automatically
added into every json-like document, and specifies the datetime.
datetime() formatted date(time) mark. With the setting of MongoDB
in this module, this feature should be the unique index for all
collections.
By programmatically creating and assigning tasks to drudgery
functions, multi-threading download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* indexType: string(enum): 'date' or 'datetime'; specifies how
the collection index is formatted.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
* target: method; the api.get_# method that is to be called by
drudgery function.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
if indexType == 'date':
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
elif indexType == 'datetime':
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
else:
raise ValueError
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = target(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_equity_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_equity_D1)
def get_future_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_future_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_future_D1)
def get_index_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_index_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_index_D1)
def get_bond_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_bond_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_bond_D1)
def get_fund_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_fund_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_fund_D1)
def get_option_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_option_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_option_D1)
#----------------------------------------------------------------------
def __overlord(self, db, start, end, dName,
target1, target2, sessionNum):
"""
Basic controller of multithreaded requests.
Generates a list of all tickers, creates threads and distributes
tasks to individual #_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* dName: string; the path of the file where all tickers' information
is stored.
* target1: method; targetting api method that overlord calls
to get tasks list.
* target2: method; the corresponding drudgery function.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = target1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = target2,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
def get_equity_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get equity D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/equTicker.json',
target1 = self.get_equity_D1,
target2 = self.get_equity_D1_drudgery,
sessionNum = sessionNum)
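# A hedged end-to-end sketch of the multithreaded download controllers
# (assumes a MongoDB server on localhost; the database name is illustrative):
#
#   client = MongoClient('localhost', 27017)
#   db = client['DATAYES_EQU_D1']
#   api = PyApi(Config())
#   api.get_equity_D1_mongod(db, start='20140101', end='20150101',
#                            sessionNum=30)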
def get_future_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get future D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/futTicker.json',
target1 = self.get_future_D1,
target2 = self.get_future_D1_drudgery,
sessionNum = sessionNum)
def get_index_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get index D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/idxTicker.json',
target1 = self.get_index_D1,
target2 = self.get_index_D1_drudgery,
sessionNum = sessionNum)
def get_bond_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get bond D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/bndTicker.json',
target1 = self.get_bond_D1,
target2 = self.get_bond_D1_drudgery,
sessionNum = sessionNum)
def get_fund_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get fund D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/fudTicker.json',
target1 = self.get_fund_D1,
target2 = self.get_fund_D1_drudgery,
sessionNum = sessionNum)
def get_option_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get option D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/optTicker.json',
target1 = self.get_option_D1,
target2 = self.get_option_D1_drudgery,
sessionNum = sessionNum)
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
Generates a list of all tickers, creates threads and distributes
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
# to be deprecated
def get_equity_D1_drudgery_(self, id, db,
start, end, tasks=[]):
"""
Drudgery function of getting equity_D1 bars.
This method loops over a list of tasks (tickers) and gets D1 bars
for all these tickers. A new feature 'date' will be automatically
added into every json-like document, and specifies the datetime.
datetime() formatted date mark. With the default setting of MongoDB
in this module, this feature should be the unique index for all
collections.
By programmatically creating and assigning tasks to drudgery
functions, multi-threading download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = self.get_equity_D1(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
# If choke connection, standby for 1sec an invoke again.
time.sleep(1)
self.get_equity_D1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
Generates a list of all tickers, creates threads and distributes
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
def get_equity_M1_drudgery(self, id, db,
start, end, tasks=[]):
"""
Drudgery function of getting equity_M1 bars.
This method loops over a list of tasks (tickers) and gets 1-minute bars
for all these tickers. A new feature 'dateTime', combined from a Y-m-d
formatted date part and H:M time part, will be automatically added into
every json-like document. It would be a datetime.datetime() timestamp
object. In this module, this feature should be the unique index for all
collections.
By programmatically creating and assigning tasks to drudgery
functions, multi-threading download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars. Note that to ensure the
success of every request, the range between start and end should be
no more than one month.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
k, n = 1, len(tasks)
for secID in tasks:
try:
data = self.get_equity_M1(start = start,
end = end,
secID = secID,
output = 'list')
map(update_dt, data) # add datetime feature to docs.
coll = db[secID]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
# If the connection chokes, stand by for 1 second and invoke again.
time.sleep(1)
self.get_equity_M1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_M1_interMonth(self, db, id,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
tasks=[]):
"""
Mid-level wrapper of get equity M1 method.
Get 1-minute bar between specified start year and ending year for
more than one tickers in tasks list.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* id: integer; the ID of wrapper session.
* startYr, endYr: integer; the start and end years between which the
1-minute bar data is fetched month by month using the
get_equity_M1_drudgery() function.
Default values are two years ago and the current year.
the complete time range will be sub-divided into months. And threads
are deployed for each of these months.
- example
-------
Suppose .now() is August 15th 2015. (20150815)
startYr, endYr = 2014, 2015.
then two list of strings will be generated:
ymdStringStart = ['20140102','20140202', ... '20150802']
ymdStringEnd = ['20140101','20140201', ... '20150801']
the sub-timeRanges passed to drudgeries will be:
(start, end): (20140102, 20140201), (20140202, 20140301),
..., (20150702, 20150801).
So the actual time range is 20140102 - 20150801.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# Construct yyyymmdd strings (as the ymdStringStart/ymdStringEnd lists).
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'02' for k in range(1,13)]
ymdStringStart = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStringEnd = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
k = 0
for t in range(len(ymdStringEnd)-1):
start = ymdStringStart[t]
end = ymdStringEnd[t+1]
subID = str(id) + '_' + str(k)
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (subID, db, start, end, tasks))
thrd.start()
k += 1
def get_equity_M1_all(self, db,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
splitNum=10):
"""
"""
"""
# initialize task list.
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
exchangeCDs = list(data.body['exchangeCD'])
allSecIds = [allTickers[k]+'.'+exchangeCDs[k] for k in range(
len(allTickers))]
chunkSize = len(allSecIds)/splitNum
taskLists = [allSecIds[k:k+chunkSize] for k in range(
0, len(allSecIds), chunkSize)]
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStrings = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
print taskLists[0]
print ymdStrings
k = 0
for t in range(len(ymdStrings)-1):
start = ymdStrings[t]
end = ymdStrings[t+1]
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (k, db, start, end, taskLists[0]))
thrd.start()
k += 1
return 1
"""
pass
| mit |
lail3344/sms-tools | lectures/08-Sound-transformations/plots-code/FFT-filtering.py | 21 | 1723 | import math
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
N = 2048
start = 1.0*fs
x1 = x[start:start+N]
plt.figure(1, figsize=(12, 9))
plt.subplot(321)
plt.plot(np.arange(N)/float(fs), x1*np.hamming(N), 'b', lw=1.5)
plt.axis([0, N/float(fs), min(x1*np.hamming(N)), max(x1*np.hamming(N))])
plt.title('x (orchestra.wav)')
mX, pX = DFT.dftAnal(x1, np.hamming(N), N)
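# Build a band-pass filter directly in the dB magnitude spectrum: a Hanning
# bump spanning roughly 500 Hz to 4500 Hz (peaking at 0 dB) sits on top of a
# flat -60 dB floor, so mY = mX + filt attenuates everything outside the
# band by up to 60 dB.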
startBin = int(N*500.0/fs)
nBins = int(N*4000.0/fs)
bandpass = (np.hanning(nBins) * 60.0) - 60
filt = np.zeros(mX.size)-60
filt[startBin:startBin+nBins] = bandpass
mY = mX + filt
plt.subplot(323)
plt.plot(fs*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5, label = 'mX')
plt.plot(fs*np.arange(mX.size)/float(mX.size), filt+max(mX), 'k', lw=1.5, label='filter')
plt.legend(prop={'size':10})
plt.axis([0,fs/4.0,-90,max(mX)+2])
plt.title('mX + filter')
plt.subplot(325)
plt.plot(fs*np.arange(pX.size)/float(pX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pX),8])
plt.title('pX')
y = DFT.dftSynth(mY, pX, N)*sum(np.hamming(N))
mY1, pY = DFT.dftAnal(y, np.hamming(N), N)
plt.subplot(322)
plt.plot(np.arange(N)/float(fs), y, 'b')
plt.axis([0, float(N)/fs, min(y), max(y)])
plt.title('y')
plt.subplot(324)
plt.plot(fs*np.arange(mY1.size)/float(mY1.size), mY1, 'r', lw=1.5)
plt.axis([0,fs/4.0,-90,max(mY1)+2])
plt.title('mY')
plt.subplot(326)
plt.plot(fs*np.arange(pY.size)/float(pY.size), pY, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pY),8])
plt.title('pY')
plt.tight_layout()
plt.savefig('FFT-filtering.png')
plt.show()
| agpl-3.0 |
larissapassos/TAIA-finalproject | Processor.py | 2 | 5408 | import copy
from sklearn.feature_extraction.text import TfidfVectorizer as Tfidf
from sklearn.preprocessing import scale
import numpy as np
import re
import cPickle as pickle
from nltk.corpus import stopwords
import twokenize # tweet tokenizer
import time
from scipy.sparse import csr_matrix as toSparse
from scipy.sparse import hstack
from warnings import warn
from sys import stdout
tweet_tokenizer = twokenize.tokenize
# Taking important words off of the stopwords dictionary
adverbs = set(['muito', 'extremamente', 'bastante'])
emojiList = set([':-)', '(-:', '=)', '(=', '(:', ':)', ':-(', ')-:', '=(', ')=', ':(', '):', ':D', '^_^', '^__^', '^___^', ':d', 'd:', \
': )', '( :', ': (', ') :', '8)', '(8', '8(', ')8', '8 )', ') 8', '8 (', ';)', '(;', '; )', '( ;', ';-)', '(-;'])
posEmoji = set([':-)', '(-:', '=)', '(=', '(:', ':)', ':-(', ':D', '^_^', '^__^', '^___^', ':d', 'd:', ': )', '( :', '8)', \
'(8', '8 )', ';)', '; )', '; )', '( ;', ';-)', '(-;', '(;'])
negEmoji = emojiList.difference(posEmoji)
punctuation = set([',', ';', '.', ':', '.', '!', '?', '\"', '*', '\'', '(', ')', '-'])
pattern = re.compile(r'(.)\1{2,}', re.DOTALL) # for elongated words truncation
class Processor:
def __init__(self, stopwords, tokenizer=tweet_tokenizer, ngrams=2):
self.tokenizer = tokenizer
self.stopwords = stopwords
self.__target_not = u'n\xe3o'
for adv in adverbs:
if adv in self.stopwords: self.stopwords.remove(adv)
#TODO: use the adverbs below to generate lexicons in target language
self.__adverbs = adverbs
self.lang = 'pt'
self.__fitted = False
# WARNING: do NOT change the parameters of the vectorization. It is already
# set to the optimal configuration.
self.__vectorizer = Tfidf(ngram_range=(1,ngrams), binary=True,
tokenizer=self.tokenizer)
def __preprocess(self, tweetList, verbose=False):
rep_count = []
hst_count = []
hst_last = []
exc_count = []
exc_last = []
qst_count = []
qst_last = []
neg_count = []
tw_length = []
labels = []
ll = len(tweetList)
dot = ll / 50
for x in xrange(ll):
if verbose and dot > 0 and x % dot == 0:
stdout.write("."); stdout.flush()
tweet = tweetList[x].lower().encode('utf-8').decode('utf-8')
# Count reps
reps = pattern.findall(tweet)
if reps != []: tweet = pattern.sub(r'\1\1', tweet)
rep_count.append(len(reps))
# Tokenizing
tweet = self.tokenizer(tweet) # ok to use independent of language
# Removing stopwords and retweet noise
tweet = [word for word in tweet if word not in self.stopwords and not word.startswith('RT')]
# Normalizing mentions, hyperlinks
reps = 0. # float is intended type
hsts = 0. # necessary for scaling
excs = 0.
qsts = 0.
negs = 0.
last = -1.
label = np.inf
for i, word in enumerate(tweet):
if word.startswith(('.@', '@')): #mention
tweet[i] = '___mention___'
if word.startswith(('www','http')):
tweet[i] = '___url___'
if word.startswith('!'):
excs += 1
last = 0
if word.startswith('?'): #TODO: problem with ?!, !?, account for this
qsts += 1
last = 1
if word.startswith('#'):
hsts += 1
last = 2
if word == self.__target_not:
negs += 1
tweet[i] = ''
if (i+1)<len(tweet):
tweet[i+1] = self.__target_not+'___'+tweet[i+1]
else:
tweet[i] = self.__target_not
if label == np.inf and word in posEmoji:
label = +1
elif label == np.inf and word in negEmoji:
label = -1
hst_count.append(hsts)
qst_count.append(qsts)
exc_count.append(excs)
neg_count.append(negs)
tw_length.append(len(tweet))
labels.append(label)
# Removing punctuation
tweet = [''.join([w for w in word if w not in punctuation]) for word in tweet if len(word)>2]
tweet = ' '.join(tweet)
tweetList[x] = tweet
return (tweetList, rep_count, hst_count, exc_count, qst_count, neg_count, tw_length, labels)
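# A hedged usage sketch for this class (the tweets below are made up;
# Portuguese stopwords are assumed since self.lang defaults to 'pt'):
#
#   pt_stop = set(stopwords.words('portuguese'))
#   proc = Processor(stopwords=pt_stop)
#   corpus, feats = proc.process([u'muito bom!!! :)', u'n\xe3o gostei :('])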
def process(self, tweetList, verbose=False):
ll = copy.deepcopy(tweetList)
t0 = time.time()
if verbose:
print 'Normalizing and extracting features'
ret = self.__preprocess(ll, verbose)
corpus = ret[0]
rep_count, hst_count, exc_count, qst_count, neg_count, \
tw_length, labels = map(lambda x: np.array(x), list(ret[1:]))
feats = np.vstack((rep_count, hst_count, exc_count, qst_count, \
neg_count, tw_length, labels)).transpose()
if verbose:
print '\nTime elapsed on processing and feature extraction: %.0fs' % ((time.time()-t0))
return (corpus, feats) | apache-2.0 |
pypot/scikit-learn | sklearn/svm/tests/test_svm.py | 116 | 31653 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that no other
    # thread calling this wrapper is calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the decision_function shape with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
    # clone for checking clonability with lambda functions
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
kagayakidan/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate outer ring with inner circle
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
mlyundin/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed:
  * scikit-learn
This script runs two benchmarks.
In the first benchmark, we increase the number of samples
(at fixed dimensionality), fit and predict on them, and plot
the time taken as a function of the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
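    # First benchmark: fixed dimensionality (dim=10), growing number of samples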
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
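    # Second benchmark: fixed number of samples (100), growing dimensionality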
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
abimannans/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
robogen/CMS-Mining | temp_test.py | 1 | 3491 | from elasticsearch import Elasticsearch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.dates import AutoDateLocator, AutoDateFormatter
import numpy as np
import datetime as dt
import math
import json
import pprint
pretty = pprint.PrettyPrinter(indent=4)
with open('sites.json', 'r+') as txt:
sitesArray = json.load(txt)
with open('cms.json', 'r+') as txt:
cmsLocate = json.load(txt)
with open("config", "r+") as txt:
contents = list(map(str.rstrip, txt))
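# conAtlasTime: convert an ISO-8601 timestamp string to a UTC epoch timestamp;
# integer inputs are assumed to already be epoch timestamps and are returned as-is.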
def conAtlasTime(time):
if type(time) is str:
return (dt.datetime.strptime(time, '%Y-%m-%dT%X')).replace(tzinfo=dt.timezone.utc).timestamp()
elif type(time) is int:
return time
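# utcDate: convert an epoch timestamp into a timezone-aware UTC datetime.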
def utcDate(time):
return dt.datetime.fromtimestamp(time, dt.timezone.utc)
esAtlas = Elasticsearch([{
'host': contents[4], 'port': contents[3]
}], timeout=50)
esHCC = Elasticsearch([{
'host': contents[0], 'port': contents[1]
}], timeout=50)
esCon = Elasticsearch([{
'host': contents[4], 'port': contents[5]
}], timeout=50)
scrollPreserve="3m"
startDate = "2016-07-17T00:00:00"
endDate = "2016-07-25T00:00:00"
tenMin = np.multiply(10,60)
stampStart = conAtlasTime(startDate) - tenMin
stampEnd = conAtlasTime(endDate)
tasktype = ["DIGIRECO", "RECO", "DIGI", "DataProcessing"]
#tasktype = ["gensim"]
loc = {}
loc["location"] = np.array([])
def esConAgg(field, task):
queryBody={"query" :
{"bool": {
"must": [
{"match" :
{"CMS_JobType" : "Processing"}
},
#{"range" :
# {"EventRate" : {"gte" : "0"}}
#},
{"exists" :
{ "field" : "ChirpCMSSWEventRate" }
},
{"exists" :
{ "field" : "InputGB" }
},
{"match" :
{"TaskType" : task}
},
{"range" : {
"JobFinishedHookDone" : {
"gt" : int(conAtlasTime(startDate)),
"lt" : int(conAtlasTime(endDate))
}
}},
{"match" :
{"DataLocationsCount" : 1}
},
{"match" :
{"InputData" : "Offsite"}
}
]#,
#"filter" : {
# "term" : { "TaskType" : task }
#}
}
},
"aggs": {
"dev": {
"terms": {"field":field}
}
}
}
scannerCon = esCon.search(index="cms-*",
body=queryBody,
search_type="query_then_fetch",
scroll=scrollPreserve)
scrollIdCon = scannerCon['aggregations']['dev']
conTotalRec = scrollIdCon['buckets']
arrRet = np.array([])
pretty.pprint(conTotalRec)
pretty.pprint(scrollIdCon)
    if len(conTotalRec) == 0:  # no aggregation buckets returned
return None
else:
for hit in conTotalRec:
arrRet = np.append(arrRet, hit['key'])
return arrRet
returnedVal = esConAgg("Workflow", "DataProcessing")
pretty.pprint(returnedVal)
| mit |
tseaver/google-cloud-python | automl/google/cloud/automl_v1beta1/tables/tables_client.py | 1 | 123114 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tables helper for the google.cloud.automl_v1beta1 AutoML API"""
import pkg_resources
import logging
from google.api_core.gapic_v1 import client_info
from google.api_core import exceptions
from google.cloud.automl_v1beta1 import gapic
from google.cloud.automl_v1beta1.proto import data_types_pb2
from google.cloud.automl_v1beta1.tables import gcs_client
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version
_LOGGER = logging.getLogger(__name__)
class TablesClient(object):
"""
AutoML Tables API helper.
This is intended to simplify usage of the auto-generated python client,
in particular for the `AutoML Tables product
<https://cloud.google.com/automl-tables/>`_.
"""
def __init__(
self,
project=None,
region="us-central1",
credentials=None,
client=None,
prediction_client=None,
gcs_client=None,
**kwargs
):
"""Constructor.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ...     credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
Args:
project (Optional[str]): The project ID of the GCP project all
future calls will default to. Most methods take `project` as an
optional parameter, and can override your choice of `project`
supplied here.
region (Optional[str]): The region all future calls will
default to. Most methods take `region` as an optional
parameter, and can override your choice of `region` supplied
here. Note, only `us-central1` is supported to-date.
transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]):
A transport instance, responsible for actually making the API
calls. The default transport uses the gRPC protocol. This
argument may also be a callable which returns a transport
instance. Callables will be sent the credentials as the first
argument and the default transport class as the second
argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
version = _GAPIC_LIBRARY_VERSION
user_agent = "automl-tables-wrapper/{}".format(version)
client_info_ = kwargs.get("client_info")
if client_info_ is None:
client_info_ = client_info.ClientInfo(
user_agent=user_agent, gapic_version=version
)
else:
client_info_.user_agent = user_agent
client_info_.gapic_version = version
if client is None:
self.auto_ml_client = gapic.auto_ml_client.AutoMlClient(
credentials=credentials, client_info=client_info_, **kwargs
)
else:
self.auto_ml_client = client
if prediction_client is None:
self.prediction_client = gapic.prediction_service_client.PredictionServiceClient(
credentials=credentials, client_info=client_info_, **kwargs
)
else:
self.prediction_client = prediction_client
self.project = project
self.region = region
self.credentials = credentials
self.gcs_client = gcs_client
def __lookup_by_display_name(self, object_type, items, display_name):
relevant_items = [i for i in items if i.display_name == display_name]
if len(relevant_items) == 0:
raise exceptions.NotFound(
"The {} with display_name='{}' was not found.".format(
object_type, display_name
)
)
elif len(relevant_items) == 1:
return relevant_items[0]
else:
raise ValueError(
(
"Multiple {}s match display_name='{}': {}\n\n"
"Please use the `.name` (unique identifier) field instead"
).format(
object_type,
display_name,
", ".join([str(i) for i in relevant_items]),
)
)
def __location_path(self, project=None, region=None):
if project is None:
if self.project is None:
raise ValueError(
"Either initialize your client with a value "
"for 'project', or provide 'project' as a "
"parameter for this method."
)
project = self.project
if region is None:
if self.region is None:
raise ValueError(
"Either initialize your client with a value "
"for 'region', or provide 'region' as a "
"parameter for this method."
)
region = self.region
return self.auto_ml_client.location_path(project, region)
# the returned metadata object doesn't allow for updating fields, so
# we need to manually copy user-updated fields over
def __update_metadata(self, metadata, k, v):
new_metadata = {}
new_metadata["ml_use_column_spec_id"] = metadata.ml_use_column_spec_id
new_metadata["weight_column_spec_id"] = metadata.weight_column_spec_id
new_metadata["target_column_spec_id"] = metadata.target_column_spec_id
new_metadata[k] = v
return new_metadata
def __dataset_from_args(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
project=None,
region=None,
**kwargs
):
if dataset is None and dataset_display_name is None and dataset_name is None:
raise ValueError(
"One of 'dataset', 'dataset_name' or "
"'dataset_display_name' must be set."
)
# we prefer to make a live call here in the case that the
# dataset object is out-of-date
if dataset is not None:
dataset_name = dataset.name
return self.get_dataset(
dataset_display_name=dataset_display_name,
dataset_name=dataset_name,
project=project,
region=region,
**kwargs
)
def __model_from_args(
self,
model=None,
model_display_name=None,
model_name=None,
project=None,
region=None,
**kwargs
):
if model is None and model_display_name is None and model_name is None:
raise ValueError(
"One of 'model', 'model_name' or " "'model_display_name' must be set."
)
# we prefer to make a live call here in the case that the
# model object is out-of-date
if model is not None:
model_name = model.name
return self.get_model(
model_display_name=model_display_name,
model_name=model_name,
project=project,
region=region,
**kwargs
)
def __dataset_name_from_args(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
project=None,
region=None,
**kwargs
):
if dataset is None and dataset_display_name is None and dataset_name is None:
raise ValueError(
"One of 'dataset', 'dataset_name' or "
"'dataset_display_name' must be set."
)
if dataset_name is None:
if dataset is None:
dataset = self.get_dataset(
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
dataset_name = dataset.name
else:
# we do this to force a NotFound error when needed
self.get_dataset(
dataset_name=dataset_name, project=project, region=region, **kwargs
)
return dataset_name
def __table_spec_name_from_args(
self,
table_spec_index=0,
dataset=None,
dataset_display_name=None,
dataset_name=None,
project=None,
region=None,
**kwargs
):
dataset_name = self.__dataset_name_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
table_specs = [
t for t in self.list_table_specs(dataset_name=dataset_name, **kwargs)
]
table_spec_full_id = table_specs[table_spec_index].name
return table_spec_full_id
def __model_name_from_args(
self,
model=None,
model_display_name=None,
model_name=None,
project=None,
region=None,
**kwargs
):
if model is None and model_display_name is None and model_name is None:
raise ValueError(
"One of 'model', 'model_name' or " "'model_display_name' must be set."
)
if model_name is None:
if model is None:
model = self.get_model(
model_display_name=model_display_name,
project=project,
region=region,
**kwargs
)
model_name = model.name
else:
# we do this to force a NotFound error when needed
self.get_model(
model_name=model_name, project=project, region=region, **kwargs
)
return model_name
def __log_operation_info(self, message, op):
name = "UNKNOWN"
try:
if (
op is not None
and op.operation is not None
and op.operation.name is not None
):
name = op.operation.name
except AttributeError:
pass
_LOGGER.info(
(
"Operation '{}' is running in the background. The returned "
"Operation '{}' can be used to query or block on the status "
"of this operation. Ending your python session will _not_ "
"cancel this operation. Read the documentation here:\n\n"
"\thttps://googleapis.dev/python/google-api-core/latest/operation.html\n\n"
"for more information on the Operation class."
).format(message, name)
)
return op
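    # Resolve a column spec's fully-qualified name from either its display name or
    # its name, raising NotFound when no matching column exists on the table spec.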
def __column_spec_name_from_args(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
table_spec_name=None,
table_spec_index=0,
column_spec_name=None,
column_spec_display_name=None,
project=None,
region=None,
**kwargs
):
column_specs = self.list_column_specs(
dataset=dataset,
dataset_display_name=dataset_display_name,
dataset_name=dataset_name,
table_spec_name=table_spec_name,
table_spec_index=table_spec_index,
project=project,
region=region,
**kwargs
)
if column_spec_display_name is not None:
column_specs = {s.display_name: s for s in column_specs}
if column_specs.get(column_spec_display_name) is None:
raise exceptions.NotFound(
"No column with "
+ "column_spec_display_name: '{}' found".format(
column_spec_display_name
)
)
column_spec_name = column_specs[column_spec_display_name].name
elif column_spec_name is not None:
column_specs = {s.name: s for s in column_specs}
if column_specs.get(column_spec_name) is None:
raise exceptions.NotFound(
"No column with "
+ "column_spec_name: '{}' found".format(column_spec_name)
)
else:
raise ValueError(
"Either supply 'column_spec_name' or "
"'column_spec_display_name' for the column to update"
)
return column_spec_name
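    # Map an AutoML Tables type code and a raw value to the protobuf Value-style
    # dict expected by the prediction request; a None value maps to a null_value.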
def __type_code_to_value_type(self, type_code, value):
if value is None:
return {"null_value": 0}
elif type_code == data_types_pb2.FLOAT64:
return {"number_value": value}
elif type_code == data_types_pb2.TIMESTAMP:
return {"string_value": value}
elif type_code == data_types_pb2.STRING:
return {"string_value": value}
elif type_code == data_types_pb2.ARRAY:
return {"list_value": value}
elif type_code == data_types_pb2.STRUCT:
return {"struct_value": value}
elif type_code == data_types_pb2.CATEGORY:
return {"string_value": value}
else:
raise ValueError("Unknown type_code: {}".format(type_code))
def __ensure_gcs_client_is_initialized(self, credentials, project):
"""Checks if GCS client is initialized. Initializes it if not.
Args:
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
project (str): The ID of the project to use with the GCS
client. If none is specified, the client will attempt to
ascertain the credentials from the environment.
"""
if self.gcs_client is None:
self.gcs_client = gcs_client.GcsClient(
project=project, credentials=credentials
)
def list_datasets(self, project=None, region=None, **kwargs):
"""List all datasets in a particular project and region.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ...     credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> ds = client.list_datasets()
>>>
>>> for d in ds:
... # do something
... pass
...
Args:
project (Optional[str]): The ID of the project that owns the
datasets. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset`
instances. You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
return self.auto_ml_client.list_datasets(
self.__location_path(project=project, region=region), **kwargs
)
def get_dataset(
self,
project=None,
region=None,
dataset_name=None,
dataset_display_name=None,
**kwargs
):
"""Gets a single dataset in a particular project and region.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ...     credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> d = client.get_dataset(dataset_display_name='my_dataset')
>>>
Args:
project (Optional[str]): The ID of the project that owns the
dataset. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
dataset_name (Optional[str]):
This is the fully-qualified name generated by the AutoML API
for this dataset. This is not to be confused with the
human-assigned `dataset_display_name` that is provided when
creating a dataset. Either `dataset_name` or
`dataset_display_name` must be provided.
dataset_display_name (Optional[str]):
This is the name you provided for the dataset when first
creating it. Either `dataset_name` or `dataset_display_name`
must be provided.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance if
found, `None` otherwise.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
if dataset_name is None and dataset_display_name is None:
raise ValueError(
"One of 'dataset_name' or " "'dataset_display_name' must be set."
)
if dataset_name is not None:
return self.auto_ml_client.get_dataset(dataset_name, **kwargs)
return self.__lookup_by_display_name(
"dataset",
self.list_datasets(project, region, **kwargs),
dataset_display_name,
)
def create_dataset(
self, dataset_display_name, metadata={}, project=None, region=None, **kwargs
):
"""Create a dataset. Keep in mind, importing data is a separate step.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ...     credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> d = client.create_dataset(dataset_display_name='my_dataset')
>>>
Args:
project (Optional[str]): The ID of the project that will own the
dataset. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
dataset_display_name (str):
A human-readable name to refer to this dataset by.
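            metadata (Optional[dict]):
                Optional `TablesDatasetMetadata` fields to set on the new
                dataset; this is passed through as the dataset's
                `tables_dataset_metadata` in the create request.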
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
return self.auto_ml_client.create_dataset(
self.__location_path(project, region),
{"display_name": dataset_display_name, "tables_dataset_metadata": metadata},
**kwargs
)
def delete_dataset(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
project=None,
region=None,
**kwargs
):
"""Deletes a dataset. This does not delete any models trained on
this dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ...     credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> op = client.delete_dataset(dataset_display_name='my_dataset')
>>>
>>> op.result() # blocks on delete request
>>>
Args:
project (Optional[str]): The ID of the project that owns the
dataset. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to
delete. This must be supplied if `dataset` or `dataset_name`
are not supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
delete. This must be supplied if `dataset_display_name` or
`dataset` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to delete. This must be
supplied if `dataset_display_name` or `dataset_name` are not
supplied.
Returns:
google.api_core.operation.Operation:
An operation future that can be used to check for
completion synchronously or asynchronously.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
try:
dataset_name = self.__dataset_name_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
# delete is idempotent
except exceptions.NotFound:
return None
op = self.auto_ml_client.delete_dataset(dataset_name, **kwargs)
self.__log_operation_info("Delete dataset", op)
return op
def import_data(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
pandas_dataframe=None,
gcs_input_uris=None,
bigquery_input_uri=None,
project=None,
region=None,
credentials=None,
**kwargs
):
"""Imports data into a dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ...     credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> d = client.create_dataset(dataset_display_name='my_dataset')
>>>
>>> response = client.import_data(dataset=d,
... gcs_input_uris='gs://cloud-ml-tables-data/bank-marketing.csv')
...
>>> def callback(operation_future):
... result = operation_future.result()
...
>>> response.add_done_callback(callback)
>>>
Args:
project (Optional[str]): The ID of the project that owns the
dataset. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to import
data into. This must be supplied if `dataset` or `dataset_name`
are not supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
import data into. This must be supplied if
`dataset_display_name` or `dataset` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to import data into. This must
be supplied if `dataset_display_name` or `dataset_name` are not
supplied.
pandas_dataframe (Optional[pandas.DataFrame]):
A Pandas Dataframe object containing the data to import. The data
will be converted to CSV, and this CSV will be staged to GCS in
`gs://{project}-automl-tables-staging/{uploaded_csv_name}`
This parameter must be supplied if neither `gcs_input_uris` nor
`bigquery_input_uri` is supplied.
gcs_input_uris (Optional[Union[str, Sequence[str]]]):
Either a single `gs://..` prefixed URI, or a list of URIs
referring to GCS-hosted CSV files containing the data to
import. This must be supplied if neither `bigquery_input_uri`
nor `pandas_dataframe` is supplied.
bigquery_input_uri (Optional[str]):
A URI pointing to the BigQuery table containing the data to
import. This must be supplied if neither `gcs_input_uris` nor
`pandas_dataframe` is supplied.
Returns:
google.api_core.operation.Operation:
An operation future that can be used to check for
completion synchronously or asynchronously.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
dataset_name = self.__dataset_name_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
request = {}
if pandas_dataframe is not None:
project = project or self.project
region = region or self.region
credentials = credentials or self.credentials
self.__ensure_gcs_client_is_initialized(credentials, project)
self.gcs_client.ensure_bucket_exists(project, region)
gcs_input_uri = self.gcs_client.upload_pandas_dataframe(pandas_dataframe)
request = {"gcs_source": {"input_uris": [gcs_input_uri]}}
elif gcs_input_uris is not None:
            if not isinstance(gcs_input_uris, list):
gcs_input_uris = [gcs_input_uris]
request = {"gcs_source": {"input_uris": gcs_input_uris}}
elif bigquery_input_uri is not None:
request = {"bigquery_source": {"input_uri": bigquery_input_uri}}
else:
raise ValueError(
"One of 'gcs_input_uris', or 'bigquery_input_uri', or 'pandas_dataframe' must be set."
)
op = self.auto_ml_client.import_data(dataset_name, request, **kwargs)
self.__log_operation_info("Data import", op)
return op
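    # A hedged usage sketch (illustration only): importing a local pandas
    # DataFrame, which the client stages to GCS as a CSV before import.
    # Assumes an initialized `client`, an existing dataset named 'my_dataset',
    # and that pandas is installed; column names and values are placeholders.
    #
    #   import pandas as pd
    #   df = pd.DataFrame({"age": [39, 42], "deposit": ["yes", "no"]})
    #   op = client.import_data(dataset_display_name='my_dataset',
    #                           pandas_dataframe=df)
    #   op.result()  # blocks until the staged CSV has been imported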
def export_data(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
gcs_output_uri_prefix=None,
bigquery_output_uri=None,
project=None,
region=None,
**kwargs
):
"""Exports data from a dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> d = client.create_dataset(dataset_display_name='my_dataset')
>>>
>>> response = client.export_data(dataset=d,
... gcs_output_uri_prefix='gs://cloud-ml-tables-data/bank-marketing.csv')
...
>>> def callback(operation_future):
... result = operation_future.result()
...
>>> response.add_done_callback(callback)
>>>
Args:
project (Optional[str]): The ID of the project that owns the
dataset. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to export
data from. This must be supplied if `dataset` or `dataset_name`
are not supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
export data from. This must be supplied if
`dataset_display_name` or `dataset` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to export data from. This must
be supplied if `dataset_display_name` or `dataset_name` are not
supplied.
gcs_output_uri_prefix (Optional[Union[str, Sequence[str]]]):
A single `gs://..` prefixed URI to export to. This must be
supplied if `bigquery_output_uri` is not.
bigquery_output_uri (Optional[str]):
A URI pointing to the BigQuery table containing the data to
export. This must be supplied if `gcs_output_uri_prefix` is not.
Returns:
google.api_core.operation.Operation:
An operation future that can be used to check for
completion synchronously or asynchronously.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
dataset_name = self.__dataset_name_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
request = {}
if gcs_output_uri_prefix is not None:
request = {"gcs_destination": {"output_uri_prefix": gcs_output_uri_prefix}}
elif bigquery_output_uri is not None:
request = {"bigquery_destination": {"output_uri": bigquery_output_uri}}
else:
raise ValueError(
"One of 'gcs_output_uri_prefix', or 'bigquery_output_uri' must be set."
)
op = self.auto_ml_client.export_data(dataset_name, request, **kwargs)
self.__log_operation_info("Export data", op)
return op
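    # A hedged usage sketch (illustration only): exporting a dataset to
    # BigQuery rather than GCS. Assumes an initialized `client`; the BigQuery
    # URI below is a placeholder project reference.
    #
    #   op = client.export_data(dataset_display_name='my_dataset',
    #                           bigquery_output_uri='bq://my-project')
    #   op.result()  # blocks until the export completes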
def get_table_spec(self, table_spec_name, project=None, region=None, **kwargs):
"""Gets a single table spec in a particular project and region.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> d = client.get_table_spec('my_table_spec')
>>>
Args:
table_spec_name (str):
This is the fully-qualified name generated by the AutoML API
for this table spec.
project (Optional[str]): The ID of the project that owns the
table. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
return self.auto_ml_client.get_table_spec(table_spec_name, **kwargs)
def list_table_specs(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
project=None,
region=None,
**kwargs
):
"""Lists table specs.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
            >>> for s in client.list_table_specs(dataset_display_name='my_dataset'):
... # process the spec
... pass
...
Args:
project (Optional[str]): The ID of the project that owns the
dataset. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to read
specs from. This must be supplied if `dataset` or
`dataset_name` are not supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to read
specs from. This must be supplied if `dataset_display_name` or
`dataset` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to read specs from. This must
be supplied if `dataset_display_name` or `dataset_name` are not
supplied.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of
:class:`~google.cloud.automl_v1beta1.types.TableSpec` instances.
You can also iterate over the pages of the response using its
`pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
dataset_name = self.__dataset_name_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
return self.auto_ml_client.list_table_specs(dataset_name, **kwargs)
def get_column_spec(self, column_spec_name, project=None, region=None, **kwargs):
"""Gets a single column spec in a particular project and region.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> d = client.get_column_spec('my_column_spec')
>>>
Args:
column_spec_name (str):
This is the fully-qualified name generated by the AutoML API
for this column spec.
project (Optional[str]): The ID of the project that owns the
column. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
return self.auto_ml_client.get_column_spec(column_spec_name, **kwargs)
def list_column_specs(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
table_spec_name=None,
table_spec_index=0,
project=None,
region=None,
**kwargs
):
"""Lists column specs.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
            >>> for s in client.list_column_specs(dataset_display_name='my_dataset'):
... # process the spec
... pass
...
Args:
project (Optional[str]): The ID of the project that owns the
columns. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
table_spec_name (Optional[str]):
The AutoML-assigned name for the table whose specs you want to
read. If not supplied, the client can determine this name from
a source `Dataset` object.
table_spec_index (Optional[int]):
If no `table_spec_name` was provided, we use this index to
determine which table to read column specs from.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to read
specs from. If no `table_spec_name` is supplied, this will be
used together with `table_spec_index` to infer the name of
table to read specs from. This must be supplied if
`table_spec_name`, `dataset` or `dataset_name` are not
supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to read
specs from. If no `table_spec_name` is supplied, this will be
used together with `table_spec_index` to infer the name of
table to read specs from. This must be supplied if
`table_spec_name`, `dataset` or `dataset_display_name` are not
supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to read specs from. If no
`table_spec_name` is supplied, this will be used together with
`table_spec_index` to infer the name of table to read specs
from. This must be supplied if `table_spec_name`,
`dataset_name` or `dataset_display_name` are not supplied.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of
:class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instances.
You can also iterate over the pages of the response using its
`pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
if table_spec_name is None:
table_specs = [
t
for t in self.list_table_specs(
dataset=dataset,
dataset_display_name=dataset_display_name,
dataset_name=dataset_name,
project=project,
region=region,
**kwargs
)
]
table_spec_name = table_specs[table_spec_index].name
return self.auto_ml_client.list_column_specs(table_spec_name, **kwargs)
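    # A hedged usage sketch (illustration only): building a display-name ->
    # column-spec map, a common prelude to the update/set_* helpers below.
    # Assumes an initialized `client` and a dataset named 'my_dataset'; the
    # 'Income' column display name is a placeholder.
    #
    #   specs = {
    #       s.display_name: s
    #       for s in client.list_column_specs(dataset_display_name='my_dataset')
    #   }
    #   print(specs['Income'].data_type.type_code)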
def update_column_spec(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
table_spec_name=None,
table_spec_index=0,
column_spec_name=None,
column_spec_display_name=None,
type_code=None,
nullable=None,
project=None,
region=None,
**kwargs
):
"""Updates a column's specs.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
            >>> client.update_column_spec(dataset_display_name='my_dataset',
... column_spec_display_name='Outcome', type_code='CATEGORY')
...
Args:
dataset (Optional[Dataset]):
The `Dataset` instance you want to update specs on. If no
`table_spec_name` is supplied, this will be used together with
`table_spec_index` to infer the name of table to update specs
on. This must be supplied if `table_spec_name`, `dataset_name`
or `dataset_display_name` are not supplied.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to update
specs on. If no `table_spec_name` is supplied, this will be
used together with `table_spec_index` to infer the name of
table to update specs on. This must be supplied if
`table_spec_name`, `dataset` or `dataset_name` are not
supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
                update specs on. If no `table_spec_name` is supplied, this
will be used together with `table_spec_index` to infer the name
of table to update specs on. This must be supplied if
`table_spec_name`, `dataset` or `dataset_display_name` are not
supplied.
table_spec_name (Optional[str]):
The AutoML-assigned name for the table whose specs you want to
update. If not supplied, the client can determine this name
from a source `Dataset` object.
table_spec_index (Optional[int]):
If no `table_spec_name` was provided, we use this index to
determine which table to update column specs on.
column_spec_name (Optional[str]):
                The AutoML-assigned name for the column you want to
update.
column_spec_display_name (Optional[str]):
The human-readable name of the column you want to update. If
this is supplied in place of `column_spec_name`, you also need
to provide either a way to lookup the source dataset (using one
of the `dataset*` kwargs), or the `table_spec_name` of the
table this column belongs to.
type_code (Optional[str]):
The desired 'type_code' of the column. For more information
on the available types, please see the documentation:
https://cloud.google.com/automl-tables/docs/reference/rpc/google.cloud.automl.v1beta1#typecode
nullable (Optional[bool]):
                Set to `True` or `False` to specify whether this column's
                value is expected to be present in all rows.
project (Optional[str]): The ID of the project that owns the
columns. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
column_spec_name = self.__column_spec_name_from_args(
dataset=dataset,
dataset_display_name=dataset_display_name,
dataset_name=dataset_name,
table_spec_name=table_spec_name,
table_spec_index=table_spec_index,
column_spec_name=column_spec_name,
column_spec_display_name=column_spec_display_name,
project=project,
region=region,
**kwargs
)
# type code must always be set
if type_code is None:
# this index is safe, we would have already thrown a NotFound
# had the column_spec_name not existed
type_code = {
s.name: s
for s in self.list_column_specs(
dataset=dataset,
dataset_display_name=dataset_display_name,
dataset_name=dataset_name,
table_spec_name=table_spec_name,
table_spec_index=table_spec_index,
project=project,
region=region,
**kwargs
)
}[column_spec_name].data_type.type_code
data_type = {}
if nullable is not None:
data_type["nullable"] = nullable
data_type["type_code"] = type_code
request = {"name": column_spec_name, "data_type": data_type}
return self.auto_ml_client.update_column_spec(request, **kwargs)
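    # A hedged usage sketch (illustration only): marking a column categorical
    # and nullable in one call. Assumes an initialized `client`; the dataset
    # and column display names are placeholders.
    #
    #   client.update_column_spec(dataset_display_name='my_dataset',
    #                             column_spec_display_name='Outcome',
    #                             type_code='CATEGORY',
    #                             nullable=True)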
def set_target_column(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
table_spec_name=None,
table_spec_index=0,
column_spec_name=None,
column_spec_display_name=None,
project=None,
region=None,
**kwargs
):
"""Sets the target column for a given table.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> client.set_target_column(dataset_display_name='my_dataset',
... column_spec_display_name='Income')
...
Args:
project (Optional[str]): The ID of the project that owns the
table. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
column_spec_name (Optional[str]):
                The AutoML-assigned name for the column you want to set as
the target column.
column_spec_display_name (Optional[str]):
The human-readable name of the column you want to set as the
target column. If this is supplied in place of
`column_spec_name`, you also need to provide either a way to
lookup the source dataset (using one of the `dataset*` kwargs),
or the `table_spec_name` of the table this column belongs to.
table_spec_name (Optional[str]):
The AutoML-assigned name for the table whose target column you
                want to set. If not supplied, the client can determine this
name from a source `Dataset` object.
table_spec_index (Optional[int]):
If no `table_spec_name` or `column_spec_name` was provided, we
use this index to determine which table to set the target
column on.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to update
the target column of. If no `table_spec_name` is supplied, this
will be used together with `table_spec_index` to infer the name
of table to update the target column of. This must be supplied
if `table_spec_name`, `dataset` or `dataset_name` are not
supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
update the target column of. If no `table_spec_name` is
supplied, this will be used together with `table_spec_index` to
infer the name of table to update the target column of. This
must be supplied if `table_spec_name`, `dataset` or
`dataset_display_name` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to update the target column of.
If no `table_spec_name` is supplied, this will be used together
with `table_spec_index` to infer the name of table to update
the target column of. This must be supplied if
`table_spec_name`, `dataset_name` or `dataset_display_name` are
not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
column_spec_name = self.__column_spec_name_from_args(
dataset=dataset,
dataset_display_name=dataset_display_name,
dataset_name=dataset_name,
table_spec_name=table_spec_name,
table_spec_index=table_spec_index,
column_spec_name=column_spec_name,
column_spec_display_name=column_spec_display_name,
project=project,
region=region,
**kwargs
)
column_spec_id = column_spec_name.rsplit("/", 1)[-1]
dataset = self.__dataset_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
metadata = dataset.tables_dataset_metadata
metadata = self.__update_metadata(
metadata, "target_column_spec_id", column_spec_id
)
request = {"name": dataset.name, "tables_dataset_metadata": metadata}
return self.auto_ml_client.update_dataset(request, **kwargs)
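    # A hedged usage sketch (illustration only): pointing the dataset at its
    # label column before training and reading back the stored spec id.
    # Assumes an initialized `client`; names are placeholders.
    #
    #   dataset = client.set_target_column(dataset_display_name='my_dataset',
    #                                      column_spec_display_name='Deposit')
    #   print(dataset.tables_dataset_metadata.target_column_spec_id)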
def set_time_column(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
table_spec_name=None,
table_spec_index=0,
column_spec_name=None,
column_spec_display_name=None,
project=None,
region=None,
**kwargs
):
"""Sets the time column which designates which data will be of type
timestamp and will be used for the timeseries data.
This column must be of type timestamp.
Example:
>>> from google.cloud import automl_v1beta1
            >>>
            >>> from google.oauth2 import service_account
            >>>
            >>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> client.set_time_column(dataset_display_name='my_dataset',
... column_spec_display_name='Unix Time')
...
Args:
project (Optional[str]): The ID of the project that owns the
table. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
column_spec_name (Optional[str]):
                The AutoML-assigned name for the column you want to set as
the time column.
column_spec_display_name (Optional[str]):
The human-readable name of the column you want to set as the
time column. If this is supplied in place of
`column_spec_name`, you also need to provide either a way to
lookup the source dataset (using one of the `dataset*` kwargs),
or the `table_spec_name` of the table this column belongs to.
table_spec_name (Optional[str]):
The AutoML-assigned name for the table whose time column
                you want to set. If not supplied, the client can determine
this name from a source `Dataset` object.
table_spec_index (Optional[int]):
If no `table_spec_name` or `column_spec_name` was provided, we
use this index to determine which table to set the time
column on.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to update
the time column of. If no `table_spec_name` is supplied,
this will be used together with `table_spec_index` to infer the
name of table to update the time column of. This must be
supplied if `table_spec_name`, `dataset` or `dataset_name` are
not supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
update the time column of. If no `table_spec_name` is
supplied, this will be used together with `table_spec_index` to
infer the name of table to update the time column of.
This must be supplied if `table_spec_name`, `dataset` or
`dataset_display_name` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to update the time column
of. If no `table_spec_name` is supplied, this will be used
together with `table_spec_index` to infer the name of table to
update the time column of. This must be supplied if
`table_spec_name`, `dataset_name` or `dataset_display_name` are
not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
column_spec_name = self.__column_spec_name_from_args(
dataset=dataset,
dataset_display_name=dataset_display_name,
dataset_name=dataset_name,
table_spec_name=table_spec_name,
table_spec_index=table_spec_index,
column_spec_name=column_spec_name,
column_spec_display_name=column_spec_display_name,
project=project,
region=region,
**kwargs
)
column_spec_id = column_spec_name.rsplit("/", 1)[-1]
dataset_name = self.__dataset_name_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
table_spec_full_id = self.__table_spec_name_from_args(
dataset_name=dataset_name, **kwargs
)
my_table_spec = {
"name": table_spec_full_id,
"time_column_spec_id": column_spec_id,
}
return self.auto_ml_client.update_table_spec(my_table_spec, **kwargs)
def clear_time_column(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
project=None,
region=None,
**kwargs
):
"""Clears the time column which designates which data will be of type
timestamp and will be used for the timeseries data.
Example:
>>> from google.cloud import automl_v1beta1
            >>>
            >>> from google.oauth2 import service_account
            >>>
            >>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> client.clear_time_column(dataset_display_name='my_dataset')
>>>
Args:
project (Optional[str]): The ID of the project that owns the
table. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to update
the time column of. If no `table_spec_name` is supplied,
this will be used together with `table_spec_index` to infer the
name of table to update the time column of. This must be
supplied if `table_spec_name`, `dataset` or `dataset_name` are
not supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
update the time column of. If no `table_spec_name` is
supplied, this will be used together with `table_spec_index` to
infer the name of table to update the time column of.
This must be supplied if `table_spec_name`, `dataset` or
`dataset_display_name` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to update the time column
of. If no `table_spec_name` is supplied, this will be used
together with `table_spec_index` to infer the name of table to
update the time column of. This must be supplied if
`table_spec_name`, `dataset_name` or `dataset_display_name` are
not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
dataset_name = self.__dataset_name_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
table_spec_full_id = self.__table_spec_name_from_args(
dataset_name=dataset_name, **kwargs
)
my_table_spec = {"name": table_spec_full_id, "time_column_spec_id": None}
return self.auto_ml_client.update_table_spec(my_table_spec, **kwargs)
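    # A hedged usage sketch (illustration only): designating, then clearing,
    # the time column used for time-series data. Assumes an initialized
    # `client`; 'Timestamp' is a placeholder TIMESTAMP-typed column.
    #
    #   client.set_time_column(dataset_display_name='my_dataset',
    #                          column_spec_display_name='Timestamp')
    #   client.clear_time_column(dataset_display_name='my_dataset')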
def set_weight_column(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
table_spec_name=None,
table_spec_index=0,
column_spec_name=None,
column_spec_display_name=None,
project=None,
region=None,
**kwargs
):
"""Sets the weight column for a given table.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> client.set_weight_column(dataset_display_name='my_dataset',
... column_spec_display_name='Income')
...
Args:
project (Optional[str]): The ID of the project that owns the
table. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
column_spec_name (Optional[str]):
                The AutoML-assigned name for the column you want to
set as the weight column.
column_spec_display_name (Optional[str]):
The human-readable name of the column you want to set as the
weight column. If this is supplied in place of
`column_spec_name`, you also need to provide either a way to
lookup the source dataset (using one of the `dataset*` kwargs),
or the `table_spec_name` of the table this column belongs to.
table_spec_name (Optional[str]):
The AutoML-assigned name for the table whose weight column you
                want to set. If not supplied, the client can determine this
name from a source `Dataset` object.
table_spec_index (Optional[int]):
If no `table_spec_name` or `column_spec_name` was provided, we
use this index to determine which table to set the weight
column on.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to update
the weight column of. If no `table_spec_name` is supplied, this
will be used together with `table_spec_index` to infer the name
of table to update the weight column of. This must be supplied
if `table_spec_name`, `dataset` or `dataset_name` are not
supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
update the weight column of. If no `table_spec_name` is
supplied, this will be used together with `table_spec_index` to
infer the name of table to update the weight column of. This
must be supplied if `table_spec_name`, `dataset` or
`dataset_display_name` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to update the weight column of.
If no `table_spec_name` is supplied, this will be used together
with `table_spec_index` to infer the name of table to update
the weight column of. This must be supplied if
`table_spec_name`, `dataset_name` or `dataset_display_name` are
not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
column_spec_name = self.__column_spec_name_from_args(
dataset=dataset,
dataset_display_name=dataset_display_name,
dataset_name=dataset_name,
table_spec_name=table_spec_name,
table_spec_index=table_spec_index,
column_spec_name=column_spec_name,
column_spec_display_name=column_spec_display_name,
project=project,
region=region,
**kwargs
)
column_spec_id = column_spec_name.rsplit("/", 1)[-1]
dataset = self.__dataset_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
metadata = dataset.tables_dataset_metadata
metadata = self.__update_metadata(
metadata, "weight_column_spec_id", column_spec_id
)
request = {"name": dataset.name, "tables_dataset_metadata": metadata}
return self.auto_ml_client.update_dataset(request, **kwargs)
def clear_weight_column(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
project=None,
region=None,
**kwargs
):
"""Clears the weight column for a given dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> client.clear_weight_column(dataset_display_name='my_dataset')
>>>
Args:
project (Optional[str]): The ID of the project that owns the
table. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to update
the weight column of. If no `table_spec_name` is supplied, this
will be used together with `table_spec_index` to infer the name
of table to update the weight column of. This must be supplied
if `table_spec_name`, `dataset` or `dataset_name` are not
supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
update the weight column of. If no `table_spec_name` is
supplied, this will be used together with `table_spec_index` to
infer the name of table to update the weight column of. This
must be supplied if `table_spec_name`, `dataset` or
`dataset_display_name` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to update the weight column of.
If no `table_spec_name` is supplied, this will be used together
with `table_spec_index` to infer the name of table to update
the weight column of. This must be supplied if
`table_spec_name`, `dataset_name` or `dataset_display_name` are
not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
dataset = self.__dataset_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
metadata = dataset.tables_dataset_metadata
metadata = self.__update_metadata(metadata, "weight_column_spec_id", None)
request = {"name": dataset.name, "tables_dataset_metadata": metadata}
return self.auto_ml_client.update_dataset(request, **kwargs)
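    # A hedged usage sketch (illustration only): weighting rows by a numeric
    # column and later reverting to equal weights. Assumes an initialized
    # `client`; the column display name is a placeholder.
    #
    #   client.set_weight_column(dataset_display_name='my_dataset',
    #                            column_spec_display_name='RowWeight')
    #   client.clear_weight_column(dataset_display_name='my_dataset')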
def set_test_train_column(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
table_spec_name=None,
table_spec_index=0,
column_spec_name=None,
column_spec_display_name=None,
project=None,
region=None,
**kwargs
):
"""Sets the test/train (ml_use) column which designates which data
belongs to the test and train sets. This column must be categorical.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> client.set_test_train_column(dataset_display_name='my_dataset',
... column_spec_display_name='TestSplit')
...
Args:
project (Optional[str]): The ID of the project that owns the
table. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
column_spec_name (Optional[str]):
                The AutoML-assigned name for the column you want to set as
the test/train column.
column_spec_display_name (Optional[str]):
The human-readable name of the column you want to set as the
test/train column. If this is supplied in place of
`column_spec_name`, you also need to provide either a way to
lookup the source dataset (using one of the `dataset*` kwargs),
or the `table_spec_name` of the table this column belongs to.
table_spec_name (Optional[str]):
The AutoML-assigned name for the table whose test/train column
                you want to set. If not supplied, the client can determine
this name from a source `Dataset` object.
table_spec_index (Optional[int]):
If no `table_spec_name` or `column_spec_name` was provided, we
use this index to determine which table to set the test/train
column on.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to update
the test/train column of. If no `table_spec_name` is supplied,
this will be used together with `table_spec_index` to infer the
name of table to update the test/train column of. This must be
supplied if `table_spec_name`, `dataset` or `dataset_name` are
not supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
update the test/train column of. If no `table_spec_name` is
supplied, this will be used together with `table_spec_index` to
infer the name of table to update the test/train column of.
This must be supplied if `table_spec_name`, `dataset` or
`dataset_display_name` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to update the test/train column
of. If no `table_spec_name` is supplied, this will be used
together with `table_spec_index` to infer the name of table to
update the test/train column of. This must be supplied if
`table_spec_name`, `dataset_name` or `dataset_display_name` are
not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
column_spec_name = self.__column_spec_name_from_args(
dataset=dataset,
dataset_display_name=dataset_display_name,
dataset_name=dataset_name,
table_spec_name=table_spec_name,
table_spec_index=table_spec_index,
column_spec_name=column_spec_name,
column_spec_display_name=column_spec_display_name,
project=project,
region=region,
**kwargs
)
column_spec_id = column_spec_name.rsplit("/", 1)[-1]
dataset = self.__dataset_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
metadata = dataset.tables_dataset_metadata
metadata = self.__update_metadata(
metadata, "ml_use_column_spec_id", column_spec_id
)
request = {"name": dataset.name, "tables_dataset_metadata": metadata}
return self.auto_ml_client.update_dataset(request, **kwargs)
def clear_test_train_column(
self,
dataset=None,
dataset_display_name=None,
dataset_name=None,
project=None,
region=None,
**kwargs
):
"""Clears the test/train (ml_use) column which designates which data
belongs to the test and train sets.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> client.clear_test_train_column(dataset_display_name='my_dataset')
>>>
Args:
project (Optional[str]): The ID of the project that owns the
table. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to update
the test/train column of. If no `table_spec_name` is supplied,
this will be used together with `table_spec_index` to infer the
name of table to update the test/train column of. This must be
supplied if `table_spec_name`, `dataset` or `dataset_name` are
not supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to
update the test/train column of. If no `table_spec_name` is
supplied, this will be used together with `table_spec_index` to
infer the name of table to update the test/train column of.
This must be supplied if `table_spec_name`, `dataset` or
`dataset_display_name` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to update the test/train column
of. If no `table_spec_name` is supplied, this will be used
together with `table_spec_index` to infer the name of table to
update the test/train column of. This must be supplied if
`table_spec_name`, `dataset_name` or `dataset_display_name` are
not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
dataset = self.__dataset_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
metadata = dataset.tables_dataset_metadata
metadata = self.__update_metadata(metadata, "ml_use_column_spec_id", None)
request = {"name": dataset.name, "tables_dataset_metadata": metadata}
return self.auto_ml_client.update_dataset(request, **kwargs)
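    # A hedged usage sketch (illustration only): using a categorical column to
    # control the ml_use (test/train) split, then returning to the automatic
    # split. Assumes an initialized `client`; 'Split' is a placeholder column.
    #
    #   client.set_test_train_column(dataset_display_name='my_dataset',
    #                                column_spec_display_name='Split')
    #   client.clear_test_train_column(dataset_display_name='my_dataset')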
def list_models(self, project=None, region=None, **kwargs):
"""List all models in a particular project and region.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> ms = client.list_models()
>>>
>>> for m in ms:
... # do something
... pass
...
Args:
project (Optional[str]): The ID of the project that owns the
models. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.automl_v1beta1.types.Model`
instances. You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
return self.auto_ml_client.list_models(
self.__location_path(project=project, region=region), **kwargs
)
def list_model_evaluations(
self,
project=None,
region=None,
model=None,
model_display_name=None,
model_name=None,
**kwargs
):
"""List all model evaluations for a given model.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> ms = client.list_model_evaluations(model_display_name='my_model')
>>>
>>> for m in ms:
... # do something
... pass
...
Args:
project (Optional[str]): The ID of the project that owns the
model. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
model_display_name (Optional[str]):
The human-readable name given to the model you want to list
evaluations for. This must be supplied if `model` or
`model_name` are not supplied.
model_name (Optional[str]):
The AutoML-assigned name given to the model you want to list
evaluations for. This must be supplied if `model_display_name`
or `model` are not supplied.
model (Optional[model]):
The `model` instance you want to list evaluations for. This
must be supplied if `model_display_name` or `model_name` are
not supplied.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of
:class:`~google.cloud.automl_v1beta1.types.ModelEvaluation`
instances. You can also iterate over the pages of the response
using its `pages` property.
            For a regression model, there will only be one evaluation. For a
            classification model there will be one for each classification
            label, as well as one for micro-averaged metrics. See more
            documentation here:
            https://cloud.google.com/automl-tables/docs/evaluate#automl-tables-list-model-evaluations-cli-curl
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
model_name = self.__model_name_from_args(
model=model,
model_name=model_name,
model_display_name=model_display_name,
project=project,
region=region,
**kwargs
)
return self.auto_ml_client.list_model_evaluations(model_name, **kwargs)
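    # A hedged usage sketch (illustration only): walking the evaluations of a
    # trained model. Assumes an initialized `client` and an existing model
    # named 'my_model'.
    #
    #   for evaluation in client.list_model_evaluations(
    #           model_display_name='my_model'):
    #       print(evaluation.name)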
def create_model(
self,
model_display_name,
dataset=None,
dataset_display_name=None,
dataset_name=None,
train_budget_milli_node_hours=None,
optimization_objective=None,
project=None,
region=None,
model_metadata=None,
include_column_spec_names=None,
exclude_column_spec_names=None,
disable_early_stopping=False,
**kwargs
):
"""Create a model. This will train your model on the given dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> m = client.create_model(
... 'my_model',
... dataset_display_name='my_dataset',
... train_budget_milli_node_hours=1000
... )
>>>
>>> m.result() # blocks on result
>>>
Args:
project (Optional[str]): The ID of the project that will own the
model. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
model_display_name (str):
A human-readable name to refer to this model by.
train_budget_milli_node_hours (int):
The amount of time (in thousandths of an hour) to spend
training. This value must be between 1,000 and 72,000 inclusive
(between 1 and 72 hours).
optimization_objective (str):
The metric AutoML tables should optimize for.
dataset_display_name (Optional[str]):
The human-readable name given to the dataset you want to train
your model on. This must be supplied if `dataset` or
`dataset_name` are not supplied.
dataset_name (Optional[str]):
The AutoML-assigned name given to the dataset you want to train
your model on. This must be supplied if `dataset_display_name`
or `dataset` are not supplied.
dataset (Optional[Dataset]):
The `Dataset` instance you want to train your model on. This
must be supplied if `dataset_display_name` or `dataset_name`
are not supplied.
model_metadata (Optional[Dict]):
Optional model metadata to supply to the client.
            include_column_spec_names (Optional[List[str]]):
                The list of display names of the columns you want to include
                when training your model.
            exclude_column_spec_names (Optional[List[str]]):
                The list of display names of the columns you want to exclude
                from training.
            disable_early_stopping (Optional[bool]):
                Set to `True` to disable early stopping. By default, early
                stopping is enabled, which means that AutoML Tables might stop
                training before the entire training budget has been used.
Returns:
google.api_core.operation.Operation:
An operation future that can be used to check for
completion synchronously or asynchronously.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
if model_metadata is None:
model_metadata = {}
if (
train_budget_milli_node_hours is None
or train_budget_milli_node_hours < 1000
or train_budget_milli_node_hours > 72000
):
raise ValueError(
"'train_budget_milli_node_hours' must be a "
"value between 1,000 and 72,000 inclusive"
)
if exclude_column_spec_names not in [
None,
[],
] and include_column_spec_names not in [None, []]:
raise ValueError(
"Cannot set both "
"'exclude_column_spec_names' and "
"'include_column_spec_names'"
)
dataset_name = self.__dataset_name_from_args(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
project=project,
region=region,
**kwargs
)
model_metadata["train_budget_milli_node_hours"] = train_budget_milli_node_hours
if optimization_objective is not None:
model_metadata["optimization_objective"] = optimization_objective
if disable_early_stopping:
model_metadata["disable_early_stopping"] = True
dataset_id = dataset_name.rsplit("/", 1)[-1]
columns = [
s
for s in self.list_column_specs(
dataset=dataset,
dataset_name=dataset_name,
dataset_display_name=dataset_display_name,
**kwargs
)
]
final_columns = []
if include_column_spec_names:
for c in columns:
if c.display_name in include_column_spec_names:
final_columns.append(c)
model_metadata["input_feature_column_specs"] = final_columns
elif exclude_column_spec_names:
for a in columns:
if a.display_name not in exclude_column_spec_names:
final_columns.append(a)
model_metadata["input_feature_column_specs"] = final_columns
request = {
"display_name": model_display_name,
"dataset_id": dataset_id,
"tables_model_metadata": model_metadata,
}
op = self.auto_ml_client.create_model(
self.__location_path(project=project, region=region), request, **kwargs
)
self.__log_operation_info("Model creation", op)
return op
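    # A hedged usage sketch (illustration only): training on a one-hour budget
    # while leaving the identifier and label columns out of the input
    # features. Assumes an initialized `client`; names are placeholders.
    #
    #   op = client.create_model(
    #       'my_model',
    #       dataset_display_name='my_dataset',
    #       train_budget_milli_node_hours=1000,
    #       exclude_column_spec_names=['RowID', 'Deposit'])
    #   model = op.result()  # blocks until training finishes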
def delete_model(
self,
model=None,
model_display_name=None,
model_name=None,
project=None,
region=None,
**kwargs
):
"""Deletes a model. Note this will not delete any datasets associated
with this model.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> op = client.delete_model(model_display_name='my_model')
>>>
>>> op.result() # blocks on delete request
>>>
Args:
project (Optional[str]): The ID of the project that owns the
model. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
model_display_name (Optional[str]):
The human-readable name given to the model you want to
delete. This must be supplied if `model` or `model_name`
are not supplied.
model_name (Optional[str]):
The AutoML-assigned name given to the model you want to
delete. This must be supplied if `model_display_name` or
`model` are not supplied.
model (Optional[model]):
The `model` instance you want to delete. This must be
supplied if `model_display_name` or `model_name` are not
supplied.
Returns:
google.api_core.operation.Operation:
An operation future that can be used to check for
completion synchronously or asynchronously.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
try:
model_name = self.__model_name_from_args(
model=model,
model_name=model_name,
model_display_name=model_display_name,
project=project,
region=region,
**kwargs
)
# delete is idempotent
except exceptions.NotFound:
return None
op = self.auto_ml_client.delete_model(model_name, **kwargs)
self.__log_operation_info("Delete model", op)
return op
def get_model_evaluation(
self, model_evaluation_name, project=None, region=None, **kwargs
):
"""Gets a single evaluation model in a particular project and region.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
            ... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json'),
... project='my-project', region='us-central1')
...
>>> d = client.get_model_evaluation('my_model_evaluation')
>>>
Args:
model_evaluation_name (str):
This is the fully-qualified name generated by the AutoML API
for this model evaluation.
project (Optional[str]): The ID of the project that owns the
model. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
return self.auto_ml_client.get_model_evaluation(model_evaluation_name, **kwargs)
def get_model(
self,
project=None,
region=None,
model_name=None,
model_display_name=None,
**kwargs
):
"""Gets a single model in a particular project and region.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json')
... project='my-project', region='us-central1')
...
>>> d = client.get_model(model_display_name='my_model')
>>>
Args:
project (Optional[str]): The ID of the project that owns the
model. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
model_name (Optional[str]):
This is the fully-qualified name generated by the AutoML API
for this model. This is not to be confused with the
human-assigned `model_display_name` that is provided when
creating a model. Either `model_name` or
`model_display_name` must be provided.
model_display_name (Optional[str]):
This is the name you provided for the model when first
creating it. Either `model_name` or `model_display_name`
must be provided.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Model` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
if model_name is None and model_display_name is None:
raise ValueError(
"One of 'model_name' or " "'model_display_name' must be set."
)
if model_name is not None:
return self.auto_ml_client.get_model(model_name, **kwargs)
return self.__lookup_by_display_name(
"model", self.list_models(project, region, **kwargs), model_display_name
)
# TODO(jonathanskim): allow deployment from just model ID
def deploy_model(
self,
model=None,
model_name=None,
model_display_name=None,
project=None,
region=None,
**kwargs
):
"""Deploys a model. This allows you make online predictions using the
model you've deployed.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json')
... project='my-project', region='us-central1')
...
>>> op = client.deploy_model(model_display_name='my_model')
>>>
>>> op.result() # blocks on deploy request
>>>
Args:
project (Optional[str]): The ID of the project that owns the
model. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
model_display_name (Optional[str]):
The human-readable name given to the model you want to
deploy. This must be supplied if `model` or `model_name`
are not supplied.
model_name (Optional[str]):
The AutoML-assigned name given to the model you want to
deploy. This must be supplied if `model_display_name` or
`model` are not supplied.
model (Optional[model]):
The `model` instance you want to deploy. This must be
supplied if `model_display_name` or `model_name` are not
supplied.
Returns:
google.api_core.operation.Operation:
An operation future that can be used to check for
completion synchronously or asynchronously.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
model_name = self.__model_name_from_args(
model=model,
model_name=model_name,
model_display_name=model_display_name,
project=project,
region=region,
**kwargs
)
op = self.auto_ml_client.deploy_model(model_name, **kwargs)
self.__log_operation_info("Deploy model", op)
return op
def undeploy_model(
self,
model=None,
model_name=None,
model_display_name=None,
project=None,
region=None,
**kwargs
):
"""Undeploys a model.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json')
... project='my-project', region='us-central1')
...
>>> op = client.undeploy_model(model_display_name='my_model')
>>>
>>> op.result() # blocks on undeploy request
>>>
Args:
project (Optional[str]): The ID of the project that owns the
model. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
model_display_name (Optional[str]):
The human-readable name given to the model you want to
undeploy. This must be supplied if `model` or `model_name`
are not supplied.
model_name (Optional[str]):
The AutoML-assigned name given to the model you want to
undeploy. This must be supplied if `model_display_name` or
`model` are not supplied.
model (Optional[model]):
The `model` instance you want to undeploy. This must be
supplied if `model_display_name` or `model_name` are not
supplied.
Returns:
google.api_core.operation.Operation:
An operation future that can be used to check for
completion synchronously or asynchronously.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
model_name = self.__model_name_from_args(
model=model,
model_name=model_name,
model_display_name=model_display_name,
project=project,
region=region,
**kwargs
)
op = self.auto_ml_client.undeploy_model(model_name, **kwargs)
self.__log_operation_info("Undeploy model", op)
return op
    # TODO(lwander): support pandas DataFrame as input type
def predict(
self,
inputs,
model=None,
model_name=None,
model_display_name=None,
project=None,
region=None,
**kwargs
):
"""Makes a prediction on a deployed model. This will fail if the model
was not deployed.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json')
... project='my-project', region='us-central1')
...
            >>> client.predict(inputs={'Age': 30, 'Income': 12, 'Category': 'A'},
... model_display_name='my_model')
...
>>> client.predict([30, 12, 'A'], model_display_name='my_model')
>>>
Args:
project (Optional[str]): The ID of the project that owns the
model. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
inputs (Union[List[str], Dict[str, str]]):
Either the sorted list of column values to predict with, or a
key-value map of column display name to value to predict with.
model_display_name (Optional[str]):
The human-readable name given to the model you want to predict
with. This must be supplied if `model` or `model_name` are not
supplied.
model_name (Optional[str]):
The AutoML-assigned name given to the model you want to predict
with. This must be supplied if `model_display_name` or `model`
are not supplied.
model (Optional[model]):
                The `model` instance you want to predict with. This must be
supplied if `model_display_name` or `model_name` are not
supplied.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.PredictResponse`
instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
model = self.__model_from_args(
model=model,
model_name=model_name,
model_display_name=model_display_name,
project=project,
region=region,
**kwargs
)
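        # The model's input feature column specs define both how many values
        # are expected and in what order; a dict keyed by column display name
        # is re-ordered to match, with None for any column not supplied.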
column_specs = model.tables_model_metadata.input_feature_column_specs
        if isinstance(inputs, dict):
inputs = [inputs.get(c.display_name, None) for c in column_specs]
if len(inputs) != len(column_specs):
raise ValueError(
(
"Dimension mismatch, the number of provided "
"inputs ({}) does not match that of the model "
"({})"
).format(len(inputs), len(column_specs))
)
values = []
for i, c in zip(inputs, column_specs):
value_type = self.__type_code_to_value_type(c.data_type.type_code, i)
values.append(value_type)
request = {"row": {"values": values}}
return self.prediction_client.predict(model.name, request, **kwargs)
def batch_predict(
self,
pandas_dataframe=None,
bigquery_input_uri=None,
bigquery_output_uri=None,
gcs_input_uris=None,
gcs_output_uri_prefix=None,
model=None,
model_name=None,
model_display_name=None,
project=None,
region=None,
credentials=None,
inputs=None,
**kwargs
):
"""Makes a batch prediction on a model. This does _not_ require the
model to be deployed.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> from google.oauth2 import service_account
>>>
>>> client = automl_v1beta1.TablesClient(
... credentials=service_account.Credentials.from_service_account_file('~/.gcp/account.json')
... project='my-project', region='us-central1')
...
>>> client.batch_predict(
... gcs_input_uris='gs://inputs/input.csv',
... gcs_output_uri_prefix='gs://outputs/',
... model_display_name='my_model'
... ).result()
...
Args:
project (Optional[str]): The ID of the project that owns the
model. If you have initialized the client with a value for
`project` it will be used if this parameter is not supplied.
Keep in mind, the service account this client was initialized
with must have access to this project.
region (Optional[str]):
If you have initialized the client with a value for `region` it
will be used if this parameter is not supplied.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
pandas_dataframe (Optional[pandas.DataFrame]):
A Pandas Dataframe object containing the data you want to predict
off of. The data will be converted to CSV, and this CSV will be
staged to GCS in `gs://{project}-automl-tables-staging/{uploaded_csv_name}`
This must be supplied if neither `gcs_input_uris` nor
`bigquery_input_uri` is supplied.
            gcs_input_uris (Optional[Union[List[str], str]]):
Either a list of or a single GCS URI containing the data you
want to predict off of. This must be supplied if neither
`pandas_dataframe` nor `bigquery_input_uri` is supplied.
            gcs_output_uri_prefix (Optional[str]):
The folder in GCS you want to write output to. This must be
supplied if `bigquery_output_uri` is not.
            bigquery_input_uri (Optional[str]):
The BigQuery table to input data from. This must be supplied if
neither `pandas_dataframe` nor `gcs_input_uris` is supplied.
            bigquery_output_uri (Optional[str]):
The BigQuery table to output data to. This must be supplied if
`gcs_output_uri_prefix` is not.
model_display_name (Optional[str]):
The human-readable name given to the model you want to predict
with. This must be supplied if `model` or `model_name` are not
supplied.
model_name (Optional[str]):
The AutoML-assigned name given to the model you want to predict
with. This must be supplied if `model_display_name` or `model`
are not supplied.
model (Optional[model]):
                The `model` instance you want to predict with. This must be
supplied if `model_display_name` or `model_name` are not
supplied.
Returns:
google.api_core.operation.Operation:
An operation future that can be used to check for
completion synchronously or asynchronously.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If required parameters are missing.
"""
model_name = self.__model_name_from_args(
model=model,
model_name=model_name,
model_display_name=model_display_name,
project=project,
region=region,
**kwargs
)
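        # Resolve the input side of the request: a pandas DataFrame is first
        # staged to GCS as a CSV, otherwise the supplied GCS URIs or BigQuery
        # table are referenced directly.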
input_request = None
if pandas_dataframe is not None:
project = project or self.project
region = region or self.region
credentials = credentials or self.credentials
self.__ensure_gcs_client_is_initialized(credentials, project)
self.gcs_client.ensure_bucket_exists(project, region)
gcs_input_uri = self.gcs_client.upload_pandas_dataframe(pandas_dataframe)
input_request = {"gcs_source": {"input_uris": [gcs_input_uri]}}
elif gcs_input_uris is not None:
            if not isinstance(gcs_input_uris, list):
gcs_input_uris = [gcs_input_uris]
input_request = {"gcs_source": {"input_uris": gcs_input_uris}}
elif bigquery_input_uri is not None:
input_request = {"bigquery_source": {"input_uri": bigquery_input_uri}}
else:
            raise ValueError(
                "One of 'pandas_dataframe', 'gcs_input_uris', or "
                "'bigquery_input_uri' must be set"
            )
output_request = None
if gcs_output_uri_prefix is not None:
output_request = {
"gcs_destination": {"output_uri_prefix": gcs_output_uri_prefix}
}
elif bigquery_output_uri is not None:
output_request = {
"bigquery_destination": {"output_uri": bigquery_output_uri}
}
else:
raise ValueError(
"One of 'gcs_output_uri_prefix'/'bigquery_output_uri' must be set"
)
op = self.prediction_client.batch_predict(
model_name, input_request, output_request, **kwargs
)
self.__log_operation_info("Batch predict", op)
return op
| apache-2.0 |
mainakibui/kobocat | onadata/apps/viewer/tests/test_export_list.py | 5 | 8203 | import os
from django.core.urlresolvers import reverse
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.viewer.models.export import Export
from onadata.apps.main.models.meta_data import MetaData
from onadata.apps.viewer.views import export_list
class TestExportList(TestBase):
def setUp(self):
super(TestExportList, self).setUp()
self._publish_transportation_form()
survey = self.surveys[0]
self._make_submission(
os.path.join(
self.this_directory, 'fixtures', 'transportation',
'instances', survey, survey + '.xml'))
def test_unauthorised_users_cannot_export_form_data(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.CSV_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
# check that the 'New Export' button is not being rendered
self.assertNotIn(
'<input title="" data-original-title="" \
class="btn large btn-primary" \
value="New Export" type="submit">', response.content)
self.assertEqual(response.status_code, 200)
def test_csv_export_list(self):
kwargs = {'username': self.user.username.upper(),
'id_string': self.xform.id_string.upper(),
'export_type': Export.CSV_EXPORT}
# test csv
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_xls_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_kml_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.KML_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_zip_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.ZIP_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_gdoc_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.GDOC_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_csv_zip_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.CSV_ZIP_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_sav_zip_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.SAV_ZIP_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_external_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.EXTERNAL_EXPORT}
server = 'http://localhost:8080/xls/23fa4c38c0054748a984ffd89021a295'
data_value = 'template 1 |{0}'.format(server)
meta = MetaData.external_export(self.xform, data_value)
custom_params = {
'meta': meta.id,
}
url = reverse(export_list, kwargs=kwargs)
count = len(Export.objects.all())
response = self.client.get(url, custom_params)
self.assertEqual(response.status_code, 200)
count1 = len(Export.objects.all())
self.assertEquals(count+1, count1)
def test_external_export_list_no_template(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.EXTERNAL_EXPORT}
url = reverse(export_list, kwargs=kwargs)
count = len(Export.objects.all())
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.assertEquals(response.content, u'No XLS Template set.')
count1 = len(Export.objects.all())
self.assertEquals(count, count1)
class TestDataExportURL(TestBase):
def setUp(self):
super(TestDataExportURL, self).setUp()
self._publish_transportation_form()
def _filename_from_disposition(self, content_disposition):
filename_pos = content_disposition.index('filename=')
self.assertTrue(filename_pos != -1)
return content_disposition[filename_pos + len('filename='):]
def test_csv_export_url(self):
self._submit_transport_instance()
url = reverse('csv_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'], 'application/csv')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.csv')
def test_csv_export_url_without_records(self):
# csv using the pandas path can throw a NoRecordsFound Exception -
# handle it gracefully
url = reverse('csv_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_xls_export_url(self):
self._submit_transport_instance()
url = reverse('xls_export', kwargs={
'username': self.user.username.upper(),
'id_string': self.xform.id_string.upper(),
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'],
'application/vnd.openxmlformats')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.xlsx')
def test_csv_zip_export_url(self):
self._submit_transport_instance()
url = reverse('csv_zip_export', kwargs={
'username': self.user.username.upper(),
'id_string': self.xform.id_string.upper(),
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'], 'application/zip')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.zip')
def test_sav_zip_export_url(self):
self._submit_transport_instance()
url = reverse('sav_zip_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'], 'application/zip')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.zip')
| bsd-2-clause |
saguziel/incubator-airflow | airflow/hooks/base_hook.py | 5 | 2571 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
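# Connections may also be provided via environment variables named
# CONN_ENV_PREFIX + the upper-cased conn_id, whose value is a connection URI.
# Illustrative (hypothetical) example:
#   export AIRFLOW_CONN_MY_POSTGRES='postgres://user:password@host:5432/schema'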
class BaseHook(object):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def get_connections(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
session.expunge_all()
session.close()
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
return db
@classmethod
def get_connection(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
else:
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
| apache-2.0 |
redcap-tools/PyCap | redcap/project.py | 2 | 36294 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""User facing class for interacting with a REDCap Project"""
import json
import warnings
import semantic_version
from .request import RCRequest, RedcapError, RequestException
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__author__ = "Scott Burns <scott.s.burnsgmail.com>"
__license__ = "MIT"
__copyright__ = "2014, Vanderbilt University"
# pylint: disable=too-many-lines
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
# pylint: disable=too-many-public-methods
# pylint: disable=redefined-builtin
class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name="", verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
"""Fill in project attributes"""
try:
self.metadata = self.__md()
except RequestException as request_fail:
raise RedcapError(
"Exporting metadata failed. Check your URL and token."
) from request_fail
try:
self.redcap_version = self.__rcv()
except Exception as general_fail:
raise RedcapError(
"Determination of REDCap version failed"
) from general_fail
self.field_names = self.filter_metadata("field_name")
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata("field_label")
self.forms = tuple(set(c["form_name"] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl("event"), "exp_event")[0]
arm_data = self._call_api(self.__basepl("arm"), "exp_arm")[0]
if isinstance(ev_data, dict) and ("error" in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ("error" in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a["arm_num"] for a in arm_data])
arm_names = tuple([a["name"] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl("metadata")
p_l["content"] = "metadata"
return self._call_api(p_l, "metadata")[0]
def __basepl(self, content, rec_type="flat", format="json"):
"""Return a dictionary which can be used as is or added to for
payloads"""
payload_dict = {"token": self.token, "content": content, "format": format}
if content not in ["metapayload_dictata", "file"]:
payload_dict["type"] = rec_type
return payload_dict
def __rcv(self):
payload = self.__basepl("version")
rcv = self._call_api(payload, "version")[0].decode("utf-8")
if "error" in rcv:
warnings.warn("Version information not available for this REDCap instance")
return ""
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return (
len(self.events) > 0 and len(self.arm_nums) > 0 and len(self.arm_names) > 0
)
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {"verify": self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format="json", df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == "df":
ret_format = "csv"
payload = self.__basepl("formEventMapping", format=ret_format)
if arms:
for i, value in enumerate(arms):
payload["arms[{}]".format(i)] = value
response, _ = self._call_api(payload, "exp_fem")
if format in ("json", "csv", "xml"):
return response
if format != "df":
raise ValueError(("Unsupported format: '{}'").format(format))
if not df_kwargs:
df_kwargs = {}
return self.read_csv(StringIO(response), **df_kwargs)
def export_field_names(self, field=None, format="json", df_kwargs=None):
"""
Export the project's export field names
Parameters
----------
fields : str
Limit exported field name to this field (only single field supported).
When not provided, all fields returned.
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'original_field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata structure for the project.
"""
ret_format = format
if format == "df":
ret_format = "csv"
payload = self.__basepl("exportFieldNames", format=ret_format)
if field:
payload["field"] = field
response, _ = self._call_api(payload, "exp_field_names")
if format in ("json", "csv", "xml"):
return response
if format != "df":
raise ValueError(("Unsupported format: '{}'").format(format))
if not df_kwargs:
df_kwargs = {"index_col": "original_field_name"}
return self.read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format="json", df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
            metadata structure for the project.
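        Examples
        --------
        A minimal usage sketch (assumes an already-configured ``Project``
        instance named ``project``; not part of the original docstring)::

            >>> data_dict = project.export_metadata()
            >>> df = project.export_metadata(format='df')  # indexed by field_name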
"""
ret_format = format
if format == "df":
ret_format = "csv"
payload = self.__basepl("metadata", format=ret_format)
to_add = [fields, forms]
str_add = ["fields", "forms"]
for key, data in zip(str_add, to_add):
if data:
for i, value in enumerate(data):
payload["{}[{}]".format(key, i)] = value
response, _ = self._call_api(payload, "metadata")
if format in ("json", "csv", "xml"):
return response
if format != "df":
raise ValueError(("Unsupported format: '{}'").format(format))
if not df_kwargs:
df_kwargs = {"index_col": "field_name"}
return self.read_csv(StringIO(response), **df_kwargs)
def delete_records(self, records):
"""
Delete records from the Project.
Parameters
----------
records : list
List of record IDs that you want to delete from the project
Returns
-------
response : int
Number of records deleted
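        Examples
        --------
        A hedged sketch (record IDs are hypothetical; assumes a configured
        ``Project`` instance named ``project``)::

            >>> project.delete_records(['1', '2'])
            2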
"""
payload = dict()
payload["action"] = "delete"
payload["content"] = "record"
payload["token"] = self.token
# Turn list of records into dict, and append to payload
records_dict = {
"records[{}]".format(idx): record for idx, record in enumerate(records)
}
payload.update(records_dict)
payload["format"] = format
response, _ = self._call_api(payload, "del_record")
return response
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
def export_records(
self,
records=None,
fields=None,
forms=None,
events=None,
raw_or_label="raw",
event_name="label",
format="json",
export_survey_fields=False,
export_data_access_groups=False,
df_kwargs=None,
export_checkbox_labels=False,
filter_logic=None,
date_begin=None,
date_end=None,
):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
date_begin : datetime
for the dateRangeStart filtering of the API
date_end : datetime
            for the dateRangeEnd filtering sent to the API
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
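        Examples
        --------
        A minimal usage sketch (the URL, token, field and record names are
        placeholders, not part of the original docstring)::

            >>> from redcap.project import Project
            >>> project = Project('https://redcap.example.org/api/', 'MY_API_TOKEN')
            >>> all_data = project.export_records()              # list of dicts
            >>> df = project.export_records(format='df')         # pandas.DataFrame
            >>> subset = project.export_records(records=['1', '2'],
            ...                                 fields=['age'],
            ...                                 raw_or_label='label')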
"""
ret_format = format
if format == "df":
ret_format = "csv"
payload = self.__basepl("record", format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (
records,
fields,
forms,
events,
raw_or_label,
event_name,
export_survey_fields,
export_data_access_groups,
export_checkbox_labels,
)
str_keys = (
"records",
"fields",
"forms",
"events",
"rawOrLabel",
"eventName",
"exportSurveyFields",
"exportDataAccessGroups",
"exportCheckboxLabel",
)
for key, data in zip(str_keys, keys_to_add):
if data:
if key in ("fields", "records", "forms", "events"):
for i, value in enumerate(data):
payload["{}[{}]".format(key, i)] = value
else:
payload[key] = data
if date_begin:
payload["dateRangeBegin"] = date_begin.strftime("%Y-%m-%d %H:%M:%S")
if date_end:
payload["dateRangeEnd"] = date_end.strftime("%Y-%m-%d %H:%M:%S")
if filter_logic:
payload["filterLogic"] = filter_logic
response, _ = self._call_api(payload, "exp_record")
if format in ("json", "csv", "xml"):
return response
if format != "df":
raise ValueError(("Unsupported format: '{}'").format(format))
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {"index_col": [self.def_field, "redcap_event_name"]}
else:
df_kwargs = {"index_col": self.def_field}
buf = StringIO(response)
dataframe = self.read_csv(buf, **df_kwargs)
buf.close()
return dataframe
# pylint: enable=too-many-branches
# pylint: enable=too-many-locals
# pylint: disable=import-outside-toplevel
@staticmethod
def read_csv(buf, **df_kwargs):
"""Wrapper around pandas read_csv that handles EmptyDataError"""
from pandas import DataFrame, read_csv
from pandas.errors import EmptyDataError
try:
dataframe = read_csv(buf, **df_kwargs)
except EmptyDataError:
dataframe = DataFrame()
return dataframe
# pylint: enable=import-outside-toplevel
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(
field_name, "text_validation_type_or_show_slider_number"
)
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
metadata_field = ""
try:
metadata_field = str(
[f[key] for f in self.metadata if f["field_name"] == field][0]
)
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return metadata_field
else:
return metadata_field
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
        so to improve backwards compatibility for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print("%s --> %s" % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(
self,
to_import,
overwrite="normal",
format="json",
return_format="json",
return_content="count",
date_format="YMD",
force_auto_number=False,
):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
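        Examples
        --------
        A hedged sketch (``'age'`` is a hypothetical field; assumes a
        configured ``Project`` instance named ``project``)::

            >>> to_import = [{project.def_field: '1', 'age': '42'}]
            >>> response = project.import_records(to_import)
            >>> response['count']  # number of records imported
            1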
"""
payload = self._initialize_import_payload(to_import, format, "record")
payload["overwriteBehavior"] = overwrite
payload["returnFormat"] = return_format
payload["returnContent"] = return_content
payload["dateFormat"] = date_format
payload["forceAutoNumber"] = force_auto_number
response = self._call_api(payload, "imp_record")[0]
if "error" in response:
raise RedcapError(str(response))
return response
def import_metadata(
self, to_import, format="json", return_format="json", date_format="YMD"
):
"""
Import metadata (DataDict) into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
If successful, the number of imported fields
"""
payload = self._initialize_import_payload(to_import, format, "metadata")
payload["returnFormat"] = return_format
payload["dateFormat"] = date_format
response = self._call_api(payload, "imp_metadata")[0]
if "error" in str(response):
raise RedcapError(str(response))
return response
def _initialize_import_payload(self, to_import, format, data_type):
"""
Standardize the data to be imported and add it to the payload
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
data_type: 'record', 'metadata'
The kind of data that are imported
Returns
-------
payload : (dict, str)
The initialized payload dictionary and updated format
"""
payload = self.__basepl(data_type)
# pylint: disable=comparison-with-callable
if hasattr(to_import, "to_csv"):
# We'll assume it's a df
buf = StringIO()
if data_type == "record":
if self.is_longitudinal():
csv_kwargs = {"index_label": [self.def_field, "redcap_event_name"]}
else:
csv_kwargs = {"index_label": self.def_field}
elif data_type == "metadata":
csv_kwargs = {"index": False}
to_import.to_csv(buf, **csv_kwargs)
payload["data"] = buf.getvalue()
buf.close()
format = "csv"
elif format == "json":
payload["data"] = json.dumps(to_import, separators=(",", ":"))
else:
# don't do anything to csv/xml
payload["data"] = to_import
# pylint: enable=comparison-with-callable
payload["format"] = format
return payload
def export_file(self, record, field, event=None, return_format="json"):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
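        Examples
        --------
        A hedged sketch (``'consent_form'`` is a hypothetical file field)::

            >>> content, info = project.export_file(record='1', field='consent_form')
            >>> with open('consent_form.pdf', 'wb') as fobj:
            ...     fobj.write(content)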
"""
self._check_file_field(field)
# load up payload
payload = self.__basepl(content="file", format=return_format)
# there's no format field in this call
del payload["format"]
payload["returnFormat"] = return_format
payload["action"] = "export"
payload["field"] = field
payload["record"] = record
if event:
payload["event"] = event
content, headers = self._call_api(payload, "exp_file")
# REDCap adds some useful things in content-type
if "content-type" in headers:
splat = [
key_values.strip() for key_values in headers["content-type"].split(";")
]
key_values = [
(key_values.split("=")[0], key_values.split("=")[1].replace('"', ""))
for key_values in splat
if "=" in key_values
]
content_map = dict(key_values)
else:
content_map = {}
return content, content_map
def import_file(
self,
record,
field,
fname,
fobj,
event=None,
repeat_instance=None,
return_format="json",
):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
repeat_instance : int
(only for projects with repeating instruments/events)
The repeat instance number of the repeating event (if longitudinal)
or the repeating instrument (if classic or longitudinal).
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
payload = self.__basepl(content="file", format=return_format)
# no format in this call
del payload["format"]
payload["returnFormat"] = return_format
payload["action"] = "import"
payload["field"] = field
payload["record"] = record
if event:
payload["event"] = event
if repeat_instance:
payload["repeat_instance"] = repeat_instance
file_kwargs = {"files": {"file": (fname, fobj)}}
return self._call_api(payload, "imp_file", **file_kwargs)[0]
def delete_file(self, record, field, return_format="json", event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
payload = self.__basepl(content="file", format=return_format)
del payload["format"]
payload["returnFormat"] = return_format
payload["action"] = "delete"
payload["record"] = record
payload["field"] = field
if event:
payload["event"] = event
return self._call_api(payload, "del_file")[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, "field_type") == "file"
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
return True
def export_users(self, format="json"):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
payload = self.__basepl(content="user", format=format)
return self._call_api(payload, "exp_user")[0]
def export_survey_participant_list(self, instrument, event=None, format="json"):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
payload = self.__basepl(content="participantList", format=format)
payload["instrument"] = instrument
if event:
payload["event"] = event
return self._call_api(payload, "exp_survey_participant_list")
def generate_next_record_name(self):
"""Return the next record name for auto-numbering records"""
payload = self.__basepl(content="generateNextRecordName")
return self._call_api(payload, "exp_next_id")[0]
def export_project_info(self, format="json"):
"""
Export Project Information
Parameters
----------
format: (json, xml, csv), json by default
Format of returned data
"""
payload = self.__basepl(content="project", format=format)
return self._call_api(payload, "exp_proj")[0]
# pylint: disable=too-many-locals
def export_reports(
self,
format="json",
report_id=None,
raw_or_label="raw",
raw_or_label_headers="raw",
export_checkbox_labels="false",
decimal_character=None,
df_kwargs=None,
):
"""
Export a report of the Project
Notes
-----
Parameters
----------
report_id : the report ID number provided next to the report name
on the report list page
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
raw_or_label : raw [default], label - export the raw coded values or
labels for the options of multiple choice fields
raw_or_label_headers : raw [default], label - (for 'csv' format 'flat'
type only) for the CSV headers, export the variable/field names
(raw) or the field labels (label)
export_checkbox_labels : true, false [default] - specifies the format of
checkbox field values specifically when exporting the data as labels
(i.e., when rawOrLabel=label). When exporting labels, by default
(without providing the exportCheckboxLabel flag or if
exportCheckboxLabel=false), all checkboxes will either have a value
'Checked' if they are checked or 'Unchecked' if not checked.
But if exportCheckboxLabel is set to true, it will instead export
the checkbox value as the checkbox option's label (e.g., 'Choice 1')
if checked or it will be blank/empty (no value) if not checked.
If rawOrLabel=false, then the exportCheckboxLabel flag is ignored.
decimal_character : If specified, force all numbers into same decimal
format. You may choose to force all data values containing a
decimal to have the same decimal character, which will be applied
to all calc fields and number-validated text fields. Options
include comma ',' or dot/full stop '.', but if left blank/null,
then it will export numbers using the fields' native decimal format.
Simply provide the value of either ',' or '.' for this parameter.
Returns
-------
Per Redcap API:
Data from the project in the format and type specified
Ordered by the record (primary key of project) and then by event id
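        Examples
        --------
        A minimal sketch (the ``report_id`` value is project-specific and
        hypothetical here)::

            >>> report = project.export_reports(report_id='42')
            >>> df = project.export_reports(report_id='42', format='df')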
"""
ret_format = format
if format == "df":
ret_format = "csv"
payload = self.__basepl(content="report", format=ret_format)
keys_to_add = (
report_id,
raw_or_label,
raw_or_label_headers,
export_checkbox_labels,
decimal_character,
)
str_keys = (
"report_id",
"rawOrLabel",
"rawOrLabelHeaders",
"exportCheckboxLabel",
"decimalCharacter",
)
for key, data in zip(str_keys, keys_to_add):
if data:
payload[key] = data
response, _ = self._call_api(payload, "exp_report")
if format in ("json", "csv", "xml"):
return response
if format != "df":
raise ValueError(("Unsupported format: '{}'").format(format))
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {"index_col": [self.def_field, "redcap_event_name"]}
else:
df_kwargs = {"index_col": self.def_field}
buf = StringIO(response)
dataframe = self.read_csv(buf, **df_kwargs)
buf.close()
return dataframe
# pylint: enable=too-many-locals
# pylint: enable=too-many-instance-attributes
# pylint: enable=too-many-arguments
# pylint: enable=too-many-public-methods
# pylint: enable=redefined-builtin
| mit |
balazs-bamer/FreeCAD-Surface | src/Mod/Plot/plotSeries/TaskPanel.py | 26 | 17784 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD as App
import FreeCADGui as Gui
from PySide import QtGui, QtCore
import Plot
from plotUtils import Paths
import matplotlib
from matplotlib.lines import Line2D
import matplotlib.colors as Colors
class TaskPanel:
def __init__(self):
self.ui = Paths.modulePath() + "/plotSeries/TaskPanel.ui"
self.skip = False
self.item = 0
self.plt = None
def accept(self):
return True
def reject(self):
return True
def clicked(self, index):
pass
def open(self):
pass
def needsFullSpace(self):
return True
def isAllowedAlterSelection(self):
return False
def isAllowedAlterView(self):
return True
def isAllowedAlterDocument(self):
return False
def helpRequested(self):
pass
def setupUi(self):
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
form.color = self.widget(QtGui.QPushButton, "color")
form.remove = self.widget(QtGui.QPushButton, "remove")
self.form = form
self.retranslateUi()
self.fillStyles()
self.updateUI()
QtCore.QObject.connect(
form.items,
QtCore.SIGNAL("currentRowChanged(int)"),
self.onItem)
QtCore.QObject.connect(
form.label,
QtCore.SIGNAL("editingFinished()"),
self.onData)
QtCore.QObject.connect(
form.isLabel,
QtCore.SIGNAL("stateChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.style,
QtCore.SIGNAL("currentIndexChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.marker,
QtCore.SIGNAL("currentIndexChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.width,
QtCore.SIGNAL("valueChanged(double)"),
self.onData)
QtCore.QObject.connect(
form.size,
QtCore.SIGNAL("valueChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.color,
QtCore.SIGNAL("pressed()"),
self.onColor)
QtCore.QObject.connect(
form.remove,
QtCore.SIGNAL("pressed()"),
self.onRemove)
QtCore.QObject.connect(
Plot.getMdiArea(),
QtCore.SIGNAL("subWindowActivated(QMdiSubWindow*)"),
self.onMdiArea)
return False
def getMainWindow(self):
toplevel = QtGui.qApp.topLevelWidgets()
for i in toplevel:
if i.metaObject().className() == "Gui::MainWindow":
return i
raise RuntimeError("No main window found")
def widget(self, class_id, name):
"""Return the selected widget.
Keyword arguments:
class_id -- Class identifier
name -- Name of the widget
"""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
return form.findChild(class_id, name)
def retranslateUi(self):
"""Set the user interface locale strings."""
self.form.setWindowTitle(QtGui.QApplication.translate(
"plot_series",
"Configure series",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QCheckBox, "isLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"No label",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "remove").setText(
QtGui.QApplication.translate(
"plot_series",
"Remove serie",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLabel, "styleLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"Line style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLabel, "markerLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"Marker",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QListWidget, "items").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"List of available series",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLineEdit, "label").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line title",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QCheckBox, "isLabel").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"If checked serie will not be considered for legend",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QComboBox, "lineStyle").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QComboBox, "markers").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Marker style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QDoubleSpinBox, "lineWidth").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line width",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QSpinBox, "markerSize").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Marker size",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "color").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line and marker color",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "remove").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Removes this serie",
None,
QtGui.QApplication.UnicodeUTF8))
def fillStyles(self):
"""Fill the style combo boxes with the availabel ones."""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
# Line styles
linestyles = Line2D.lineStyles.keys()
for i in range(0, len(linestyles)):
style = linestyles[i]
string = "\'" + str(style) + "\'"
string += " (" + Line2D.lineStyles[style] + ")"
form.style.addItem(string)
# Markers
markers = Line2D.markers.keys()
for i in range(0, len(markers)):
marker = markers[i]
string = "\'" + str(marker) + "\'"
string += " (" + Line2D.markers[marker] + ")"
form.marker.addItem(string)
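        # Each item combines a style key with its value in the matplotlib
        # Line2D tables, e.g. an entry similar to "'-' (_draw_solid)"; the
        # exact value text depends on the installed matplotlib version.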
def onItem(self, row):
"""Executed when the selected item is modified."""
if not self.skip:
self.skip = True
self.item = row
self.updateUI()
self.skip = False
def onData(self):
"""Executed when the selected item data is modified."""
if not self.skip:
self.skip = True
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
            # Ensure that the selected series exists
if self.item >= len(Plot.series()):
self.updateUI()
return
# Set label
serie = Plot.series()[self.item]
if(form.isLabel.isChecked()):
serie.name = None
form.label.setEnabled(False)
else:
serie.name = form.label.text()
form.label.setEnabled(True)
# Set line style and marker
style = form.style.currentIndex()
linestyles = Line2D.lineStyles.keys()
serie.line.set_linestyle(linestyles[style])
marker = form.marker.currentIndex()
markers = Line2D.markers.keys()
serie.line.set_marker(markers[marker])
# Set line width and marker size
serie.line.set_linewidth(form.width.value())
serie.line.set_markersize(form.size.value())
plt.update()
# Regenerate series labels
self.setList()
self.skip = False
def onColor(self):
""" Executed when color pallete is requested. """
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.color = self.widget(QtGui.QPushButton, "color")
        # Ensure that the selected series exists
if self.item >= len(Plot.series()):
self.updateUI()
return
# Show widget to select color
col = QtGui.QColorDialog.getColor()
        # Send the chosen color to the widget and the series
if col.isValid():
serie = plt.series[self.item]
form.color.setStyleSheet(
"background-color: rgb({}, {}, {});".format(col.red(),
col.green(),
col.blue()))
serie.line.set_color((col.redF(), col.greenF(), col.blueF()))
plt.update()
def onRemove(self):
"""Executed when the data serie must be removed."""
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
        # Ensure that the selected series exists
if self.item >= len(Plot.series()):
self.updateUI()
return
        # Remove the selected series
Plot.removeSerie(self.item)
self.setList()
self.updateUI()
plt.update()
def onMdiArea(self, subWin):
"""Executed when a new window is selected on the mdi area.
Keyword arguments:
subWin -- Selected window.
"""
plt = Plot.getPlot()
if plt != subWin:
self.updateUI()
def updateUI(self):
""" Setup UI controls values if possible """
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
form.color = self.widget(QtGui.QPushButton, "color")
form.remove = self.widget(QtGui.QPushButton, "remove")
plt = Plot.getPlot()
form.items.setEnabled(bool(plt))
form.label.setEnabled(bool(plt))
form.isLabel.setEnabled(bool(plt))
form.style.setEnabled(bool(plt))
form.marker.setEnabled(bool(plt))
form.width.setEnabled(bool(plt))
form.size.setEnabled(bool(plt))
form.color.setEnabled(bool(plt))
form.remove.setEnabled(bool(plt))
if not plt:
self.plt = plt
form.items.clear()
return
self.skip = True
# Refill list
if self.plt != plt or len(Plot.series()) != form.items.count():
self.plt = plt
self.setList()
        # Ensure that there are series to configure
if not len(Plot.series()):
form.label.setEnabled(False)
form.isLabel.setEnabled(False)
form.style.setEnabled(False)
form.marker.setEnabled(False)
form.width.setEnabled(False)
form.size.setEnabled(False)
form.color.setEnabled(False)
form.remove.setEnabled(False)
return
# Set label
serie = Plot.series()[self.item]
if serie.name is None:
form.isLabel.setChecked(True)
form.label.setEnabled(False)
form.label.setText("")
else:
form.isLabel.setChecked(False)
form.label.setText(serie.name)
# Set line style and marker
form.style.setCurrentIndex(0)
linestyles = Line2D.lineStyles.keys()
for i in range(0, len(linestyles)):
style = linestyles[i]
if style == serie.line.get_linestyle():
form.style.setCurrentIndex(i)
form.marker.setCurrentIndex(0)
markers = Line2D.markers.keys()
for i in range(0, len(markers)):
marker = markers[i]
if marker == serie.line.get_marker():
form.marker.setCurrentIndex(i)
# Set line width and marker size
form.width.setValue(serie.line.get_linewidth())
form.size.setValue(serie.line.get_markersize())
# Set color
color = Colors.colorConverter.to_rgb(serie.line.get_color())
form.color.setStyleSheet("background-color: rgb({}, {}, {});".format(
int(color[0] * 255),
int(color[1] * 255),
int(color[2] * 255)))
self.skip = False
def setList(self):
"""Setup the UI control values if it is possible."""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.items.clear()
series = Plot.series()
for i in range(0, len(series)):
serie = series[i]
string = 'serie ' + str(i) + ': '
if serie.name is None:
string = string + '\"No label\"'
else:
string = string + serie.name
form.items.addItem(string)
        # Ensure that the selected item index is still valid
if len(series) and self.item >= len(series):
self.item = len(series) - 1
form.items.setCurrentIndex(self.item)
def createTask():
panel = TaskPanel()
Gui.Control.showDialog(panel)
if panel.setupUi():
Gui.Control.closeDialog(panel)
return None
return panel
| lgpl-2.1 |
gt-ros-pkg/hrl-haptic-manip | hrl_common_code_darpa_m3/src/hrl_common_code_darpa_m3/software_simulation_setup/distribute_goal_on_a_grid.py | 1 | 5318 | #!/usr/bin/python
import sys
import numpy as np, math
import copy
import matplotlib.pyplot as pp
import roslib; roslib.load_manifest('hrl_common_code_darpa_m3')
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import hrl_common_code_darpa_m3.software_simulation_setup.viz as sssv
if __name__ == '__main__':
import optparse
p = optparse.OptionParser()
p.add_option('--nr', action='store', dest='nr',type='int',
default=3, help='number of goals in radial direction')
p.add_option('--nt', action='store', dest='nt',type='int',
default=3, help='number of goals in theta direction')
p.add_option('--rmin', action='store', dest='rmin',type='float',
default=0.5, help='min radial distance for goal')
p.add_option('--rmax', action='store', dest='rmax',type='float',
                 default=0.7, help='max radial distance for goal')
p.add_option('--tmin', action='store', dest='tmin',type='float',
default=-30, help='min theta for goal (DEGREES)')
p.add_option('--tmax', action='store', dest='tmax',type='float',
default=30, help='max theta for goal (DEGREES)')
p.add_option('--pkl', action='store', dest='pkl', default=None,
help='pkl with obstacle locations')
p.add_option('--save_figure', '--sf', action='store_true', dest='sf',
help='save the figure')
p.add_option('--sim3', action='store_true', dest='sim3',
help='three link planar (torso, upper arm, forearm)')
p.add_option('--sim3_with_hand', action='store_true', dest='sim3_with_hand',
help='three link planar (upper arm, forearm, hand)')
p.add_option('--grid_goal', action='store', dest='grid_resol', type='float',
                 default=0.0, help='Grid Goal resolution for equally distributed goals')
p.add_option('--xmin', action='store', dest='xmin',type='float',
default=0.2, help='min x coord for goals')
p.add_option('--xmax', action='store', dest='xmax',type='float',
default=0.6, help='max x coord for goals')
p.add_option('--ymin', action='store', dest='ymin',type='float',
default=-0.3, help='min y coord for goals')
p.add_option('--ymax', action='store', dest='ymax',type='float',
default=0.3, help='max y coord for goals')
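    # Hypothetical example invocation (the pkl name is a placeholder):
    #   python distribute_goal_on_a_grid.py --pkl reach_problem_dict.pkl \
    #          --nr 4 --nt 5 --rmin 0.4 --rmax 0.8 --sim3 --sf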
opt, args = p.parse_args()
if opt.pkl == None:
raise RuntimeError('Please specify a reach_problem_dict pkl')
rpd = ut.load_pickle(opt.pkl)
nm = '.'.join(opt.pkl.split('.')[0:-1])
g_list = []
if opt.grid_resol:
x = opt.xmin
y = opt.ymin
arGridX = []
while x <= opt.xmax:
arGridX.append(x)
x += opt.grid_resol
arGridY = []
while y <= opt.ymax:
arGridY.append(y)
y += opt.grid_resol
for i in range(len(arGridX)):
for j in range(len(arGridY)):
rpd['goal'] = [arGridX[i], arGridY[j], 0]
ut.save_pickle(rpd, nm + '_x%02d'%i + '_y%02d'%j + '.pkl')
g_list.append(copy.copy(rpd['goal']))
else:
        # Prevent division by zero
if opt.nr != 1 :
r_step = (opt.rmax - opt.rmin) / (opt.nr - 1)
else:
r_step = 0
if opt.nt != 1 :
t_step = math.radians((opt.tmax - opt.tmin) / (opt.nt - 1))
else:
t_step = 0
t_start = math.radians(opt.tmin)
nt = opt.nt
for r in range(opt.nr):
for t in range(nt):
rad = opt.rmin + r_step * r
theta = t_start + t_step * t
rpd['goal'] = [rad * math.cos(theta), rad * math.sin(theta), 0]
ut.save_pickle(rpd, nm + '_r%02d'%r + '_t%02d'%t + '.pkl')
g_list.append(copy.copy(rpd['goal']))
if r%2 == 0:
t_start = t_start + t_step/2
nt = nt - 1
else:
t_start = t_start - t_step/2
nt = nt + 1
if opt.sf:
        # Saving the figure also requires the
        # hrl_software_simulation_darpa_m3 package
roslib.load_manifest('hrl_software_simulation_darpa_m3')
import hrl_software_simulation_darpa_m3.gen_sim_arms as gsa
if opt.sim3:
import hrl_common_code_darpa_m3.robot_config.three_link_planar_capsule as d_robot
elif opt.sim3_with_hand:
import hrl_common_code_darpa_m3.robot_config.three_link_with_hand as d_robot
mpu.set_figure_size(6,4)
pp.figure()
kinematics = gsa.RobotSimulatorKDL(d_robot)
sssv.draw_obstacles_from_reach_problem_dict(rpd)
g_arr = np.array(g_list)
pp.scatter(-g_arr[:,1], g_arr[:,0], s=50, c='g', marker='x', lw=1, edgecolor='g')
q = [0.,0,0]
ee,_ = kinematics.FK(q)
rad = np.linalg.norm(ee)
sa = -math.radians(45)
ea = math.radians(45)
mpu.plot_circle(0., 0., rad, sa, ea, color='b', linewidth=0.5)
mpu.plot_radii(0., 0., rad, sa, ea, 2*math.pi, color='b', linewidth=0.5)
pp.xlim(-0.7, 0.7)
mpu.reduce_figure_margins(left=0.02, bottom=0.02, right=0.98, top=0.98)
pp.savefig(nm+'.pdf')
| apache-2.0 |
M-R-Houghton/euroscipy_2015 | bokeh/examples/plotting/server/burtin.py | 42 | 4826 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_server
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
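# Sanity check of the mapping above: rad(1000) equals inner_radius (90) and
# rad(0.001) equals outer_radius (290), so smaller MIC values give longer wedges.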
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
x = np.zeros(len(df))
y = np.zeros(len(df))
output_server("burtin")
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color="black",
background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and labels
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
text_font_size="9pt", text_align="left", text_baseline="middle")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)
| mit |
tdeboissiere/DeepLearningImplementations | InfoGAN/src/model/eval.py | 1 | 4641 | import sys
import numpy as np
import models
import matplotlib.pylab as plt
# Utils
sys.path.append("../utils")
import data_utils
import general_utils
def eval(**kwargs):
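    # Sketch of the expected keyword arguments (they mirror the lookups below);
    # every value here is a placeholder, not a recommended setting:
    #   eval(batch_size=32, generator="upsampling", model_name="CNN",
    #        image_data_format="channels_last", img_dim=(28, 28, 1),
    #        cont_dim=2, cat_dim=10, noise_dim=64, bn_mode=2,
    #        noise_scale=0.5, dset="mnist", epoch=400)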
# Roll out the parameters
batch_size = kwargs["batch_size"]
generator = kwargs["generator"]
model_name = kwargs["model_name"]
image_data_format = kwargs["image_data_format"]
img_dim = kwargs["img_dim"]
cont_dim = (kwargs["cont_dim"],)
cat_dim = (kwargs["cat_dim"],)
noise_dim = (kwargs["noise_dim"],)
bn_mode = kwargs["bn_mode"]
noise_scale = kwargs["noise_scale"]
dset = kwargs["dset"]
epoch = kwargs["epoch"]
# Setup environment (logging directory etc)
general_utils.setup_logging(model_name)
# Load and rescale data
if dset == "RGZ":
X_real_train = data_utils.load_RGZ(img_dim, image_data_format)
if dset == "mnist":
X_real_train, _, _, _ = data_utils.load_mnist(image_data_format)
img_dim = X_real_train.shape[-3:]
# Load generator model
generator_model = models.load("generator_%s" % generator,
cat_dim,
cont_dim,
noise_dim,
img_dim,
bn_mode,
batch_size,
dset=dset)
    # Load the trained generator weights
generator_model.load_weights("../../models/%s/gen_weights_epoch%s.h5" %
(model_name, epoch))
X_plot = []
# Vary the categorical variable
for i in range(cat_dim[0]):
X_noise = data_utils.sample_noise(noise_scale, batch_size, noise_dim)
X_cont = data_utils.sample_noise(noise_scale, batch_size, cont_dim)
X_cont = np.repeat(X_cont[:1, :], batch_size, axis=0) # fix continuous noise
X_cat = np.zeros((batch_size, cat_dim[0]), dtype='float32')
X_cat[:, i] = 1 # always the same categorical value
X_gen = generator_model.predict([X_cat, X_cont, X_noise])
X_gen = data_utils.inverse_normalization(X_gen)
if image_data_format == "channels_first":
X_gen = X_gen.transpose(0,2,3,1)
X_gen = [X_gen[i] for i in range(len(X_gen))]
X_plot.append(np.concatenate(X_gen, axis=1))
X_plot = np.concatenate(X_plot, axis=0)
plt.figure(figsize=(8,10))
if X_plot.shape[-1] == 1:
plt.imshow(X_plot[:, :, 0], cmap="gray")
else:
plt.imshow(X_plot)
plt.xticks([])
plt.yticks([])
plt.ylabel("Varying categorical factor", fontsize=28, labelpad=60)
plt.annotate('', xy=(-0.05, 0), xycoords='axes fraction', xytext=(-0.05, 1),
arrowprops=dict(arrowstyle="-|>", color='k', linewidth=4))
plt.tight_layout()
plt.savefig("../../figures/varying_categorical.png")
plt.clf()
plt.close()
# Vary the continuous variables
X_plot = []
# First get the extent of the noise sampling
x = np.ravel(data_utils.sample_noise(noise_scale, batch_size * 20000, cont_dim))
# Define interpolation points
x = np.linspace(x.min(), x.max(), num=batch_size)
for i in range(batch_size):
X_noise = data_utils.sample_noise(noise_scale, batch_size, noise_dim)
X_cont = np.concatenate([np.array([x[i], x[j]]).reshape(1, -1) for j in range(batch_size)], axis=0)
X_cat = np.zeros((batch_size, cat_dim[0]), dtype='float32')
X_cat[:, 1] = 1 # always the same categorical value
X_gen = generator_model.predict([X_cat, X_cont, X_noise])
X_gen = data_utils.inverse_normalization(X_gen)
if image_data_format == "channels_first":
X_gen = X_gen.transpose(0,2,3,1)
X_gen = [X_gen[i] for i in range(len(X_gen))]
X_plot.append(np.concatenate(X_gen, axis=1))
X_plot = np.concatenate(X_plot, axis=0)
plt.figure(figsize=(10,10))
if X_plot.shape[-1] == 1:
plt.imshow(X_plot[:, :, 0], cmap="gray")
else:
plt.imshow(X_plot)
plt.xticks([])
plt.yticks([])
plt.ylabel("Varying continuous factor 1", fontsize=28, labelpad=60)
plt.annotate('', xy=(-0.05, 0), xycoords='axes fraction', xytext=(-0.05, 1),
arrowprops=dict(arrowstyle="-|>", color='k', linewidth=4))
plt.xlabel("Varying continuous factor 2", fontsize=28, labelpad=60)
plt.annotate('', xy=(1, -0.05), xycoords='axes fraction', xytext=(0, -0.05),
arrowprops=dict(arrowstyle="-|>", color='k', linewidth=4))
plt.tight_layout()
plt.savefig("../../figures/varying_continuous.png")
plt.clf()
plt.close()
| mit |
wwliao/bkheatmap | bkheatmap.py | 1 | 12038 | import argparse
import math
import os
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.plotting import figure, gridplot, output_file, save
import matplotlib as mpl
from matplotlib import cm
import numpy as np
import pandas as pd
import scipy.spatial.distance as dist
import scipy.cluster.hierarchy as hier
__version__ = "0.1.5"
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--palette", default="Spectral_r",
help="default: %(default)s")
parser.add_argument("--width", type=int, default=400,
help="default: %(default)d")
parser.add_argument("--height", type=int, default=400,
help="default: %(default)d")
parser.add_argument("--scale", default="row",
help="default: %(default)s")
parser.add_argument("--metric", default="euclidean",
help="default: %(default)s")
parser.add_argument("--method", default="single",
help="default: %(default)s")
parser.add_argument("table")
return parser
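# Hedged usage sketch (the table name is a placeholder); the positional
# argument is a tab-separated file that pandas reads below:
#   python bkheatmap.py --scale row --metric euclidean --method average table.txt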
def calc_zscore(df, scale):
if scale == "row":
df = df.T
df = (df - df.mean()) / df.std(ddof=1)
df = df.T
elif scale == "column":
df = (df - df.mean()) / df.std(ddof=1)
return df
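# Worked example: with scale="row" a row [1, 2, 3] becomes [-1, 0, 1], i.e.
# (x - mean) / std with ddof=1 applied along each row.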
def cluster(df, metric="euclidean", method="single", row=True, column=True):
row_linkmat, col_linkmat = None, None
if row:
distmat = dist.pdist(df, metric)
row_linkmat = hier.linkage(distmat, method)
df = df.iloc[hier.leaves_list(row_linkmat), :]
if column:
df = df.T
distmat = dist.pdist(df, metric)
col_linkmat = hier.linkage(distmat, method)
df = df.iloc[hier.leaves_list(col_linkmat), :].T
return df, row_linkmat, col_linkmat
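# Usage sketch (assuming df already holds the z-scored table): reorder rows and
# columns to follow the dendrogram leaf order and keep both linkage matrices:
#   df, row_links, col_links = cluster(df, metric="euclidean", method="single")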
def assign_color(df, value_var, colormap):
vmax = df[value_var].abs().max()
vmin = vmax * -1
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
df["color"] = df.apply(lambda s: mpl.colors.rgb2hex(
cm.get_cmap(colormap)(norm(s[value_var]))),
axis=1)
return df
def assign_cat_color(df, cat_var, colormap):
color = {}
norm = mpl.colors.Normalize(vmin=0, vmax=len(df[cat_var].unique())-1)
for i, cat in enumerate(df[cat_var].unique()):
color[cat] = mpl.colors.rgb2hex(cm.get_cmap(colormap)(norm(i)))
df["color"] = df.apply(lambda s: color[s[cat_var]], axis=1)
return df
def get_colorbar_source(df, value_var, colormap):
vmax = df[value_var].abs().max()
vmin = vmax * -1
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
value = np.linspace(vmin, vmax, num=50)
color = []
for v in value:
color.append(mpl.colors.rgb2hex(cm.get_cmap(colormap)(norm(v))))
return vmax*2/49.0, ColumnDataSource(data=dict(value=value, color=color))
def bkheatmap(df, prefix, scale="row", metric="euclidean", method="single",
width=400, height=400, palette="Spectral_r"):
df.index.name = "row"
zscore = calc_zscore(df, scale=scale)
zscore = zscore.dropna()
zscore, rlm, clm = cluster(zscore, metric=metric, method=method)
coldict = dict((t[1], t[0]) for t in enumerate(zscore.columns.tolist()))
rowdict = dict((t[1], t[0]) for t in enumerate(zscore.index.tolist()))
tidy_df = pd.melt(zscore.reset_index(), id_vars=["row"],
var_name="column", value_name="zscore")
tidy_df["row_id"] = tidy_df.apply(lambda s: rowdict[s["row"]], axis=1)
tidy_df["column_id"] = tidy_df.apply(lambda s: coldict[s["column"]], axis=1)
tidy_df["value"] = tidy_df.apply(lambda s: df[s["column"]][s["row"]], axis=1)
tidy_df = assign_color(tidy_df, "zscore", palette)
source = ColumnDataSource(data=tidy_df)
TOOLS = "pan,box_zoom,wheel_zoom,reset,hover"
heatmap = figure(x_range=(-0.5, max(coldict.values()) + 0.5),
y_range=(-0.5, max(rowdict.values()) + 0.5),
plot_width=width, plot_height=height,
toolbar_location="above", tools=TOOLS,
min_border_left=0, min_border_right=0,
min_border_top=0, min_border_bottom=0)
#heatmap.toolbar.logo = None
heatmap.grid.grid_line_color = None
heatmap.axis.visible = False
heatmap.outline_line_color = None
heatmap.rect("column_id", "row_id", 1, 1, source=source,
color="color", line_color=None, alpha=1)
heatmap.select_one(HoverTool).tooltips = [
("row", "@row"),
("column", "@column"),
("value", "@value"),
("z-score", "@zscore")
]
row, row_id = zip(*rowdict.items())
row_group = []
row_name = []
if all(s.count(":") for s in row):
for s in row:
rg, rn = s.split(":")[-2:] # only one-level grouping
row_group.append(rg)
row_name.append(rn)
else:
row_name = row
rowlabel = figure(plot_width=200, plot_height=heatmap.plot_height,
x_range=(0, 1), y_range=heatmap.y_range, title=None,
min_border_left=0, min_border_right=0,
min_border_top=0, min_border_bottom=0,
toolbar_location=None, webgl=True)
#rowlabel.toolbar.logo = None
rowlabel.text(0.05, "row_id", "row_name",
source=ColumnDataSource(data=dict(row_name=row_name,
row_id=row_id)),
text_align="left", text_baseline="middle",
text_font_size="10pt", text_color="#000000")
rowlabel.axis.visible = False
rowlabel.grid.grid_line_color = None
rowlabel.outline_line_color = None
column, column_id = zip(*coldict.items())
column_group = []
column_name = []
if all(s.count(":") for s in column):
for s in column:
cg, cn = s.split(":")[-2:] # only one-level grouping
column_group.append(cg)
column_name.append(cn)
else:
column_name = column
collabel = figure(plot_width=heatmap.plot_width, plot_height=200,
x_range=heatmap.x_range, y_range=(-1, 0), title=None,
min_border_left=0, min_border_right=0,
min_border_top=0, min_border_bottom=0,
toolbar_location=None, webgl=True)
#collabel.toolbar.logo = None
collabel.text("column_id", -0.05, "column_name",
source=ColumnDataSource(data=dict(column_name=column_name,
column_id=column_id)),
text_align="right", text_baseline="middle",
text_font_size="10pt", text_color="#000000",
angle=math.pi/3)
collabel.axis.visible = False
collabel.grid.grid_line_color = None
collabel.outline_line_color = None
col_dendro = hier.dendrogram(clm, no_plot=True)
coldendro = figure(plot_width=heatmap.plot_width, plot_height=180,
x_range=heatmap.x_range, title=None,
min_border_left=0, min_border_right=0,
min_border_top=0, min_border_bottom=0,
toolbar_location=None, webgl=True)
#coldendro.toolbar.logo = None
col_height = 0.09 * (df.shape[0] * width) / (df.shape[1] * height)
if column_group:
coldendro.multi_line(list(np.asarray(col_dendro["icoord"])/10 - 0.5),
list(np.asarray(col_dendro["dcoord"]) + col_height/2),
line_color="#000000", line_width=1)
groupdict = {}
groupdict["column_id"] = column_id
groupdict["column_group"] = column_group
groupdf = pd.DataFrame(groupdict)
groupdf = assign_cat_color(groupdf, "column_group", "Paired")
coldendro.rect("column_id", 0, width=1, height=col_height,
fill_color="color", line_color=None,
source=ColumnDataSource(data=groupdf))
else:
coldendro.multi_line(list(np.asarray(col_dendro["icoord"])/10 - 0.5),
list(np.asarray(col_dendro["dcoord"])),
line_color="#000000", line_width=1)
coldendro.axis.visible = False
coldendro.grid.grid_line_color = None
coldendro.outline_line_color = None
row_dendro = hier.dendrogram(rlm, orientation="left", no_plot=True)
rowdendro = figure(plot_width=200, plot_height=heatmap.plot_height,
y_range=heatmap.y_range, title=None,
min_border_left=0, min_border_right=0,
min_border_top=0, min_border_bottom=0,
toolbar_location=None, webgl=True)
#rowdendro.toolbar.logo = None
if row_group:
rowdendro.multi_line(list(np.asarray(row_dendro["dcoord"])*-1 - 0.15),
list(np.asarray(row_dendro["icoord"])/10 - 0.5),
line_color="#000000", line_width=1)
groupdict = {}
groupdict["row_id"] = row_id
groupdict["row_group"] = row_group
groupdf = pd.DataFrame(groupdict)
groupdf = assign_cat_color(groupdf, "row_group", "Set3")
rowdendro.rect(0, "row_id", width=0.3, height=1,
fill_color="color", line_color=None,
source=ColumnDataSource(data=groupdf))
else:
rowdendro.multi_line(list(np.asarray(row_dendro["dcoord"])*-1),
list(np.asarray(row_dendro["icoord"])/10 - 0.5),
line_color="#000000", line_width=1)
rowdendro.axis.visible = False
rowdendro.grid.grid_line_color = None
rowdendro.outline_line_color = None
empty = figure(plot_width=rowdendro.plot_width,
plot_height=coldendro.plot_height, title=None,
toolbar_location=None)
#empty.toolbar.logo = None
# Plot a circle to escape NO_DATA_RENDERERS error
empty.circle(x=0, y=0, color=None)
empty.axis.visible = False
empty.grid.grid_line_color = None
empty.outline_line_color = None
colorbar = figure(y_range=(-0.5, 0.5), x_axis_location="above",
plot_width=rowdendro.plot_width,
plot_height=coldendro.plot_height, title=None,
min_border_top=0, min_border_bottom=0,
min_border_left=0, min_border_right=0,
toolbar_location=None)
#colorbar.toolbar.logo = None
width, colorbar_source = get_colorbar_source(tidy_df, "zscore", palette)
colorbar.rect(x="value", y=0, fill_color="color",
line_color=None, width=width, height=1,
source=colorbar_source)
colorbar.axis.axis_line_color = None
colorbar.axis.major_tick_in = 0
colorbar.xaxis.axis_label = "z-score"
colorbar.xaxis.axis_label_text_color = "#000000"
colorbar.xaxis.axis_label_text_font_size = "12pt"
colorbar.xaxis.major_label_text_color = "#000000"
colorbar.xaxis.major_tick_line_color = "#000000"
colorbar.yaxis.major_tick_line_color = None
colorbar.yaxis.major_label_text_color = None
colorbar.axis.minor_tick_line_color = None
colorbar.grid.grid_line_color = None
colorbar.outline_line_color = None
output_file("{0}.bkheatmap.html".format(prefix),
title="{0} Bokeh Heatmap".format(prefix))
save(gridplot([[colorbar, coldendro, None],
[rowdendro, heatmap, rowlabel],
[empty, collabel, None]]))
def main():
parser = get_parser()
args = parser.parse_args()
prefix = os.path.splitext(os.path.basename(args.table))[0]
df = pd.read_table(args.table, index_col=0)
bkheatmap(df, prefix=prefix, scale=args.scale,
metric=args.metric, method=args.method,
width=args.width, height=args.height,
palette=args.palette)
if __name__ == "__main__":
exit(main())
| gpl-3.0 |
v0i0/lammps | python/examples/matplotlib_plot.py | 22 | 2270 | #!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# matplotlib_plot.py
# Purpose: plot Temp of running LAMMPS simulation via matplotlib
# Syntax: plot.py in.lammps Nfreq Nsteps compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point every this many steps
# Nsteps = run for this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
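# Hypothetical invocation (the input script name is a placeholder; LAMMPS
# defines a default temperature compute with ID "thermo_temp"):
#   python matplotlib_plot.py in.lj 100 10000 thermo_temp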
from __future__ import print_function
import sys
sys.path.append("./pizza")
import matplotlib
matplotlib.use('tkagg')
import matplotlib.pyplot as plt
# parse command line
argv = sys.argv
if len(argv) != 5:
print("Syntax: plot.py in.lammps Nfreq Nsteps compute-ID")
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
compute = sys.argv[4]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
# initial 0-step run to generate initial 1-point plot
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
# create matplotlib plot
# just proc 0 handles plotting
if me == 0:
fig = plt.figure()
line, = plt.plot(xaxis, yaxis)
plt.xlim([0, nsteps])
plt.title(compute)
plt.xlabel("Timestep")
plt.ylabel("Temperature")
plt.show(block=False)
# run nfreq steps at a time w/out pre/post, query compute, refresh plot
import time
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
if me == 0:
line.set_xdata(xaxis)
line.set_ydata(yaxis)
ax = plt.gca()
ax.relim()
ax.autoscale_view(True, True, True)
fig.canvas.draw()
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()
if sys.version_info[0] == 3:
input("Press Enter to exit...")
else:
raw_input("Press Enter to exit...")
| gpl-2.0 |
kapteyn-astro/kapteyn | doc/source/EXAMPLES/allsky.py | 1 | 50612 | from kapteyn import maputils, tabarray
import numpy
import sys
from matplotlib import pyplot as plt
__version__ = '1.91'
epsilon = 0.0000000001
def radians(a):
return a*numpy.pi/180.0
def degrees(a):
return a*180.0/numpy.pi
def cylrange():
X = numpy.arange(0,400.0,30.0);
# Replace last two (dummy) values by two values around 180 degrees
X[-1] = 180.0 - epsilon
X[-2] = 180.0 + epsilon
return X
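# For reference, cylrange() returns [0, 30, ..., 330, 180+eps, 180-eps]; the two
# dummy values are replaced so the 180 deg meridian is approached from both
# sides without wrapping around the sky.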
def polrange():
X = numpy.arange(0,380.0,15);
# Replace last two (dummy) values by two values around 180 degrees
X[-1] = 180.0 - epsilon
X[-2] = 180.0 + epsilon
return X
def getperimeter(grat):
# Calculate perimeter of QUAD projection
xlo, y = grat.gmap.topixel((-45.0-epsilon, 0.0))
xhi, y = grat.gmap.topixel((315+epsilon, 0.0))
x, ylo = grat.gmap.topixel((180, -45))
x, yhi = grat.gmap.topixel((180, 45))
x1, y = grat.gmap.topixel((45-epsilon, 0.0))
x, ylolo = grat.gmap.topixel((0, -135+epsilon))
x, yhihi = grat.gmap.topixel((0, 135-epsilon))
perimeter = [(xlo,ylo), (x1,ylo), (x1,ylolo), (xhi,ylolo), (xhi,yhihi),
(x1,yhihi), (x1,yhi), (xlo,yhi), (xlo,ylo)]
return perimeter
def plotcoast(fn, frame, grat, col='k', lim=100, decim=5, plotsym=None, sign=1.0):
coasts = tabarray.tabarray(fn, comchar='s') # Read two columns from file
for segment in coasts.segments:
coastseg = coasts[segment].T
xw = sign * coastseg[1]; yw = coastseg[0] # First one appears to be Latitude
xs = xw; ys = yw # Reset lists which store valid pos.
if 1:
# Mask arrays if outside plot box
xp, yp = grat.gmap.topixel((numpy.array(xs),numpy.array(ys)))
# Be sure you understand
# the operator precedence: (a > 2) | (a < 5) is the proper syntax
# because a > 2 | a < 5 will result in an error due to the fact
# that 2 | a is evaluated first.
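            # Illustration (hypothetical values): with a = numpy.array([1, 3, 6])
            #   (a > 2) | (a < 5)  ->  array([ True,  True,  True])
            #   a > 2 | a < 5      ->  raises an error, since 2 | a is evaluated first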
xp = numpy.ma.masked_where(numpy.isnan(xp) |
(xp > grat.pxlim[1]) | (xp < grat.pxlim[0]), xp)
yp = numpy.ma.masked_where(numpy.isnan(yp) |
(yp > grat.pylim[1]) | (yp < grat.pylim[0]), yp)
# Mask array could be of type numpy.bool_ instead of numpy.ndarray
if numpy.isscalar(xp.mask):
xp.mask = numpy.array(xp.mask, 'bool')
if numpy.isscalar(yp.mask):
yp.mask = numpy.array(yp.mask, 'bool')
# Count the number of positions in this list that are inside the box
xdc = []; ydc = []
for i in range(len(xp)):
if not xp.mask[i] and not yp.mask[i]:
if not i%decim:
xdc.append(xp.data[i])
ydc.append(yp.data[i])
if len(xdc) >= lim:
if plotsym == None:
frame.plot(xdc, ydc, color=col)
else:
frame.plot(xdc, ydc, '.', markersize=1, color=col)
def plotfig(fignum, smallversion=False):
# Set defaults
pixel = None
markerpos = None
border = None
title = ''
titlepos = 1.02
dec0 = 89.9999999999
lat_constval = None
lon_constval = None
perimeter = None
lon_world = list(range(0,360,30))
lat_world = [-dec0, -60, -30, 30, 60, dec0]
deltapx = deltapy = 0.0
annotatekwargs0 = {'color':'r'}
annotatekwargs1 = {'color':'b'}
plotdata = False
fsize = 11
figsize = (7,6)
datasign = -1
addangle0 = addangle1 = 0.0
drawgrid = False
oblique = None
framebackgroundcolor = None
grat = None # Just initialize
ilabs1 = ilabs2 = None
fig = plt.figure(figsize=figsize)
frame = fig.add_axes((0.1,0.05,0.8,0.85))
if fignum == 1:
# Fig 2 in celestial article (Calabretta et al) shows a positive cdelt1
title = r"""Plate Carree projection (CAR), non oblique with:
$(\alpha_0,\delta_0,\phi_p) = (120^\circ,0^\circ,0^\circ)$. (Cal. fig.2)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---CAR',
'CRVAL1' : 120.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : 5.0,
'CTYPE2' : 'DEC--CAR',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0,
'LONPOLE' : 0.0,
}
X = numpy.arange(0,380.0,30.0);
Y = numpy.arange(-90,100,30.0) # i.e. include +90 also
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(header, axnum=(1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
#pixel = grat.gmap.topixel((120.0,60))
markerpos = "120 deg 60 deg"
header['CRVAL1'] = 0.0
border = annim.Graticule(header, axnum=(1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon,-180+epsilon, 0), starty=(-90,0,90))
lat_world = lon_world = None
elif fignum == 2:
title = r"""Plate Carree projection (CAR), oblique with:
$(\alpha_0,\delta_0,\phi_p) = (120^\circ,0^\circ,0^\circ)$
and obviously cdelt1 $>$ 0. (Cal. fig. 2)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---CAR',
'CRVAL1' : 120.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : 5.0,
'CTYPE2' : 'DEC--CAR',
'CRVAL2' : 60.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0,
'LONPOLE' : 0.0,
}
X = numpy.arange(0,360.0,30.0);
Y = numpy.arange(-90,100,30.0) # i.e. include +90 also
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
markerpos = "120 deg 60 deg"
# Get the non-oblique version for the border
header['CRVAL1'] = 0.0
header['CRVAL2'] = 0.0
border = annim.Graticule(header, axnum= (1,2), boxsamples=10000, wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon,-180+epsilon), starty=(-90,90))
lat_world = lon_world = None
elif fignum == 3:
mu = 2.0; gamma = 30.0
title = r"""Slant zenithal (azimuthal) perspective projection (AZP) with:
$\gamma=30$ and $\mu=2$ (Cal. fig.6)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---AZP',
'CRVAL1' :0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--AZP',
'CRVAL2' : dec0, 'CRPIX2' : 30, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'PV2_1' : mu, 'PV2_2' : gamma,
}
lowval = (180.0/numpy.pi)*numpy.arcsin(-1.0/mu) + 0.00001 # Calabretta eq.32
X = numpy.arange(0,360,15.0)
Y = numpy.arange(-30,90,15.0);
Y[0] = lowval # Add lowest possible Y to array
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum=(1,2), wylim=(lowval,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0(0, lw=2)
grat.setp_lineswcs1(0, lw=2)
grat.setp_lineswcs1(lowval, lw=2, color='g')
lat_world = [0, 30, 60, 90]
elif fignum == 4:
mu = 2.0; phi = 180.0; theta = 60
title = r"""Slant zenithal perspective (SZP) with:
    $(\mu,\phi,\theta)=(2,180,60)$ with special algorithm for border (Cal. fig.7)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---SZP',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--SZP',
'CRVAL2' : dec0, 'CRPIX2' : 20, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'PV2_1' : mu, 'PV2_2' : phi, 'PV2_3' : theta,
}
X = numpy.arange(0,360.0,30.0)
Y = numpy.arange(-90,90,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum=(1,2), wylim=(-90.0,90.0), wxlim=(-180,180),
startx=X, starty=Y)
# PROBLEM: markerpos = "180 deg -30 deg"
grat.setp_lineswcs0(0, lw=2)
grat.setp_lineswcs1(0, lw=2)
# grat.setp_tick(plotaxis=wcsgrat.top, rotation=30, ha='left')
titlepos = 1.01
# Special care for the boundary
# The algorithm seems to work but is not very accurate
xp = -mu * numpy.cos(theta*numpy.pi/180.0)* numpy.sin(phi*numpy.pi/180.0)
yp = mu * numpy.cos(theta*numpy.pi/180.0)* numpy.cos(phi*numpy.pi/180.0)
zp = mu * numpy.sin(theta*numpy.pi/180.0) + 1.0
a = numpy.linspace(0.0,360.0,500)
arad = a*numpy.pi/180.0
rho = zp - 1.0
sigma = xp*numpy.sin(arad) - yp*numpy.cos(arad)
s = numpy.sqrt(rho*rho+sigma*sigma)
omega = numpy.arcsin(1/s)
psi = numpy.arctan2(sigma,rho)
thetaxrad = psi - omega
thetax = thetaxrad * 180.0/numpy.pi + 5
g = grat.addgratline(a, thetax, pixels=False)
grat.setp_linespecial(g, lw=2, color='c')
# Select two starting points for a scan in pixel to find borders
g2 = grat.scanborder(68.26,13,3,3)
g3 = grat.scanborder(30,66.3,3,3)
grat.setp_linespecial(g2, color='r', lw=1)
grat.setp_linespecial(g3, color='r', lw=1)
elif fignum == 5:
title = r"Gnomonic projection (TAN) diverges at $\theta=0$. (Cal. fig.8)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---TAN',
'CRVAL1' :0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.0,
'CTYPE2' : 'DEC--TAN',
'CRVAL2' : dec0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0,
}
X = numpy.arange(0,360.0,15.0)
#Y = numpy.arange(0,90,15.0)
Y = [20, 30,45, 60, 75, 90]
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(20.0,90.0), wxlim=(0,360),
startx=X, starty=Y)
lat_constval = 20
lat_world = [20, 30, 60, dec0]
grat.setp_lineswcs1(20, color='g', linestyle='--')
elif fignum == 6:
title = r"Stereographic projection (STG) diverges at $\theta=-90$. (Cal. fig.9)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---STG',
'CRVAL1' :0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -12.0,
'CTYPE2' : 'DEC--STG',
'CRVAL2' : dec0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 12.0,
}
X = numpy.arange(0,360.0,30.0)
Y = numpy.arange(-60,90,10.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-60,90.0), wxlim=(0,360),
startx=X, starty=Y)
lat_constval = -62
lat_world = list(range(-50, 10, 10))
elif fignum == 7:
title = r"Slant orthograpic projection (SIN) with: $\xi=\frac{-1}{\sqrt{6}}$ and $\eta=\frac{1}{\sqrt{6}}$ (Cal. fig.10b)"
xi = -1/numpy.sqrt(6); eta = 1/numpy.sqrt(6)
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---SIN',
'CRVAL1' :0.0, 'CRPIX1' : 40, 'CUNIT1' : 'deg', 'CDELT1' : -2,
'CTYPE2' : 'DEC--SIN',
'CRVAL2' : dec0, 'CRPIX2' : 30, 'CUNIT2' : 'deg', 'CDELT2' : 2,
'PV2_1' : xi, 'PV2_2' : eta
}
X = numpy.arange(0,360.0,30.0)
Y = numpy.arange(-90,90,10.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
# Special care for the boundary (algorithm from Calabretta et al)
a = numpy.linspace(0,360,500)
arad = a*numpy.pi/180.0
thetaxrad = -numpy.arctan(xi*numpy.sin(arad)-eta*numpy.cos(arad))
thetax = thetaxrad * 180.0/numpy.pi + 0.000001 # Little shift to avoid NaN's at border
g = grat.addgratline(a, thetax, pixels=False)
grat.setp_linespecial(g, color='g', lw=1)
lat_constval = 50
lon_constval = 180
lat_world = [0,30,60,dec0]
elif fignum == 8:
title = r"Zenithal equidistant projection (ARC). (Cal. fig.11)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---ARC',
'CRVAL1' :0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.0,
'CTYPE2' : 'DEC--ARC',
'CRVAL2' : dec0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0
}
X = numpy.arange(0,360.0,30.0)
Y = numpy.arange(-90,90,30.0)
Y[0]= -89.999999 # Graticule for -90 exactly is not plotted
#lat_world = range(-80,80,20)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
elif fignum == 9:
title = r"""Zenithal polynomial projection (ZPN) with PV2_n parameters 0 to 7.
(Cal. fig.12)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---ZPN',
'CRVAL1' :0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.0,
'CTYPE2' : 'DEC--ZPN',
'CRVAL2' : dec0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0,
'PV2_0' : 0.05, 'PV2_1' : 0.975, 'PV2_2' : -0.807, 'PV2_3' : 0.337, 'PV2_4' : -0.065,
'PV2_5' : 0.01, 'PV2_6' : 0.003,' PV2_7' : -0.001
}
X = numpy.arange(0,360.0,30.0)
#Y = numpy.arange(-70,90,30.0) # Diverges (this depends on selected parameters)
Y = [-70, -60, -45, -30, 0, 15, 30, 45, 60, 90]
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-70,90.0), wxlim=(0,360),
startx=X, starty=Y)
# Configure annotations
lat_constval = -72
lat_world = [-60, -30, 0, 60, dec0]
addangle0 = 90.0
annotatekwargs1.update({'ha':'left'})
elif fignum == 10:
title = r"Zenith equal area projection (ZEA). (Cal. fig.13)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---ZEA',
'CRVAL1' :0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -3.0,
'CTYPE2' : 'DEC--ZEA',
'CRVAL2' : dec0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 3.0
}
X = numpy.arange(0,360.0,30.0)
Y = numpy.arange(-90,90,30.0)
Y[0]= -dec0+0.00000001
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs1(position=0, color='g', lw=2) # Set attributes for graticule line at lat = 0
lat_world = [-dec0, -30, 30, 60]
elif fignum == 11:
title = r"Airy projection (AIR) with $\theta_b = 45^\circ$. (Cal. fig.14)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---AIR',
'CRVAL1' :0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--AIR',
'CRVAL2' : dec0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0
}
X = numpy.arange(0,360.0,30.0)
Y = numpy.arange(-30,90,10.0)
# Diverges at dec = -90, start at dec = -30
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-30,90.0), wxlim=(0,360),
startx=X, starty=Y)
lat_world = [-30, -20, -10, 10, 40, 70]
# CYLINDRICALS
elif fignum == 12:
title = r"Gall's stereographic projection (CYP) with $\mu = 1$ and $\theta_x = 45^\circ$. (Cal. fig.16)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---CYP',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -3.5,
'CTYPE2' : 'DEC--CYP',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 3.5,
'PV2_1' : 1, 'PV2_2' : numpy.sqrt(2.0)/2.0
}
X = cylrange()
Y = numpy.arange(-90,100,30.0) # i.e. include +90 also
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lat_world = [-90, -60,-30, 30, 60, dec0]
lon_world = list(range(0,360,30))
lon_world.append(180+epsilon)
annotatekwargs0.update({'va':'bottom', 'ha':'right'})
elif fignum == 13:
title = r"Lambert's equal area projection (CEA) with $\lambda = 1$. (Cal. fig.17)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---CEA',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.0,
'CTYPE2' : 'DEC--CEA',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0,
'PV2_1' : 1
}
X = cylrange()
Y = numpy.arange(-90,100,30.0) # i.e. include +90 also
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lat_world = [-60,-30, 30, 60]
lon_world = list(range(0,360,30))
lon_world.append(180.00000001)
elif fignum == 14:
title = "Plate Carree projection (CAR). (Cal. fig.18)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---CAR',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--CAR',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
}
X = cylrange()
Y = numpy.arange(-90,100,30.0) # i.e. include +90 also
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lat_world = [-90, -60,-30, 30, 60, dec0]
lon_world = list(range(0,360,30))
lon_world.append(180.00000001)
elif fignum == 15:
title = "Mercator's projection (MER). (Cal. fig.19)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---MER',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.0,
'CTYPE2' : 'DEC--MER',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0,
}
X = cylrange()
Y = numpy.arange(-80,90,10.0) # Diverges at +-90 so exclude these values
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(header, axnum= (1,2), wylim=(-80,80.0), wxlim=(0,360),
startx=X, starty=Y)
lat_world = [-90, -60,-30, 30, 60, dec0]
lon_world = list(range(0,360,30))
lon_world.append(180+epsilon)
grat.setp_lineswcs1((-80,80), linestyle='--', color='g')
elif fignum == 16:
title = "Sanson-Flamsteed projection (SFL). (Cal. fig.20)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---SFL',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--SFL',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
}
X = cylrange()
Y = numpy.arange(-90,100,30.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lat_world = [-dec0, -60,-30, 30, 60, dec0]
lon_world = list(range(0,360,30))
lon_world.append(180+epsilon)
elif fignum == 17:
title = "Parabolic projection (PAR). (Cal. fig.21)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---PAR',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--PAR',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
}
X = cylrange()
Y = numpy.arange(-90,100,30.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lat_world = [-dec0, -60,-30, 30, 60, dec0]
lon_world = list(range(0,360,30))
lon_world.append(180+epsilon)
elif fignum == 18:
title= "Mollweide's projection (MOL). (Cal. fig.22)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---MOL',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--MOL',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
}
X = cylrange()
Y = numpy.arange(-90,100,30.0) # Diverges at +-90 so exclude these values
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lat_world = [-60,-30, 30, 60]
lon_world = list(range(0,360,30))
lon_world.append(180+epsilon)
elif fignum == 19:
title = "Hammer Aitoff projection (AIT). (Cal. fig.23)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---AIT',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--AIT',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
}
X = cylrange()
Y = numpy.arange(-90,100,30.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lat_world = [-dec0, -60,-30, 30, 60, dec0]
lon_world = list(range(0,360,30))
lon_world.append(180+epsilon)
# CONIC PROJECTIONS
elif fignum == 20:
theta_a = 45
t1 = 20.0; t2 = 70.0
eta = abs(t1-t2)/2.0
title = r"""Conic perspective projection (COP) with:
$\theta_a=45^\circ$, $\theta_1=20^\circ$ and $\theta_2=70^\circ$. (Cal. fig.24)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---COP',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.5,
'CTYPE2' : 'DEC--COP',
'CRVAL2' : theta_a, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.5,
'PV2_1' : theta_a, 'PV2_2' : eta
}
X = numpy.arange(0,370.0,30.0); X[-1] = 180+epsilon
Y = numpy.arange(-30,90,15.0) # Diverges at theta_a +- 90.0
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-30,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs1(-30, linestyle='--', color='g')
lon_world.append(180+epsilon)
elif fignum == 21:
theta_a = -45
t1 = -20.0; t2 = -70.0
eta = abs(t1-t2)/2.0
title = r"""Conic equal area projection (COE) with:
$\theta_a=-45$, $\theta_1=-20$ and $\theta_2=-70$. (Cal. fig.25)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---COE',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--COE',
'CRVAL2' : theta_a, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'PV2_1' : theta_a, 'PV2_2' : eta
}
X = cylrange()
Y = numpy.arange(-90,91,30.0); Y[-1] = dec0
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lon_world.append(180+epsilon)
lat_constval = 5
lat_world = [-60,-30,0,30,60]
addangle0 = -90.0
elif fignum == 22:
theta_a = 45
t1 = 20.0; t2 = 70.0
eta = abs(t1-t2)/2.0
title = r"""Conic equidistant projection (COD) with:
$\theta_a=45$, $\theta_1=20$ and $\theta_2=70$. (Cal. fig.26)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---COD',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.0,
'CTYPE2' : 'DEC--COD',
'CRVAL2' : theta_a, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0,
'PV2_1' : theta_a, 'PV2_2' : eta
}
X = cylrange()
Y = numpy.arange(-90,91,15)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lon_world.append(180.0+epsilon)
elif fignum == 23:
theta_a = 45
t1 = 20.0; t2 = 70.0
eta = abs(t1-t2)/2.0
title = r"""Conic orthomorfic projection (COO) with:
$\theta_a=45$, $\theta_1=20$ and $\theta2=70$. (Cal. fig.27)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---COO',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--COO',
'CRVAL2' : theta_a, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'PV2_1' : theta_a, 'PV2_2' : eta
}
X = cylrange()
Y = numpy.arange(-30,90,30.0) # Diverges at theta_a= -90.0
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-30,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs1(-30, linestyle='--', color='g')
lon_world.append(180.0+epsilon)
# POLYCONIC AND PSEUDOCONIC
elif fignum == 24:
theta1 = 45
title = r"Bonne's equal area projection (BON) with $\theta_1=45$. (Cal. fig.28)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---BON',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--BON',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'PV2_1' : theta1
}
X = polrange()
Y = numpy.arange(-90,100,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lon_world.append(180+epsilon)
elif fignum == 25:
title = r"Polyconic projection (PCO). (Cal. fig.29)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---PCO',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.0,
'CTYPE2' : 'DEC--PCO',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0
}
X = polrange()
Y = numpy.arange(-90,100,15.0)
        # !!!!!! Let the world coordinates for constant latitude run from -180,180
# instead of 0,360. Then one prevents the connection between the two points
# 179.9999 and 180.0001 which is a jump, but smaller than the definition of
# a rejected jump in the wcsgrat module.
# Also we need to increase the value of 'gridsamples' to
# increase the relative size of a jump.
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2),
wylim=(-90,90.0), wxlim=(-180,180),
startx=X, starty=Y, gridsamples=2000)
lon_world.append(180+epsilon)
# QUAD CUBE PROJECTIONS
elif fignum == 26:
title = r"Tangential spherical cube projection (TSC). (Cal. fig.30)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---TSC',
'CRVAL1' : 0.0, 'CRPIX1' : 85, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--TSC',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0
}
X = numpy.arange(0,370.0,15.0)
Y = numpy.arange(-90,100,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=X, starty=Y)
# Make a polygon for the border
perimeter = getperimeter(grat)
elif fignum == 27:
title = r"COBE quadrilateralized spherical cube projection (CSC). (Cal. fig.31)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---CSC',
'CRVAL1' : 0.0, 'CRPIX1' : 85, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--CSC',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0
}
X = numpy.arange(0,370.0,15.0)
Y = numpy.arange(-90,100,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=X, starty=Y)
perimeter = getperimeter(grat)
elif fignum == 28:
title = r"Quadrilateralized spherical cube projection (QSC). (Cal. fig.32)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---QSC',
'CRVAL1' : 0.0, 'CRPIX1' : 85, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--QSC',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
}
X = numpy.arange(-180,180,15)
Y = numpy.arange(-90,100,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
perimeter = getperimeter(grat)
deltapx = 1
plotdata = True
elif fignum == 280:
title = r"Quadrilateralized spherical cube projection (QSC). (Cal. fig.32)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---QSC',
'CRVAL1' : 0.0, 'CRPIX1' : 85, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--QSC',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
}
X = numpy.arange(-180,180,15)
Y = numpy.arange(-90,100,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
lon_world = list(range(0,360,30))
lat_world = [-90, -60, -30, 30, 60, dec0]
perimeter = getperimeter(grat)
deltapx = 1
plotdata = True
elif fignum == 270:
title = r"Quadrilateralized spherical cube projection (QSC). (Cal. fig.32)"
header = {'NAXIS' : 3, 'NAXIS1': 100, 'NAXIS2': 80, 'NAXIS3' : 6,
'CTYPE1' : 'RA---QSC',
'CRVAL1' : 0.0, 'CRPIX1' : 85, 'CUNIT1' : 'deg', 'CDELT1' : -7.0,
'CTYPE2' : 'DEC--QSC',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 7.0,
'CTYPE3' : 'CUBEFACE',
'CRVAL3' : 0, 'CRPIX3' : 2,'CDELT3' : 90, 'CUNIT3' : 'deg',
}
X = numpy.arange(0,370.0,15.0)
Y = numpy.arange(-90,100,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=X, starty=Y)
lon_world = list(range(-180,180,30))
lat_world = [-90, -60, -30, 30, 60, dec0]
perimeter = getperimeter(grat)
elif fignum == 29:
title = r"""Zenith equal area projection (ZEA) oblique with:
$\alpha_p=0$, $\delta_p=30$ and $\phi_p=180$. (Cal. fig.33a)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---ZEA',
'CRVAL1' :0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -3.5,
'CTYPE2' : 'DEC--ZEA',
'CRVAL2' : 30.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 3.5,
}
X = numpy.arange(0,360,15.0)
Y = numpy.arange(-90,90,15.0)
Y[0]= -dec0
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
elif fignum == 30:
title = r"""Zenith equal area projection (ZEA) oblique with:
$\alpha_p=45$, $\delta_p=30$ and $\phi_p=180$. (Cal. fig.33b)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---ZEA',
'CRVAL1' :45.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -3.5,
'CTYPE2' : 'DEC--ZEA',
'CRVAL2' : 30.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 3.5
}
X = numpy.arange(0,360.0,15.0)
Y = numpy.arange(-90,90,15.0)
Y[0]= -dec0
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0((0,180), color='g', lw=2)
elif fignum == 31:
title = r"""Zenith equal area projection (ZEA) oblique with:
$\alpha_p=0$, $\theta_p=30$ and $\phi_p = 150$. (Cal. fig.33c)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---ZEA',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -3.5,
'CTYPE2' : 'DEC--ZEA',
'CRVAL2' : 30.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 3.5,
'PV1_3' : 150.0 # Works only with patched wcslib 4.3
}
X = numpy.arange(0,360.0,15.0)
Y = numpy.arange(-90,90,15.0)
Y[0]= -dec0
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0((0,180), color='g', lw=2)
elif fignum == 32:
title = r"""Zenith equal area projection (ZEA) oblique with:
$\alpha_p=0$, $\theta_p=30$ and $\phi_p = 75$ (Cal. fig.33d)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---ZEA',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -3.5,
'CTYPE2' : 'DEC--ZEA',
'CRVAL2' : 30.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 3.5,
'PV1_3' : 75.0
}
X = numpy.arange(0,360.0,15.0)
Y = numpy.arange(-90,90,15.0)
Y[0]= -dec0
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0((0,180), color='g', lw=2)
elif fignum == 33:
theta_a = 45.0
t1 = 20.0; t2 = 70.0
eta = abs(t1-t2)/2.0
title = r"""Conic equidistant projection (COD) oblique with $\theta_a=45$, $\theta_1=20$
and $\theta_2=70$, $\alpha_p = 0$, $\delta_p = 30$, $\phi_p = 75$ also:
$(\phi_0,\theta_0) = (0,90^\circ)$. (Cal. fig.33d)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---COD',
'CRVAL1' : 0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.0,
'CTYPE2' : 'DEC--COD',
'CRVAL2' : 30, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0,
'PV2_1' : theta_a,
'PV2_2' : eta,
'PV1_1' : 0.0, 'PV1_2' : 90.0, # IMPORTANT. This is a setting from section 7.1, p 1103
'LONPOLE' :75.0
}
X = numpy.arange(0,370.0,15.0); X[-1] = 180.000001
Y = numpy.arange(-90,100,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
# Draw border with standard graticule
header['CRVAL1'] = 0.0
header['CRVAL2'] = theta_a
header['LONPOLE'] = 0.0
del header['PV1_1']
del header['PV1_2']
# Non oblique version as border
border = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon, -180+epsilon), starty=(-90,90))
border.setp_lineswcs0(color='g') # Show borders in different color
border.setp_lineswcs1(color='g')
elif fignum == 34:
title = r"""Hammer Aitoff projection (AIT) oblique with:
$(\alpha_p,\delta_p) = (0^\circ,30^\circ)$, $\phi_p = 75^\circ$ also:
$(\phi_0,\theta_0) = (0^\circ,90^\circ)$. (Cal. fig.34d)"""
# Header works only with a patched wcslib 4.3
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---AIT',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--AIT',
'CRVAL2' : 30.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'LONPOLE' :75.0,
'PV1_1' : 0.0, 'PV1_2' : 90.0, # IMPORTANT. This is a setting from Cal.section 7.1, p 1103
}
X = numpy.arange(0,390.0,15.0);
Y = numpy.arange(-90,100,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
# Draw border with standard graticule
header['CRVAL1'] = 0.0
header['CRVAL2'] = 0.0
del header['PV1_1']
del header['PV1_2']
header['LONPOLE'] = 0.0
header['LATPOLE'] = 0.0
border = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon, -180+epsilon), skipy=True)
border.setp_lineswcs0(color='g') # Show borders in different color
border.setp_lineswcs1(color='g')
elif fignum == 35:
title = r"""COBE quadrilateralized spherical cube projection (CSC) oblique with:
$(\alpha_p,\delta_p) = (0^\circ,30^\circ)$, $\phi_p = 75^\circ$ also:
$(\phi_0,\theta_0) = (0^\circ,90^\circ)$. (Cal. fig.34d)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---CSC',
'CRVAL1' : 0.0, 'CRPIX1' : 85, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--CSC',
'CRVAL2' : 30.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'LONPOLE': 75.0,
'PV1_1' : 0.0, 'PV1_2' : 90.0,
}
X = numpy.arange(0,370.0,30.0)
Y = numpy.arange(-90,100,30.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=X, starty=Y)
# Take border from non-oblique version
header['CRVAL2'] = 0.0
del header['PV1_1']
del header['PV1_2']
del header['LONPOLE']
border = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
skipx=True, skipy=True)
perimeter = getperimeter(border)
elif fignum == 36:
title = 'Earth in zenithal perspective (AZP). (Cal. fig.36)'
   # The CTYPE values are TLON, TLAT. These are recognized by WCSlib as longitude and latitude.
# Any other prefix is also valid.
header = {'NAXIS' : 2, 'NAXIS1': 2048, 'NAXIS2': 2048,
'PC1_1' : 0.9422, 'PC1_2' : -0.3350,
'PC2_1' : 0.3350, 'PC2_2' : 0.9422,
'CTYPE1' : 'TLON-AZP',
'CRVAL1' : 31.15, 'CRPIX1' : 681.67, 'CUNIT1' : 'deg', 'CDELT1' : 0.008542,
'CTYPE2' : 'TLAT-AZP',
'CRVAL2' : 30.03, 'CRPIX2' : 60.12, 'CUNIT2' : 'deg', 'CDELT2' : 0.008542,
'PV2_1' : -1.350, 'PV2_2' : 25.8458,
'LONPOLE' : 143.3748,
}
X = numpy.arange(-30,60.0,10.0)
Y = numpy.arange(-40,65,10.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-30,90.0), wxlim=(-20,60),
startx=X, starty=Y, gridsamples=4000)
grat.setp_lineswcs1(color='#B30000')
grat.setp_lineswcs0(color='#B30000')
grat.setp_lineswcs0(0, color='r', lw=2)
grat.setp_plotaxis('bottom', mode='all_ticks', label='Latitude / Longitude')
grat.setp_plotaxis('left', mode='switched_ticks', label='Latitude')
grat.setp_plotaxis('right', mode='native_ticks')
grat.setp_tick(wcsaxis=0, color='g')
grat.setp_tick(wcsaxis=1, color='m')
grat.setp_tick(wcsaxis=1, plotaxis=('bottom','right'), color='m', rotation=-30, ha='left')
#grat.setp_tick(plotaxis=wcsgrat.right, backgroundcolor='yellow')
grat.setp_tick(plotaxis='left', position=-10, visible=False)
g = grat.scanborder(560, 1962, 2)
grat.setp_linespecial(g, color='b', lw=2)
lat_world = lon_world = []
drawgrid = True
plotdata = True
datasign = +1
# Proof that WCSlib thinks TLON, TLAT are valid longitudes & latitudes
print("TLON and TLAT are recognized as:", grat.gmap.types)
elif fignum == 37:
title = 'WCS polyconic (PGSBOX fig.1)'
rot = 30.0 *numpy.pi/180.0
header = {'NAXIS' : 2, 'NAXIS1': 512, 'NAXIS2': 512,
'CTYPE1' : 'RA---PCO',
'PC1_1' : numpy.cos(rot), 'PC1_2' : numpy.sin(rot),
'PC2_1' : -numpy.sin(rot), 'PC2_2' : numpy.cos(rot),
'CRVAL1' : 332.0, 'CRPIX1' : 192, 'CUNIT1' : 'deg', 'CDELT1' : -1.0/5.0,
'CTYPE2' : 'DEC--PCO',
'CRVAL2' : 40.0, 'CRPIX2' : 640, 'CUNIT2' : 'deg', 'CDELT2' : 1.0/5.0,
'LONPOLE' : -30.0,
}
X = numpy.arange(-180,180.0,15.0);
Y = numpy.arange(-90,120,15.0)
# Here we demonstrate how to avoid a jump at the right corner boundary
# of the plot by increasing the value of 'gridsamples'.
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=X, starty=Y, gridsamples=4000)
grat.setp_tick(position=(-15.0,-45.0, -60.0,-75.0), visible=False)
deltapx = 3
header['CRVAL1'] = 0.0
header['CRVAL2'] = 0.0
header['LONPOLE'] = 999
border = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon, -180+epsilon), starty=(-90,90))
border.setp_gratline((0,1), color='g', lw=2)
border.setp_plotaxis((0,1,2,3), mode='no_ticks', visible=False)
elif fignum == 38:
theta_a = 60.0; eta = 15.0
title = r"WCS conic equal area projection with $\theta_a=60$ and $\eta=15$ (Cal. PGSBOX fig.2)"
header = {'NAXIS' : 2, 'NAXIS1': 512, 'NAXIS2': 512,
'CTYPE1' : 'RA---COE',
'CRVAL1' : 90.0, 'CRPIX1' : 256, 'CUNIT1' : 'deg', 'CDELT1' : -1.0/3.0,
'CTYPE2' : 'DEC--COE',
'CRVAL2' : 30.0, 'CRPIX2' : 256, 'CUNIT2' : 'deg', 'CDELT2' : 1.0/3.0,
'LONPOLE' : 150.0,
'PV2_1' : theta_a, 'PV2_2' : eta
}
X = numpy.arange(0,390.0,30.0);
Y = numpy.arange(-90,120,30.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0(color='r')
grat.setp_lineswcs1(color='b')
grat.setp_tick(plotaxis=1, position=(150.0,210.0), visible=False)
deltapx = 10
# Draw border with standard graticule
header['CRVAL1'] = 0.0;
header['CRVAL2'] = 60.0
header['LONPOLE'] = 999
header['LATPOLE'] = 999
border = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon, -180+epsilon), starty=(-90,90))
border.setp_gratline((0,1), color='g', lw=2)
border.setp_plotaxis((0,1,2,3), mode='no_ticks', visible=False)
framebackgroundcolor = 'k' # i.e. black
annotatekwargs0.update({'color':'w'})
annotatekwargs1.update({'color':'w'})
elif fignum == 39:
theta1 = 35
title = r"""Bonne's equal area projection (BON) with conformal latitude $\theta_1=35$ and
$\alpha_p=0^\circ$, $\theta_p=+45^\circ$ and N.C.P. at $(45^\circ,0^\circ)$. (Cal. PGSBOX example)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---BON',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--BON',
'CRVAL2' : 0.0, 'CRPIX2' : 35, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'PV2_1' : theta1
}
X = polrange()
Y = numpy.arange(-90.0,100.0,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
annotatekwargs0.update({'visible':False})
annotatekwargs1.update({'visible':False})
grat.setp_lineswcs0(color='#339333') # Dark green
grat.setp_lineswcs1(color='#339333')
header['LONPOLE'] = 45.0 # Or PV1_3
header['CRVAL1'] = 0.0
header['CRVAL2'] = 45.0
oblique = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
oblique.setp_lineswcs0(0.0, color='y')
oblique.setp_lineswcs1(0.0, color='y')
oblique.setp_lineswcs0(list(range(15,360,45)), color='b')
oblique.setp_lineswcs1([15,-15,60, -60], color='b')
oblique.setp_lineswcs0(list(range(30,360,45)), color='r')
oblique.setp_lineswcs1([30,-30,75, -75], color='r')
oblique.setp_lineswcs0(list(range(45,360,45)), color='w')
oblique.setp_lineswcs1((-45,45), color='w')
framebackgroundcolor = 'k'
if not smallversion:
txt ="""Green: Native, non-oblique graticule. Yellow: Equator and prime meridian
Others: Colour coded oblique graticule"""
plt.figtext(0.1, 0.008, txt, fontsize=10)
#------------------------------------ Settings ----------------------------------------
# Apply some extra settings
   if framebackgroundcolor is not None:
frame.set_axis_bgcolor(framebackgroundcolor)
# Plot coastlines if required
if plotdata:
if fignum == 36:
plotcoast('WDB/world.txt', frame, grat, col='k', lim=100)
else:
plotcoast('WDB/world.txt', frame, grat, col='r', lim=50, decim=20, plotsym=',', sign=-1)
# Plot alternative borders
   if perimeter is not None:
p = plt.Polygon(perimeter, facecolor='#d6eaef', lw=2)
frame.add_patch(p)
Xp, Yp = list(zip(*perimeter))
frame.plot(Xp, Yp, color='r')
# Plot labels inside graticule if required
annotatekwargs0.update({'fontsize':fsize})
annotatekwargs1.update({'fontsize':fsize})
ilabs1 = grat.Insidelabels(wcsaxis=0,
world=lon_world, constval=lat_constval, deltapx=deltapx, deltapy=deltapy,
addangle=addangle0, fmt="$%g$", **annotatekwargs0)
ilabs2 = grat.Insidelabels(wcsaxis=1,
world=lat_world, constval=lon_constval, deltapx=deltapx, deltapy=deltapy,
addangle=addangle1, fmt="$%g$", **annotatekwargs1)
# Plot just 1 pixel c.q. marker
   if markerpos is not None:
annim.Marker(pos=markerpos, marker='o', color='red' )
if drawgrid:
pixellabels = annim.Pixellabels(plotaxis=(2,3))
# Plot the title
if smallversion:
t = frame.set_title(title, color='g', fontsize=10)
else:
t = frame.set_title(title, color='g', fontsize=13, linespacing=1.5)
t.set_y(titlepos)
#gratplot.plot()
annim.plot()
annim.interact_toolbarinfo()
if smallversion:
fn = "allsky-fig%d_small.png"%fignum
else:
fn = "allsky-fig%d.png"%fignum
plt.show()
if __name__ == "__main__":
   # Process command line arguments. The first argument is the number of the all-sky plot;
   # a second argument (any character) switches the figure to the small format.
#
# Usage:
# python plotwcs.py [<figure number> <s>]
# e.g.:
# python plotwcs.py # You will be prompted for a figure number
# python plotwcs.py 23 # Figure 23
# python plotwcs.py 23 s # Figure 23 small version
#
smallversion = False
if len(sys.argv) > 1:
fignum = int(sys.argv[1])
if len(sys.argv) > 2:
figsize = (5.0,5.0)
smallversion = True
fsize = 8
   if fignum is None:
      print("Enter a figure number", file=sys.stderr)
#fignum = eval(input("Enter number of figure: "))
plotfig(fignum, smallversion)
| bsd-3-clause |
fzalkow/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
RJT1990/pyflux | pyflux/arma/tests/test_arimax_inference.py | 1 | 5827 | import numpy as np
import pandas as pd
from pyflux.arma import ARIMAX
# Set up some data to use for the tests
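# (The simulated series below is an AR(1) process with coefficient 0.9 plus two
# exogenous regressors, x1 with weight 0.1 and x2 with weight -0.3 -- the same
# structure the ARIMAX models under test are meant to recover.)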
noise = np.random.normal(0,1,100)
y = np.zeros(100)
x1 = np.random.normal(0,1,100)
x2 = np.random.normal(0,1,100)
for i in range(1,len(y)):
y[i] = 0.9*y[i-1] + noise[i] + 0.1*x1[i] - 0.3*x2[i]
data = pd.DataFrame([y,x1,x2]).T
data.columns = ['y', 'x1', 'x2']
y_oos = np.random.normal(0,1,30)
x1_oos = np.random.normal(0,1,30)
x2_oos = np.random.normal(0,1,30)
data_oos = pd.DataFrame([y_oos,x1_oos,x2_oos]).T
data_oos.columns = ['y', 'x1', 'x2']
def test_bbvi():
"""
Tests an ARIMAX model estimated with BBVI, and tests that the latent variable
    vector length is correct, and that the values are not NaN
"""
model = ARIMAX(formula="y ~ x1", data=data, ar=1, ma=1)
x = model.fit('BBVI',iterations=100, quiet_progress=True)
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_bbvi_mini_batch():
"""
    Tests an ARIMAX model estimated with BBVI and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
model = ARIMAX(formula="y ~ x1", data=data, ar=1, ma=1)
x = model.fit('BBVI',iterations=100, quiet_progress=True, mini_batch=32)
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_bbvi_elbo():
"""
Tests that the ELBO increases
"""
model = ARIMAX(formula="y ~ x1", data=data, ar=1, ma=1)
x = model.fit('BBVI',iterations=200, record_elbo=True, quiet_progress=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_bbvi_mini_batch_elbo():
"""
Tests that the ELBO increases
"""
model = ARIMAX(formula="y ~ x1", data=data, ar=1, ma=1)
x = model.fit('BBVI',iterations=200, mini_batch=32, record_elbo=True, quiet_progress=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_mh():
"""
Tests an ARIMAX model estimated with Metropolis-Hastings, and tests that the latent variable
    vector length is correct, and that the values are not NaN
"""
model = ARIMAX(formula="y ~ x1", data=data, ar=1, ma=1)
x = model.fit('M-H', nsims=200, quiet_progress=True)
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_laplace():
"""
Tests an ARIMAX model estimated with Laplace approximation, and tests that the latent variable
    vector length is correct, and that the values are not NaN
"""
model = ARIMAX(formula="y ~ x1", data=data, ar=1, ma=1)
x = model.fit('Laplace')
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_pml():
"""
Tests an ARIMAX model estimated with PML, and tests that the latent variable
    vector length is correct, and that the values are not NaN
"""
model = ARIMAX(formula="y ~ x1", data=data, ar=1, ma=1)
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_2_bbvi():
"""
Tests an ARIMAX model estimated with BBVI, with multiple predictors, and
    tests that the latent variable vector length is correct, and that the values are not NaN
"""
model = ARIMAX(formula="y ~ x1 + x2", data=data, ar=1, ma=1)
x = model.fit('BBVI',iterations=100, quiet_progress=True)
assert(len(model.latent_variables.z_list) == 6)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_2_bbvi_mini_batch():
"""
    Tests an ARIMAX model estimated with BBVI and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
model = ARIMAX(formula="y ~ x1 + x2", data=data, ar=1, ma=1)
x = model.fit('BBVI',iterations=100, quiet_progress=True, mini_batch=32)
assert(len(model.latent_variables.z_list) == 6)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_2_mh():
"""
    Tests an ARIMAX model estimated with Metropolis-Hastings, with multiple predictors, and
    tests that the latent variable vector length is correct, and that the values are not NaN
"""
model = ARIMAX(formula="y ~ x1 + x2", data=data, ar=1, ma=1)
x = model.fit('M-H', nsims=200, quiet_progress=True)
assert(len(model.latent_variables.z_list) == 6)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_2_laplace():
"""
Tests an ARIMAX model estimated with Laplace, with multiple predictors, and
    tests that the latent variable vector length is correct, and that the values are not NaN
"""
model = ARIMAX(formula="y ~ x1 + x2", data=data, ar=1, ma=1)
x = model.fit('Laplace')
assert(len(model.latent_variables.z_list) == 6)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_2_pml():
"""
Tests an ARIMAX model estimated with PML, with multiple predictors, and
    tests that the latent variable vector length is correct, and that the values are not NaN
"""
model = ARIMAX(formula="y ~ x1 + x2", data=data, ar=1, ma=1)
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 6)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
| bsd-3-clause |
momingsong/ns-3 | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
adas-most/adas-single-chip | source code/Python_ImageProcessing/LBP_SVM_example/SVM_testing_LBP.py | 1 | 1829 | # -*- coding: UTF-8 -*-
import cv2
import numpy as np
import io
import my_lib
from matplotlib import pyplot as plt
ImgCnt = 10
SampleSize = (32,32)
Image_data = np.zeros((ImgCnt,SampleSize[0],SampleSize[1]),np.float32)
for temp_i in range(0,ImgCnt,1):
img = cv2.imread("test_" + str(temp_i) + ".jpeg",0)
Image_data[temp_i] = img
cv2.imwrite("Image_data[0].jpg",Image_data[0])
# cv2.imwrite("Image_data[1].jpg",Image_data[1])
print Image_data.shape
feature_len = Image_data.shape[1]*Image_data.shape[2]
data = np.zeros((ImgCnt,feature_len),np.float32)
his = np.zeros((ImgCnt,8),np.float32)
unitLBP = my_lib.ImgOperations("test.jpg","test.jpg")
for LBPLoop in range(0,ImgCnt,1):
NanCheck = unitLBP.DataToImg(Image_data[LBPLoop],"transferData.jpg")
if( 1 == NanCheck ):
print 0
unitLBP.LBP_Transform("transferData.jpg","LBP[0].jpg",0)
data_cnt = 0
for y in range(0,unitLBP.LBP_Matrix.shape[0],1):
for x in range(0,unitLBP.LBP_Matrix.shape[1],1):
data[LBPLoop][data_cnt] = unitLBP.LBP_Matrix[y][x]
data_cnt = data_cnt + 1
# for y in range(0,unitLBP.LBP_Matrix.shape[0],1):
# for x in range(0,unitLBP.LBP_Matrix.shape[1],1):
# if( 1 == unitLBP.LBP_Matrix[y][x] ):
# his[LBPLoop][0] += 1
# elif( 2 == unitLBP.LBP_Matrix[y][x] ):
# his[LBPLoop][1] += 1
# elif( 4 == unitLBP.LBP_Matrix[y][x] ):
# his[LBPLoop][2] += 1
# elif( 8 == unitLBP.LBP_Matrix[y][x] ):
# his[LBPLoop][3] += 1
# elif( 16 == unitLBP.LBP_Matrix[y][x] ):
# his[LBPLoop][4] += 1
# elif( 32 == unitLBP.LBP_Matrix[y][x] ):
# his[LBPLoop][5] += 1
# elif( 64 == unitLBP.LBP_Matrix[y][x] ):
# his[LBPLoop][6] += 1
# elif( 128 == unitLBP.LBP_Matrix[y][x] ):
# his[LBPLoop][7] += 1
print data
svm = cv2.SVM()
svm.load('LBP0_3type_HW.dat')
result = svm.predict_all(data)
print result
| gpl-3.0 |
leungmanhin/opencog | opencog/python/attic/spatiotemporal/temporal_events/animation.py | 34 | 4896 | from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation
__author__ = 'keyvan'
x_axis = xrange(13)
zeros_13 = zeros(13)
class Animation(object):
def __init__(self, event_a, event_b, event_c, plt=plt):
self.event_a = event_a
self.event_c = event_c
self.event_b_length_beginning = event_b.beginning - event_b.a
self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
self.event_b_length_total = event_b.b - event_b.ending
self.plt = plt
self.fig = plt.figure(1)
self.ax_a_b = self.fig.add_subplot(4, 1, 1)
self.ax_b_c = self.fig.add_subplot(4, 1, 2)
self.ax_a_c = self.fig.add_subplot(4, 1, 3)
self.ax_relations = self.fig.add_subplot(4, 1, 4)
self.ax_a_b.set_xlim(0, 13)
self.ax_a_b.set_ylim(0, 1)
self.ax_b_c.set_xlim(0, 13)
self.ax_b_c.set_ylim(0, 1)
self.ax_a_c.set_xlim(0, 13)
self.ax_a_c.set_ylim(0, 1)
self.rects_a_b = self.ax_a_b.bar(x_axis, zeros_13)
self.rects_b_c = self.ax_b_c.bar(x_axis, zeros_13)
self.rects_a_c = self.ax_a_c.bar(x_axis, zeros_13)
self.line_a = Line2D([], [])
self.line_b = Line2D([], [])
self.line_c = Line2D([], [])
self.ax_relations.add_line(self.line_a)
self.ax_relations.add_line(self.line_b)
self.ax_relations.add_line(self.line_c)
a = min(event_a.a, event_c.a) - self.event_b_length_total
b = max(event_a.b, event_c.b)
self.ax_relations.set_xlim(a, b + self.event_b_length_total)
self.ax_relations.set_ylim(0, 1.1)
# self.interval = TimeInterval(a, b, 150)
self.interval = TimeInterval(a, b, 2)
self.ax_a_b.xaxis.set_minor_formatter(self.ax_a_b.xaxis.get_major_formatter())
self.ax_a_b.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_b.xaxis.set_ticklabels('poDedOP')
self.ax_a_b.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_b_c.xaxis.set_minor_formatter(self.ax_b_c.xaxis.get_major_formatter())
self.ax_b_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_b_c.xaxis.set_ticklabels('poDedOP')
self.ax_b_c.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_a_c.xaxis.set_minor_formatter(self.ax_a_c.xaxis.get_major_formatter())
self.ax_a_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_c.xaxis.set_ticklabels('poDedOP')
self.ax_a_c.xaxis.set_ticklabels('mFsSfM', minor=True)
def init(self):
artists = []
self.line_a.set_data(self.event_a, self.event_a.membership_function)
self.line_b.set_data([], [])
self.line_c.set_data(self.event_c, self.event_c.membership_function)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
for rect, h in zip(self.rects_a_b, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_a_c, (self.event_a * self.event_c).to_list()):
rect.set_height(h)
artists.append(rect)
return artists
def animate(self, t):
interval = self.interval
B = TemporalEventTrapezium(interval[t], interval[t] + self.event_b_length_total,
interval[t] + self.event_b_length_beginning,
interval[t] + self.event_b_length_middle)
plt.figure()
B.plot().show()
a_b = (self.event_a * B).to_list()
b_c = (B * self.event_c).to_list()
self.line_b.set_data(B, B.membership_function)
artists = []
for rect, h in zip(self.rects_a_b, a_b):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, b_c):
rect.set_height(h)
artists.append(rect)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
return artists
def show(self):
fr = len(self.interval) - 1
anim = animation.FuncAnimation(self.fig, self.animate, init_func=self.init,
frames=fr, interval=fr, blit=True)
self.plt.show()
if __name__ == '__main__':
anim = Animation(TemporalEventTrapezium(4, 8, 5, 7),
TemporalEventTrapezium(0, 10, 6, 9),
TemporalEventTrapezium(0.5, 11, 1, 3))
# anim.show()
| agpl-3.0 |
theoryno3/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
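# A minimal illustration of StepSelector (for intuition only; nothing below
# depends on running it): with the default step=2 and 10 input features the
# mask alternates True/False, so columns 0, 2, 4, 6, 8 are retained -- exactly
# the `support` and `support_inds` fixtures defined next, e.g.
#
#     StepSelector().fit(X).get_support(indices=True)  # -> array([0, 2, 4, 6, 8])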
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
jadhavhninad/-CSE_515_MWD_Analytics- | Phase 2/Complete_Team_project/mwd_proj/scripts_p2/Arun/part3.py | 2 | 3599 | import numpy as np
import operator
import scipy.io
import tensorly.backend as T
import tensorly.decomposition
from sklearn.preprocessing import normalize
from time import time
import sys, os
from datetime import datetime
import django
import traceback
os.environ['DJANGO_SETTINGS_MODULE']="mwd_proj.settings"
django.setup()
from mwd_proj.utils.utils2 import *
from django.db.models import Sum
import math
from django.db.models.functions import Lower
from mwd_proj.phase2.models import *
from django.db.models import Q
from mwd_proj.scripts_p2 import (print_genreactor_vector, print_genre_vector, print_user_vector, print_actor_vector, part1)
from mwd_proj.scripts_p2.Arun import ppr
def compute_Semantics_3a(setActors):
print "\n\n"
#setActors = set([1860883,486691,1335137,901175])
actor_dict = {}
act = MovieActor.objects.values_list('actorid', flat=True).distinct()
actor_count = act.count()
for n, each in enumerate(list(act)):
actor_dict[each] = n
setIndex = set([])
inv_a = {v: k for k, v in actor_dict.iteritems()}
for actorid in setActors:
#print(actor_dict[2312401])
setIndex.add(actor_dict[actorid])
results, xyz = part1.compute_Semantics_1c('TF-IDF','Lillard, Matthew','cosine',10,5,False)
#print(len(results))
#print((results[0]))
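    # Closed-form personalized PageRank over the actor similarity graph built
    # above: `setIndex` is the seed set and 0.85 is presumably the usual
    # damping/restart factor (an assumption -- see ppr.closedform for the
    # actual parameter semantics).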
nodes,s=ppr.closedform(setIndex,results,0.85)
#print(s)
#print(s)
result = list(reversed(sorted(range(len(s)), key=lambda k: s[k])))
till_which = len(setActors)+10
print(result[:till_which])
print("Seed Actors:")
for actorid in setActors:
print(ImdbActorInfo.objects.get(actorid=actorid).name)
for ea in result[:till_which]:
ac = ImdbActorInfo.objects.get(actorid=inv_a[ea])
print(inv_a[ea], ac.name, s[ea])
#print(inv_a[ea], ac.name, s[ea], print_actor_vector.main(inv_a[ea]))
def coactor_matrix():
"""Gets coactor matrix"""
print "\n\n"
actor_dict = {}
act = MovieActor.objects.values_list('actorid', flat=True).distinct()
actor_count = act.count()
for n, each in enumerate(list(act)):
actor_dict[each] = n
results = [[0]*actor_count for i in range(actor_count)]
act = list(act)
for i in range(len(act)):
ac = ImdbActorInfo.objects.get(actorid=act[i])
movies = MovieActor.objects.filter(actorid=ac)
for movie in movies:
#print movie.movieid.movieid
result1 = MovieActor.objects.filter(movieid=movie.movieid)
for res in result1:
#print res.actorid.actorid
#print(res.actorid.actorid)
results[i][actor_dict[res.actorid.actorid]]+=1.0
for i in range(len(results)):
results[i][i] =0.0
return results, actor_dict
def compute_Semantics_3b(setActors):
#setActors = set([2312401])
results, actor_dict = coactor_matrix()
setIndex = set([])
for actorid in setActors:
#print(actor_dict[2312401])
setIndex.add(actor_dict[actorid])
#print(setIndex)
# with open("coactor_matrix.csv", "wb") as f:
# writer = csv.writer(f)
# writer.writerows(results)
inv_a = {v: k for k, v in actor_dict.iteritems()}
# nodes,s=ppr.closedform(setActors,results)
nodes,s=ppr.closedform(setIndex,results,0.85)
#print(s)
#print(s)
result = list(reversed(sorted(range(len(s)), key=lambda k: s[k])))
#print(result[:10])
till_which = len(setActors)+10
print("Seed Actors:")
for actorid in setActors:
print(ImdbActorInfo.objects.get(actorid=actorid).name)
for ea in result[:till_which]:
ac = ImdbActorInfo.objects.get(actorid=inv_a[ea])
print(inv_a[ea], ac.name, s[ea])
if __name__ == "__main__":
setActors = set([1860883,486691,1335137,901175])
compute_Semantics_3a(setActors)
compute_Semantics_3b(setActors)
| gpl-3.0 |
chungjjang80/FRETBursts | fretbursts/__init__.py | 2 | 6910 | #
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2014-2016 The Regents of the University of California,
# Antonino Ingargiola <[email protected]>
#
from __future__ import print_function, absolute_import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Citation information
_CITATION = """
FRETBursts: An Open Source Toolkit for Analysis of Freely-Diffusing Single-Molecule FRET
Ingargiola et al. (2016). http://dx.doi.org/10.1371/journal.pone.0160716 """
_INFO_CITATION = (' You are running FRETBursts (version {}).\n\n'
' If you use this software please cite the following'
' paper:\n{}\n\n').format(__version__, _CITATION)
def citation(bar=True):
cit = _INFO_CITATION
if bar:
cit = ('-' * 62) + '\n' + _INFO_CITATION + ('-' * 62)
print(cit)
import warnings
try:
import pandas
except ImportError:
has_pandas = False
warnings.warn((' - Cannot import pandas. Some functionality will not be '
'available.'))
else:
has_pandas = True
try:
import matplotlib
except ImportError:
has_matplotlib = False
warnings.warn((' - Cannot import matplotlib. Plotting will not be '
'available.'))
else:
has_matplotlib = True
try:
try:
from PyQt5 import QtWidgets, QtCore
QtGui = QtWidgets
except ImportError:
try:
from PyQt4 import QtGui, QtCore
except ImportError:
from PySide import QtGui, QtCore
except ImportError:
has_qt = False
# This catches ImportError or other errors due to broken QT installation
warnings.warn((' - Cannot import QT, custom GUI widgets disabled.'))
else:
has_qt = True
try:
import lmfit
except ImportError:
has_lmfit = False
warnings.warn((' - Cannot import lmfit. Some fitting functionalities '
                   'will not be available.'))
else:
has_lmfit = True
__all__numpy = ["np"]
__all__matplotlib = [
# Library modules and functions
"plt", "rcParams", "matplotlib", "plot", "hist",
"grid", "xlim", "ylim", "gca", "gcf",]
__all_local_names = [
# Local modules
"loader", "select_bursts", "bl", "bg", "bpl", "bext", "bg_cache",
"hdf5", "fretmath", "mfit", "citation", "git",
# Classes, functions, variables
"Data", "Sel", "Ph_sel",
"download_file", "init_notebook",
# Standalone plots or plots as a function of ch
"mch_plot_bg", "plot_alternation_hist", "alex_jointplot",
# Plots types used for 1ch of multi-ch plots through `dplot`
"timetrace", "timetrace_single", "ratetrace", "ratetrace_single",
"timetrace_fret", "timetrace_bg",
"hist_width", "hist_size", "hist_size_all", "hist_brightness",
"hist_fret", "hist_burst_data",
"hist2d_alex", "hist_S", "hist_sbr", "hist_asymmetry",
"hist_interphoton_single", "hist_interphoton",
"hist_bg_single", "hist_bg", "hist_ph_delays", "hist_mdelays",
"hist_mrates", "hist_burst_phrate", "hist_burst_delays",
"scatter_width_size", "scatter_rate_da", "scatter_fret_size",
"scatter_fret_nd_na", "scatter_fret_width", "scatter_da",
"scatter_naa_nt", "scatter_alex", "hexbin_alex",
# Wrapper functions that create a plot for each channel
"dplot", "dplot_48ch", "dplot_8ch", "dplot_1ch",
]
__all__ = __all__numpy + __all_local_names
import numpy as np
if has_qt:
__all__ += ['OpenFileDialog']
from .utils.gui import OpenFileDialog
if has_matplotlib:
__all__ += __all__matplotlib
from matplotlib import rcParams
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, hist, grid, xlim, ylim, gca, gcf
# Import plain module names
from . import loader, hdf5, select_bursts, fretmath
# Import modules with custom names
from . import background as bg
from . import burstlib as bl
# Import objects
from .burstlib import Data, Sel
from .ph_sel import Ph_sel
if has_pandas and has_lmfit:
from . import burstlib_ext as bext
if has_matplotlib and has_pandas and has_lmfit:
from . import mfit
from . import burst_plot as bpl
from .burst_plot import (
# Standalone plots as a function of ch
mch_plot_bg, plot_alternation_hist, alex_jointplot,
# Single-ch plots used in multi-ch plots through `dplot`
timetrace, timetrace_single, ratetrace, ratetrace_single,
timetrace_fret, timetrace_bg,
hist_width, hist_size, hist_size_all, hist_brightness,
hist_fret, hist_burst_data,
hist2d_alex, hist_S, hist_sbr, hist_asymmetry,
hist_interphoton_single, hist_interphoton,
hist_bg_single, hist_bg, hist_ph_delays, hist_mdelays,
hist_mrates, hist_burst_phrate, hist_burst_delays,
scatter_width_size, scatter_rate_da, scatter_fret_size,
scatter_fret_nd_na, scatter_fret_width, scatter_da,
scatter_naa_nt, scatter_alex, hexbin_alex,
# Wrapper functions that create a plot for each channel
dplot, dplot_48ch, dplot_8ch, dplot_1ch,
)
from .utils.misc import download_file
from .utils import git
def init_notebook(fs=13, seaborn_style='darkgrid',
mpl_backend='inline', configure_logger=True, apionly=False):
"""
Set the default plot style for inline plots using the seaborn library.
This function must be called from an ipython notebook or
ipython QT console.
Arguments:
fs (int): base font size for text labels
Returns:
The imported seaborn library. By saving the return value you
don't need to import seaborn again.
"""
if configure_logger:
import logging
logger = logging.getLogger('fretbursts')
formatter = logging.Formatter('%(levelname)s %(name)s\n%(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if mpl_backend is not None:
ip = get_ipython()
ip.enable_matplotlib(mpl_backend)
import seaborn as sns
if not apionly:
rc={'font.size': fs, 'axes.labelsize': fs, 'legend.fontsize': fs,
'axes.titlesize': fs*1.1,
'xtick.labelsize': fs, 'ytick.labelsize': fs,
'font.sans-serif': ['Arial', 'Liberation Sans'],
}
sns.set(rc=rc)
blue = '#0055d4'
green = '#2ca02c'
color_brewer = sns.color_palette("Set1", 9)
colors = np.array(color_brewer)[(1,0,2,3,4,8,6,7), :]
colors = list(colors)
colors[:3] = (blue, colors[1], green)
sns.set_palette(colors, 8)
sns.colors = colors
sns.set_style(seaborn_style)
return sns
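# Minimal usage sketch (assumes an IPython/Jupyter session, since
# `init_notebook` calls `get_ipython()`; the font size and style shown are
# illustrative choices, not package defaults):
#
#     from fretbursts import init_notebook
#     sns = init_notebook(fs=14, seaborn_style='whitegrid')
#     # `sns` is the imported seaborn module; no separate `import seaborn` needed.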
citation()
| gpl-2.0 |
allanino/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/numerix/__init__.py | 69 | 5473 | """
numerix imports Numeric, numarray, or numpy based on various selectors.
0. If the value "--numpy","--numarray" or "--Numeric" is specified on the
command line, then numerix imports the specified
array package.
1. The value of numerix in matplotlibrc: Numeric, numarray, or numpy
2. If none of the above is done, the default array package is Numeric.
Because the matplotlibrc always provides *some* value for numerix
(it has its own system of default values), this default is most
likely never used.
To summarize: the commandline is examined first, the rc file second,
and the default array package is Numeric.
"""
import sys, os, struct
from matplotlib import rcParams, verbose
which = None, None
use_maskedarray = None
# First, see if --numarray or --Numeric was specified on the command
# line:
for a in sys.argv:
if a in ["--Numeric", "--numeric", "--NUMERIC",
"--Numarray", "--numarray", "--NUMARRAY",
"--NumPy", "--numpy", "--NUMPY", "--Numpy",
]:
which = a[2:], "command line"
if a == "--maskedarray":
use_maskedarray = True
if a == "--ma":
use_maskedarray = False
try: del a
except NameError: pass
if which[0] is None:
try: # In theory, rcParams always has *some* value for numerix.
which = rcParams['numerix'], "rc"
except KeyError:
pass
if use_maskedarray is None:
try:
use_maskedarray = rcParams['maskedarray']
except KeyError:
use_maskedarray = False
# If all the above fail, default to Numeric. Most likely not used.
if which[0] is None:
which = "numeric", "defaulted"
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric', 'numarray', or 'numpy' but the value obtained from the %s was '%s'." % (which[1], which[0]))
if which[0] == "numarray":
import warnings
warnings.warn("numarray use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from na_imports import *
from numarray import *
from _na_imports import nx, inf, infinity, Infinity, Matrix, isnan, all
from numarray.numeric import nonzero
from numarray.convolve import cross_correlate, convolve
import numarray
version = 'numarray %s'%numarray.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numeric":
import warnings
warnings.warn("Numeric use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from nc_imports import *
from Numeric import *
from _nc_imports import nx, inf, infinity, Infinity, isnan, all, any
from Matrix import Matrix
import Numeric
version = 'Numeric %s'%Numeric.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numpy":
try:
import numpy.oldnumeric as numpy
from numpy.oldnumeric import *
except ImportError:
import numpy
from numpy import *
print 'except asarray', asarray
from _sp_imports import nx, infinity, rand, randn, isnan, all, any
from _sp_imports import UInt8, UInt16, UInt32, Infinity
try:
from numpy.oldnumeric.matrix import Matrix
except ImportError:
Matrix = matrix
version = 'numpy %s' % numpy.__version__
from numpy import nan
else:
raise RuntimeError("invalid numerix selector")
# Some changes are only applicable to the new numpy:
if (which[0] == 'numarray' or
which[0] == 'numeric'):
from mlab import amin, amax
newaxis = NewAxis
def typecode(a):
return a.typecode()
def iscontiguous(a):
return a.iscontiguous()
def byteswapped(a):
return a.byteswapped()
def itemsize(a):
return a.itemsize()
def angle(a):
return arctan2(a.imag, a.real)
else:
# We've already checked for a valid numerix selector,
# so assume numpy.
from mlab import amin, amax
newaxis = NewAxis
from numpy import angle
def typecode(a):
return a.dtype.char
def iscontiguous(a):
return a.flags.contiguous
def byteswapped(a):
return a.byteswap()
def itemsize(a):
return a.itemsize
verbose.report('numerix %s'%version)
# a bug fix for blas numeric suggested by Fernando Perez
matrixmultiply=dot
asum = sum
def _import_fail_message(module, version):
"""Prints a message when the array package specific version of an extension
fails to import correctly.
"""
_dict = { "which" : which[0],
"module" : module,
"specific" : version + module
}
print """
The import of the %(which)s version of the %(module)s module,
%(specific)s, failed. This is either because %(which)s was
unavailable when matplotlib was compiled, because a dependency of
%(specific)s could not be satisfied, or because the build flag for
this module was turned off in setup.py. If it appears that
%(specific)s was not built, make sure you have a working copy of
%(which)s and then re-install matplotlib. Otherwise, the following
traceback gives more details:\n""" % _dict
g = globals()
l = locals()
__import__('ma', g, l)
__import__('fft', g, l)
__import__('linear_algebra', g, l)
__import__('random_array', g, l)
__import__('mlab', g, l)
la = linear_algebra
ra = random_array
| agpl-3.0 |
luispedro/BuildingMachineLearningSystemsWithPython | ch10/neighbors.py | 1 | 1767 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
import numpy as np
import mahotas as mh
from glob import glob
from features import texture, chist
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from scipy.spatial import distance
basedir = '../SimpleImageDataset/'
haralicks = []
chists = []
print('Computing features...')
# Use glob to get all the images
images = glob('{}/*.jpg'.format(basedir))
# We sort the images to ensure that they are always processed in the same order
# Otherwise, this would introduce some variation just based on the random
# ordering that the filesystem uses
images.sort()
for fname in images:
imc = mh.imread(fname)
imc = imc[200:-200,200:-200]
haralicks.append(texture(mh.colors.rgb2grey(imc)))
chists.append(chist(imc))
haralicks = np.array(haralicks)
chists = np.array(chists)
features = np.hstack([chists, haralicks])
print('Computing neighbors...')
sc = StandardScaler()
features = sc.fit_transform(features)
dists = distance.squareform(distance.pdist(features))
print('Plotting...')
fig, axes = plt.subplots(2, 9, figsize=(16,8))
# Remove ticks from all subplots
for ax in axes.flat:
ax.set_xticks([])
ax.set_yticks([])
for ci,i in enumerate(range(0,90,10)):
left = images[i]
dists_left = dists[i]
right = dists_left.argsort()
# right[0] is the same as left[i], so pick the next closest element
right = right[1]
right = images[right]
left = mh.imread(left)
right = mh.imread(right)
axes[0, ci].imshow(left)
axes[1, ci].imshow(right)
fig.tight_layout()
fig.savefig('figure_neighbors.png', dpi=300)
| mit |
ligovirgo/seismon | RfPrediction/old/cause_lockstate.py | 2 | 28041 | #!/usr/bin/python
from __future__ import division
import os, sys, glob, optparse, warnings, time, json
import numpy as np
import subprocess
from subprocess import Popen
from lxml import etree
from scipy.interpolate import interp1d
from gwpy.timeseries import TimeSeries
import lal.gpstime
#from seismon import (eqmon, utils)
import matplotlib
matplotlib.use("AGG")
matplotlib.rcParams.update({'font.size': 18})
from matplotlib import pyplot as plt
from matplotlib import cm
def parse_commandline():
"""@parse the options given on the command-line.
"""
parser = optparse.OptionParser(usage=__doc__)
parser.add_option("-t", "--time_after_p_wave", help="time to check for lockloss status after p wave arrival.",
default = 3600)
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Run verbosely. (Default: False)")
opts, args = parser.parse_args()
# show parameters
if opts.verbose:
print >> sys.stderr, ""
print >> sys.stderr, "running network_eqmon..."
print >> sys.stderr, "version: %s"%__version__
print >> sys.stderr, ""
print >> sys.stderr, "***************** PARAMETERS ********************"
for o in opts.__dict__.items():
print >> sys.stderr, o[0]+":"
print >> sys.stderr, o[1]
print >> sys.stderr, ""
return opts
## LHO
rms_toggle = ''
os.system('mkdir -p /home/eric.coughlin/H1O1/')
os.system('mkdir -p /home/eric.coughlin/public_html/lockloss_threshold_plots/LHO/')
for direction in ['Z','X','Y']:
if rms_toggle == "":
channel = 'H1:ISI-GND_STS_HAM2_{0}_DQ'.format(direction)
elif rms_toggle == "RMS_":
channel = 'H1:ISI-GND_STS_HAM5_{0}_BLRMS_30M_100M'.format(direction)
H1_lock_time_list = []
H1_lockloss_time_list = []
H1_peak_ground_velocity_list = []
hdir = os.environ["HOME"]
options = parse_commandline()
predicted_peak_ground_velocity_list = []
datafileH1 = open('{0}/gitrepo/seismon/RfPrediction/data/LHO_O1_{1}{2}.txt'.format(hdir, rms_toggle, direction), 'r')
resultfileH1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LHO_lockstatus_{0}{1}.txt'.format(rms_toggle, direction), 'w')
H1_channel_lockstatus_data = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/segs_Locked_H_1126569617_1136649617.txt', 'r')
    # This next section of code separates the data into two lists, which makes the data easier to search through and process.
for item in (line.strip().split() for line in H1_channel_lockstatus_data):
H1_lock_time = item[0]
H1_lockloss_time = item[1]
H1_lock_time_list.append(float(H1_lock_time))
H1_lockloss_time_list.append(float(H1_lockloss_time))
#resultfileH1.write('{0:^20} {1:^20} {2:^20} {3:^20} \n'.format('eq arrival time','pw arrival time','peak ground velocity','lockloss'))
for column in ( line.strip().split() for line in datafileH1):
eq_time = column[0] # This is the time that the earthquake was detected
eq_mag = column[1]
pw_arrival_time = column[2] #this is the arrival time of the pwave
sw_arrival_time = column[3]
eq_distance = column[12]
eq_depth = column[13]
peak_acceleration = column[17]
peak_displacement = column[19]
rw_arrival_time = column[5] #this is the arrival time of rayleigh wave
peak_ground_velocity = column[15] # this is the peak ground velocity during the time of the earthquake.
predicted_peak_ground_velocity = column[7]
predicted_peak_ground_velocity_list.append(float(predicted_peak_ground_velocity))
        # The `next` built-in takes an iterable and returns the first item that matches the condition; if nothing matches, an optional default value can be supplied so the program does not raise StopIteration.
#H1_lock_time = next((item for item in H1_lock_time_list if min(H1_lock_time_list, key=lambda x:abs(x-float(pw_arrival_time)))),[None])
#H1_lockloss_time = next((item for item in H1_lockloss_time_list if min(H1_lockloss_time_list, key=lambda x:abs(x-float(float(pw_arrival_time)+float(options.time_after_p_wave))))),[None])
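        # A minimal sketch of that idiom (illustrative names only):
        #     first_match = next((t for t in some_list if t >= threshold), None)
        # returns None instead of raising StopIteration when nothing matches.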
H1_lock_time = min(H1_lock_time_list, key=lambda x:abs(x-float(pw_arrival_time)))
H1_lockloss_time = min(H1_lockloss_time_list, key=lambda x:abs(x-float(float(pw_arrival_time) + float(options.time_after_p_wave))))
lockloss = ""
        if (H1_lock_time <= float(pw_arrival_time) and H1_lockloss_time <= float(float(pw_arrival_time) + float(options.time_after_p_wave))): # These if statements check whether the interferometer was locked at the P-wave arrival and, if so, whether it lost lock within the chosen window after the earthquake.
lockloss = "Y"
resultfileH1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} {7:^20} {8:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth,lockloss))
elif (H1_lock_time <= float(pw_arrival_time) and H1_lockloss_time > float(float(pw_arrival_time) + float(options.time_after_p_wave))):
lockloss = "N"
resultfileH1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} {7:^20} {8:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth,lockloss))
elif (H1_lock_time > float(pw_arrival_time)):
lockloss = "Z"
resultfileH1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} {7:^20} {8:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth,lockloss))
datafileH1.close()
resultfileH1.close()
H1_channel_lockstatus_data.close()
eq_time_list = []
locklosslist = []
pw_arrival_list = []
peak_acceleration_list = []
peak_displacement_list = []
eq_mag_list = []
eq_distance_list = []
eq_depth_list = []
resultfileplotH1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LHO_lockstatus_{0}{1}.txt'.format(rms_toggle, direction), 'r')
for item in (line.strip().split() for line in resultfileplotH1):
eq_time = item[0]
pw_arrival = item[1]
peakgroundvelocity = item[2]
peak_acceleration = item[3]
peak_displacement = item[4]
eq_mag = item[5]
eq_distance = item[6]
eq_depth = item[7]
lockloss = item[8]
H1_peak_ground_velocity_list.append(float(peakgroundvelocity))
locklosslist.append(lockloss)
eq_time_list.append(eq_time)
pw_arrival_list.append(pw_arrival)
peak_acceleration_list.append(peak_acceleration)
peak_displacement_list.append(peak_displacement)
eq_mag_list.append(eq_mag)
eq_distance_list.append(eq_distance)
eq_depth_list.append(eq_depth)
    H1_binary_file = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LHO_O1_binary_{0}{1}.txt'.format(rms_toggle, direction), 'w')
for eq_time, pw_arrival, peakgroundvelocity, peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth, lockloss in zip(eq_time_list, pw_arrival_list, H1_peak_ground_velocity_list, peak_acceleration_list,peak_displacement_list,eq_mag_list,eq_distance_list,eq_depth_list, locklosslist):
if lockloss == "Y":
lockloss_binary = '1'
H1_binary_file.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} {7:^20} {8:^20} \n'.format(eq_time,pw_arrival,peakgroundvelocity,peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth,lockloss_binary))
elif lockloss == "N":
lockloss_binary = '0'
H1_binary_file.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} {7:^20} {8:^20} \n'.format(eq_time,pw_arrival,peakgroundvelocity,peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth,lockloss_binary))
else:
pass
H1_binary_file.close()
locklosslistZ = []
locklosslistY = []
locklosslistN = []
eq_time_list_Z = []
eq_time_list_N = []
eq_time_list_Y = []
H1_peak_ground_velocity_list_Z = []
H1_peak_ground_velocity_list_N = []
H1_peak_ground_velocity_list_Y = []
peak_ground_acceleration_list_Z = []
peak_ground_acceleration_list_N = []
peak_ground_acceleration_list_Y = []
H1_peak_ground_velocity_sorted_list, locklosssortedlist, predicted_peak_ground_velocity_sorted_list = (list(t) for t in zip(*sorted(zip(H1_peak_ground_velocity_list, locklosslist, predicted_peak_ground_velocity_list))))
num_lock_list = []
YN_peak_list = []
for sortedpeak, sortedlockloss in zip(H1_peak_ground_velocity_sorted_list, locklosssortedlist):
if sortedlockloss == "Y":
YN_peak_list.append(sortedpeak)
num_lock_list.append(1)
elif sortedlockloss == "N":
YN_peak_list.append(sortedpeak)
num_lock_list.append(0)
num_lock_prob_cumsum = np.divide(np.cumsum(num_lock_list), np.cumsum(np.ones(len(num_lock_list))))
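# Illustrative note: this running ratio is the empirical lockloss probability as a function of
# ground velocity. With made-up values num_lock_list = [0, 0, 1, 1] (sorted by peak velocity),
# the cumulative sums are [0, 0, 1, 2] over counts [1, 2, 3, 4], i.e. probabilities
# [0.0, 0.0, 0.33, 0.5] for all earthquakes up to each sorted peak-velocity level.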
f, axarr = plt.subplots(1)
for t,time,peak, peak_acc, lockloss in zip(range(len(eq_time_list)),eq_time_list,H1_peak_ground_velocity_list,peak_acceleration_list,locklosslist):
if lockloss == "Z":
eq_time_list_Z.append(t)
H1_peak_ground_velocity_list_Z.append(peak)
locklosslistZ.append(lockloss)
peak_ground_acceleration_list_Z.append(peak_acc)
elif lockloss == "N":
eq_time_list_N.append(t)
H1_peak_ground_velocity_list_N.append(peak)
locklosslistN.append(lockloss)
peak_ground_acceleration_list_N.append(peak_acc)
elif lockloss == "Y":
eq_time_list_Y.append(t)
H1_peak_ground_velocity_list_Y.append(peak)
locklosslistY.append(lockloss)
peak_ground_acceleration_list_Y.append(peak_acc)
axarr.plot(eq_time_list_N, H1_peak_ground_velocity_list_N, 'go', label='locked at earthquake(eq)')
axarr.plot(eq_time_list_Y, H1_peak_ground_velocity_list_Y, 'ro', label='lockloss at earthquake(eq)')
axarr.set_title('H1 Lockstatus Plot')
axarr.set_yscale('log')
axarr.set_xlabel('earthquake count(eq)')
axarr.set_ylabel('peak ground velocity(m/s)')
axarr.legend(loc='best')
#f.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LHO/lockstatus_LHO_{0}{1}.png'.format(rms_toggle, direction))
f.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockstatus_LHO_{0}{1}.png'.format(rms_toggle, direction))
plt.figure(2)
plt.plot(eq_time_list_N, peak_ground_acceleration_list_N, 'go', label='locked at earthquake(eq)')
plt.plot(eq_time_list_Y, peak_ground_acceleration_list_Y, 'ro', label='lockloss at earthquake(eq)')
plt.title('H1 Lockstatus Plot(acceleration)')
plt.yscale('log')
plt.xlabel('earthquake count(eq)')
plt.ylabel('peak ground acceleration(m/s^2)')
plt.legend(loc='best')
plt.savefig('/home/eric.coughlin/public_html/lockstatus_acceleration_LHO_{0}{1}.png'.format(rms_toggle, direction))
#plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockstatus_acceleration_LHO_{0}{1}.png'.format(rms_toggle, direction))
plt.clf()
plt.figure(3)
plt.plot(H1_peak_ground_velocity_sorted_list, predicted_peak_ground_velocity_sorted_list, 'o', label='actual vs predicted')
plt.title('H1 actual vs predicted ground velocity')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('peak ground velocity(m/s)')
plt.ylabel('predicted peak ground velocity(m/s)')
plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LHO/check_prediction_LHO_{0}{1}.png'.format(rms_toggle, direction))
plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/check_prediction_LHO_{0}{1}.png'.format(rms_toggle, direction))
plt.clf()
threshold_file_H1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/threshhold_data_{0}{1}.txt'.format(rms_toggle, direction), 'w')
num_of_lockloss = len(locklosslistY)
total_lockstatus = num_of_lockloss + len(locklosslistN)
total_lockstatus_all = num_of_lockloss + len(locklosslistN) + len(locklosslistZ)
total_percent_lockloss = float(num_of_lockloss) / total_lockstatus # true division so the percentage is not truncated under Python 2
threshold_file_H1.write('The percentage of total locklosses is {0}% \n'.format(total_percent_lockloss * 100))
threshold_file_H1.write('The total number of earthquakes is {0}. \n'.format(total_lockstatus_all))
eqcount_50 = 0
eqcount_75 = 0
eqcount_90 = 0
eqcount_95 = 0
for item, thing in zip(num_lock_prob_cumsum, YN_peak_list):
if item >= .5:
eqcount_50 = eqcount_50 + 1
if item >= .75:
eqcount_75 = eqcount_75 + 1
if item >= .9:
eqcount_90 = eqcount_90 + 1
if item >= .95:
eqcount_95 = eqcount_95 + 1
threshold_file_H1.write('The number of earthquakes above 50 percent is {0}. \n'.format(eqcount_50))
threshold_file_H1.write('The number of earthquakes above 75 percent is {0}. \n'.format(eqcount_75))
threshold_file_H1.write('The number of earthquakes above 90 percent is {0}. \n'.format(eqcount_90))
threshold_file_H1.write('The number of earthquakes above 95 percent is {0}. \n'.format(eqcount_95))
probs = [0.5, 0.75, 0.9, 0.95]
num_lock_prob_cumsum_sort = np.unique(num_lock_prob_cumsum)
YN_peak_list_sort = np.unique(YN_peak_list)
num_lock_prob_cumsum_sort, YN_peak_list_sort = zip(*sorted(zip(num_lock_prob_cumsum_sort, YN_peak_list_sort)))
thresholdsf = interp1d(num_lock_prob_cumsum_sort,YN_peak_list_sort, bounds_error=False)
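# Illustrative note (assumes scipy.interpolate.interp1d semantics): thresholdsf inverts the
# empirical probability curve, so thresholdsf(0.9) is the peak ground velocity at which the
# cumulative lockloss probability reaches 90%. With bounds_error=False and no fill_value,
# probabilities outside the sampled range evaluate to nan rather than raising an error.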
for item in probs:
threshold = thresholdsf(item)
threshold_file_H1.write('The threshold at {0}% is {1}(m/s) \n'.format(item * 100, threshold))
threshold_file_H1.write('The number of times of locklosses is {0}. \n'.format(len(locklosslistY)))
threshold_file_H1.write('The number of times of no locklosses is {0}. \n'.format(len(locklosslistN)))
threshold_file_H1.write('The number of times of not locked is {0}. \n'.format(len(locklosslistZ)))
threshold_file_H1.close()
plt.figure(4)
plt.plot(YN_peak_list_sort, num_lock_prob_cumsum_sort, 'kx', label='probability of lockloss')
plt.title('H1 Lockloss Probability')
plt.xscale('log')
plt.grid(True)
plt.xlabel('peak ground velocity (m/s)')
plt.ylabel('Lockloss Probability')
plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LHO/lockloss_probablity_LHO_{0}{1}.png'.format(rms_toggle, direction))
plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockloss_probablity_LHO_{0}{1}.png'.format(rms_toggle, direction))
plt.clf()
## LLO
os.system('mkdir -p /home/eric.coughlin/L1O1/')
os.system('mkdir -p /home/eric.coughlin/public_html/lockloss_threshold_plots/LLO/')
for direction in ['Z','X','Y']:
if rms_toggle == "":
channel = 'L1:ISI-GND_STS_HAM2_{0}_DQ'.format(direction)
elif rms_toggle == "RMS_":
channel = 'L1:ISI-GND_STS_HAM5_{0}_BLRMS_30M_100M'.format(direction)
L1_lock_time_list = []
L1_lockloss_time_list = []
options = parse_commandline()
predicted_peak_ground_velocity_list = []
H1_peak_ground_velocity_list =[]
datafileL1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LLO_O1_{0}{1}.txt'.format(rms_toggle, direction), 'r')
resultfileL1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LLO_lockstatus_{0}{1}.txt'.format(rms_toggle, direction), 'w')
L1_channel_lockstatus_data = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/segs_Locked_L_1126569617_1136649617.txt', 'r')
for item in (line.strip().split() for line in L1_channel_lockstatus_data):
L1_lock_time = item[0]
L1_lockloss_time = item[1]
L1_lock_time_list.append(float(L1_lock_time))
L1_lockloss_time_list.append(float(L1_lockloss_time))
#resultfileL1.write('{0:^20} {1:^20} {2:^20} {3:^20} \n'.format('eq arrival time','pw arrival time','peak ground velocity','lockloss'))
for column in ( line.strip().split() for line in datafileL1):
eq_time = column[0]
eq_mag = column[1]
pw_arrival_time = column[2]
sw_arrival_time = column[3]
eq_distance = column[12]
eq_depth = column[13]
peak_acceleration = column[17]
peak_displacement = column[19]
rw_arrival_time = column[5]
peak_ground_velocity = column[15]
predicted_peak_ground_velocity = column[7]
predicted_peak_ground_velocity_list.append(float(predicted_peak_ground_velocity))
#L1_lock_time = next((item for item in L1_lock_time_list if item <= float(pw_arrival_time)),[None])
#L1_lockloss_time = next((item for item in L1_lockloss_time_list if item <= float(float(pw_arrival_time) + float(options.time_after_p_wave))),[None])
L1_lock_time = min(L1_lock_time_list, key=lambda x:abs(x-float(pw_arrival_time)))
L1_lockloss_time = min(L1_lockloss_time_list, key=lambda x:abs(x-float(float(pw_arrival_time) + float(options.time_after_p_wave))))
lockloss = ""
if (L1_lock_time <= float(pw_arrival_time) and L1_lockloss_time <= float(float(pw_arrival_time) + float(options.time_after_p_wave))):
lockloss = "Y"
resultfileL1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} {7:^20} {8:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth,lockloss))
elif (L1_lock_time <= float(pw_arrival_time) and L1_lockloss_time > float(float(pw_arrival_time) + float(options.time_after_p_wave))):
lockloss = "N"
resultfileL1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} {7:^20} {8:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth,lockloss))
elif (L1_lock_time > float(pw_arrival_time)):
lockloss = "Z"
resultfileL1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} {7:^20} {8:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth,lockloss))
datafileL1.close()
resultfileL1.close()
L1_channel_lockstatus_data.close()
eq_time_list = []
locklosslist = []
pw_arrival_list = []
peak_acceleration_list =[]
peak_displacement_list = []
eq_mag_list = []
eq_distance_list = []
eq_depth_list = []
resultfileplotL1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LLO_lockstatus_{0}{1}.txt'.format(rms_toggle, direction), 'r')
for item in (line.strip().split() for line in resultfileplotL1):
eq_time = item[0]
pw_arrival = item[1]
peakgroundvelocity = item[2]
peak_acceleration = item[3]
peak_displacement = item[4]
eq_mag = item[5]
eq_distance = item[6]
eq_depth = item[7]
lockloss = item[8]
H1_peak_ground_velocity_list.append(float(peakgroundvelocity))
locklosslist.append(lockloss)
eq_time_list.append(eq_time)
pw_arrival_list.append(pw_arrival)
peak_acceleration_list.append(peak_acceleration)
peak_displacement_list.append(peak_displacement)
eq_mag_list.append(eq_mag)
eq_distance_list.append(eq_distance)
eq_depth_list.append(eq_depth)
L1_binary_file = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LLO_O1_binary_{0}{1}.txt'.format(rms_toggle, direction), 'w')
for eq_time, pw_arrival, peakgroundvelocity, peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth, lockloss in zip(eq_time_list, pw_arrival_list, H1_peak_ground_velocity_list, peak_acceleration_list,peak_displacement_list,eq_mag_list,eq_distance_list,eq_depth_list, locklosslist):
if lockloss == "Y":
lockloss_binary = '1'
L1_binary_file.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} {7:^20} {8:^20} \n'.format(eq_time,pw_arrival,peakgroundvelocity,peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth,lockloss_binary))
elif lockloss == "N":
lockloss_binary = '0'
L1_binary_file.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} {7:^20} {8:^20} \n'.format(eq_time,pw_arrival,peakgroundvelocity,peak_acceleration,peak_displacement,eq_mag,eq_distance,eq_depth,lockloss_binary))
else:
pass
L1_binary_file.close()
locklosslistZ = []
locklosslistY = []
locklosslistN = []
eq_time_list_Z = []
eq_time_list_N = []
eq_time_list_Y = []
H1_peak_ground_velocity_list_Z = []
H1_peak_ground_velocity_list_N = []
H1_peak_ground_velocity_list_Y = []
peak_ground_acceleration_list_Z = []
peak_ground_acceleration_list_N = []
peak_ground_acceleration_list_Y = []
H1_peak_ground_velocity_sorted_list, locklosssortedlist, predicted_peak_ground_velocity_sorted_list = (list(t) for t in zip(*sorted(zip(H1_peak_ground_velocity_list, locklosslist, predicted_peak_ground_velocity_list))))
num_lock_list = []
YN_peak_list = []
for sortedpeak, sortedlockloss in zip(H1_peak_ground_velocity_sorted_list, locklosssortedlist):
if sortedlockloss == "Y":
YN_peak_list.append(sortedpeak)
num_lock_list.append(1)
elif sortedlockloss == "N":
YN_peak_list.append(sortedpeak)
num_lock_list.append(0)
num_lock_prob_cumsum = np.cumsum(num_lock_list) / np.cumsum(np.ones(len(num_lock_list)))
plt.figure(8)
for t,time,peak,peak_acc,lockloss in zip(range(len(eq_time_list)),eq_time_list,H1_peak_ground_velocity_list,peak_acceleration_list,locklosslist):
if lockloss == "Z":
eq_time_list_Z.append(t)
H1_peak_ground_velocity_list_Z.append(peak)
locklosslistZ.append(lockloss)
peak_ground_acceleration_list_Z.append(peak_acc)
elif lockloss == "N":
eq_time_list_N.append(t)
H1_peak_ground_velocity_list_N.append(peak)
locklosslistN.append(lockloss)
peak_ground_acceleration_list_N.append(peak_acc)
elif lockloss == "Y":
eq_time_list_Y.append(t)
H1_peak_ground_velocity_list_Y.append(peak)
locklosslistY.append(lockloss)
peak_ground_acceleration_list_Y.append(peak_acc)
plt.plot(eq_time_list_N, H1_peak_ground_velocity_list_N, 'go', label='locked at earthquake(eq)')
plt.plot(eq_time_list_Y, H1_peak_ground_velocity_list_Y, 'ro', label='lockloss at earthquake(eq)')
plt.title('L1 Lockstatus Plot')
plt.yscale('log')
plt.xlabel('earthquake count(eq)')
plt.ylabel('peak ground velocity(m/s)')
plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LLO/lockstatus_LLO_{0}{1}.png'.format(rms_toggle, direction))
plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockstatus_LLO_{0}{1}.png'.format(rms_toggle, direction))
plt.clf()
plt.figure(23)
plt.plot(eq_time_list_N, peak_ground_acceleration_list_N, 'go', label='locked at earthquake(eq)')
plt.plot(eq_time_list_Y, peak_ground_acceleration_list_Y, 'ro', label='lockloss at earthquake(eq)')
plt.title('L1 Lockstatus Plot (acceleration)')
plt.yscale('log')
plt.xlabel('earthquake count(eq)')
plt.ylabel('peak ground acceleration(m/s^2)')
plt.legend(loc='best')
plt.savefig('/home/eric.coughlin/public_html/lockstatus_acceleration_LLO_{0}{1}.png'.format(rms_toggle, direction))
#plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockstatus_acceleration_LLO_{0}{1}.png'.format(rms_toggle, direction))
plt.clf()
plt.figure(9)
plt.plot(H1_peak_ground_velocity_list, predicted_peak_ground_velocity_list, 'o', label='actual vs predicted')
plt.title('L1 actual vs predicted ground velocity')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('peak ground velocity(m/s)')
plt.ylabel('predicted peak ground velocity(m/s)')
plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LLO/check_predictionLLO_{0}{1}.png'.format(rms_toggle, direction))
plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/check_predictionLLO_{0}{1}.png'.format(rms_toggle, direction))
plt.clf()
threshold_file_L1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/threshhold_data_LLO_{0}{1}.txt'.format(rms_toggle, direction), 'w')
num_of_lockloss = len(locklosslistY)
total_lockstatus = num_of_lockloss + len(locklosslistN)
total_lockstatus_all = num_of_lockloss + len(locklosslistN) + len(locklosslistZ)
total_percent_lockloss = float(num_of_lockloss) / total_lockstatus # true division so the percentage is not truncated under Python 2
threshold_file_L1.write('The percentage of total locklosses is {0}% \n'.format(total_percent_lockloss * 100))
threshold_file_L1.write('The total number of earthquakes is {0}. \n'.format(total_lockstatus_all))
eqcount_50 = 0
eqcount_75 = 0
eqcount_90 = 0
eqcount_95 = 0
for item, thing in zip(num_lock_prob_cumsum, YN_peak_list):
if item >= .5:
eqcount_50 = eqcount_50 + 1
if item >= .75:
eqcount_75 = eqcount_75 + 1
if item >= .9:
eqcount_90 = eqcount_90 + 1
if item >= .95:
eqcount_95 = eqcount_95 + 1
threshold_file_L1.write('The number of earthquakes above 50 percent is {0}. \n'.format(eqcount_50))
threshold_file_L1.write('The number of earthquakes above 75 percent is {0}. \n'.format(eqcount_75))
threshold_file_L1.write('The number of earthquakes above 90 percent is {0}. \n'.format(eqcount_90))
threshold_file_L1.write('The number of earthquakes above 95 percent is {0}. \n'.format(eqcount_95))
probs = [0.5, 0.75, 0.9, 0.95]
num_lock_prob_cumsum_sort = np.unique(num_lock_prob_cumsum)
YN_peak_list_sort = np.unique(YN_peak_list)
num_lock_prob_cumsum_sort, YN_peak_list_sort = zip(*sorted(zip(num_lock_prob_cumsum_sort, YN_peak_list_sort)))
thresholds = []
thresholdsf = interp1d(num_lock_prob_cumsum_sort,YN_peak_list_sort,bounds_error=False)
for item in probs:
threshold = thresholdsf(item)
threshold_file_L1.write('The threshold at {0}% is {1}(m/s) \n'.format(item * 100, threshold))
threshold_file_L1.write('The number of times of locklosses is {0}. \n'.format(len(locklosslistY)))
threshold_file_L1.write('The number of times of no locklosses is {0}. \n'.format(len(locklosslistN)))
threshold_file_L1.write('The number of times of not locked is {0}. \n'.format(len(locklosslistZ)))
threshold_file_L1.close()
plt.figure(10)
plt.plot(YN_peak_list_sort, num_lock_prob_cumsum_sort, 'kx', label='probability of lockloss')
plt.title('L1 Lockloss Probability')
plt.xscale('log')
plt.grid(True)
plt.xlabel('peak ground velocity (m/s)')
plt.ylabel('Lockloss Probability')
plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LLO/lockloss_probablity_LLO_{0}{1}.png'.format(rms_toggle, direction))
plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockloss_probablity_LLO_{0}{1}.png'.format(rms_toggle, direction))
plt.clf()
| gpl-3.0 |
zihua/scikit-learn | examples/decomposition/plot_pca_iris.py | 65 | 1485 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
huobaowangxi/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
y axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
# Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
# If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
# incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
# When there are no positive or no negative labels, those values should
# be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
jlegendary/SimpleCV | scripts/install/win/OpenKinect/freenect-examples/demo_mp_async.py | 15 | 1082 | #!/usr/bin/env python
import freenect
import matplotlib.pyplot as mp
import signal
import frame_convert
mp.ion()
image_rgb = None
image_depth = None
keep_running = True
def display_depth(dev, data, timestamp):
global image_depth
data = frame_convert.pretty_depth(data)
mp.gray()
mp.figure(1)
if image_depth:
image_depth.set_data(data)
else:
image_depth = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def display_rgb(dev, data, timestamp):
global image_rgb
mp.figure(2)
if image_rgb:
image_rgb.set_data(data)
else:
image_rgb = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def body(*args):
if not keep_running:
raise freenect.Kill
def handler(signum, frame):
global keep_running
keep_running = False
print('Press Ctrl-C in terminal to stop')
signal.signal(signal.SIGINT, handler)
freenect.runloop(depth=display_depth,
video=display_rgb,
body=body)
| bsd-3-clause |
robin-lai/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `GaussianNB` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
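# Sketch of the weighted soft-vote arithmetic (hypothetical probabilities): if the three
# classifiers predict class-1 probabilities 0.3, 0.4 and 0.9, the ensemble probability is
# (1*0.3 + 1*0.4 + 5*0.9) / (1 + 1 + 5) = 5.2 / 7, roughly 0.74.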
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'RandomForestClassifier\nweight 1',
'GaussianNB\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
dmargala/tpcorr | examples/plot_acceptance_contours.py | 1 | 3423 | #!/usr/bin/env python
import argparse
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams.update({'font.size': 18})
mpl.rcParams.update({'savefig.dpi': 200})
mpl.rcParams.update({'savefig.bbox': 'tight'})
import matplotlib.pyplot as plt
import scipy.interpolate
import astropy.units as u
import tpcorr
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--wlen', type=float, default=5400.,
help='Observing wavelength')
args = parser.parse_args()
# Observing wavelength
wlen = args.wlen * u.Angstrom
# Initialize telescope model
sdss_25m = tpcorr.acceptance_model.Telescope()
# psf fwhm
fwhm0 = 1.5 * u.arcsec
fwhm_array = np.array([1.2, 1.5, 1.8]) * u.arcsec
linestyles = ['-.', 'solid', 'dashed']
# offset sampling grid
dmin, dmax, nd = (0, 1.5, 50)
offsets = np.linspace(dmin, dmax, nd)
offset_std_grid, offset_grid = np.meshgrid(offsets, offsets)
edges = np.linspace(dmin, dmax, nd + 1)
# Levels for acceptance ratio contours
acceptance_ratio_levels = np.linspace(0.5, 0.9, 3)
# acceptance_ratio_levels_inverse = [1.0/l for l in acceptance_ratio_levels[::-1]]
plt.figure(figsize=(8,6))
for i,(fwhm,linestyle) in enumerate(zip(fwhm_array,linestyles)):
# Draw reference contours for Gaussian atmosphere
psf = sdss_25m.get_atmospheric_psf(wlen, fwhm, gauss=True)
acceptance = sdss_25m.calculate_fiber_acceptance(psf)
# acceptance = tpcorr.acceptance_model.AcceptanceModel(fwhm)
acceptance_ratio = acceptance(offset_std_grid) / acceptance(offset_grid)
contours_ratio = plt.contour(offset_grid, offset_std_grid, acceptance_ratio, acceptance_ratio_levels,
colors='green', linewidths=1, linestyles=linestyle)
contours_ratio = plt.contour(offset_grid, offset_std_grid, acceptance_ratio, 1.0/acceptance_ratio_levels,
colors='green', linewidths=1, linestyles=linestyle)
# Draw reference contours for Kolmogorov atmosphere, use the reference fwhm
psf = sdss_25m.get_atmospheric_psf(wlen, fwhm0, gauss=False)
acceptance = sdss_25m.calculate_fiber_acceptance(psf)
acceptance_ratio = acceptance(offset_std_grid) / acceptance(offset_grid)
contours_ratio = plt.contour(offset_grid, offset_std_grid, acceptance_ratio, acceptance_ratio_levels,
colors='black', linewidths=1, linestyles='solid')
# Add contour labels
plt.clabel(contours_ratio, fontsize=11, fmt=lambda l: '%.2f'%l)
contours_ratio = plt.contour(offset_grid, offset_std_grid, acceptance_ratio, 1.0/acceptance_ratio_levels,
colors='black', linewidths=1, linestyles='solid')
# Add contour labels
plt.clabel(contours_ratio, fontsize=11, fmt=lambda l: '%.2f'%l)
# draw 1 to 1 line
plt.plot(offsets, offsets, color='green', ls='-', lw=1)
# Set aspect ratio and plot limits
plt.gca().set_aspect('equal')
plt.xlim(dmin, dmax)
plt.ylim(dmin, dmax)
# Add title and axis labels
plt.xlabel(r'$d_i^\ast(\lambda,\lambda_i,h_\mathrm{obs})$ $(\mathrm{arcseconds})$')
plt.ylabel(r'$d_i^\ast(\lambda,\lambda_c,h_\mathrm{obs})$ $(\mathrm{arcseconds})$')
plt.title(r'$\lambda$ = %d $\AA$' % wlen.value)
# Save figure
plt.savefig('acceptance_contours.pdf')
if __name__ == '__main__':
main()
| mit |
zrhans/python | exemplos/Examples.lnk/bokeh/glyphs/anscombe.py | 6 | 2961 | from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle, Line
from bokeh.models import (
ColumnDataSource, Grid, GridPlot, LinearAxis, Plot, Range1d
)
from bokeh.resources import INLINE
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
quartet = pd.DataFrame(data=raw_columns, columns=
['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
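# Note: the four x/y pairs are Anscombe's quartet; they share nearly identical summary
# statistics (means, variances, correlation and the least-squares line y = 3 + 0.5*x
# plotted below) while looking completely different, which is what the 2x2 grid shows.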
circles_source = ColumnDataSource(
data = dict(
xi = quartet['Ix'],
yi = quartet['Iy'],
xii = quartet['IIx'],
yii = quartet['IIy'],
xiii = quartet['IIIx'],
yiii = quartet['IIIy'],
xiv = quartet['IVx'],
yiv = quartet['IVy'],
)
)
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
plot = Plot(
x_range=xdr, y_range=ydr,
title=title, plot_width=400, plot_height=400,
border_fill='white', background_fill='#e9e0db'
)
xaxis = LinearAxis(axis_line_color=None)
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_line_color=None)
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
line = Line(x='x', y='y', line_color="#666699", line_width=2)
plot.add_glyph(lines_source, line)
circle = Circle(
x=xname, y=yname, size=12,
fill_color="#cc6633", line_color="#cc6633", fill_alpha=0.5
)
plot.add_glyph(circles_source, circle)
return plot
#where will this comment show up
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')
grid = GridPlot(children=[[I, II], [III, IV]], plot_width=800)
doc = Document()
doc.add(grid)
if __name__ == "__main__":
filename = "anscombe.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
print("Wrote %s" % filename)
view(filename)
| gpl-2.0 |
actlea/TopicalCrawler | TopicalCrawl/TopicalCrawl/TopicalCrawl/classifier/build_dict.py | 1 | 7498 | #!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: phpergao
@license: Apache Licence
@file: build_dict.py
@time: 16-4-13 上午11:38
"""
from multiclassifier import zh_tokenize
from base import read_text_src
import json
from math import log
import cPickle
import os
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
def dic_add(dic, key, cnt=1):
if key in dic:
dic[key] += cnt
else:
dic[key] = cnt
def unicode2str(text):
if isinstance(text, str):
return text
else:
return text.encode('utf-8')
def is_rarity_ok(Rmax, Rth, k, LDC, TFIDF, Tth):
if LDC<Rmax and TFIDF>Rth:
return True
for i in range(1,k+1):
if 2**(i-1)*Rmax<LDC<2**(i)*Rmax and TFIDF>Tth:
return True
return False
def is_popularity_ok(Tth, Pmax, LDC, TFIDF):
return TFIDF>Tth and LDC<Pmax
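# Illustrative usage (hypothetical thresholds): a token with TF-IDF weight 12.0 and a
# co-occurrence degree (LDC) of 3 satisfies is_popularity_ok(Tth=10.0, Pmax=5, LDC=3,
# TFIDF=12.0), since its weight exceeds the TF-IDF threshold while its LDC stays below Pmax.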
def compute_tfidf(path, dest_dir, N=150):
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
with open(path) as fr:
lines = fr.read()
lines = lines.splitlines()
lines = [line.split('\t')[1] for line in lines]
tok2tf = {}
tok2idf = {}
tok2tfidf = {}
bgram2LDC = {}
tok2LDC = {}
popularity = {}
rarity = {}
for index, line in enumerate(lines):
toks = zh_tokenize(line)
# Collect the bigram vocabulary and record which pairs of adjacent tokens co-occur.
gram = 2
for i in range(len(toks) - gram + 1):
bgram=tuple(toks[i:i + gram])
if bgram not in bgram2LDC:
bgram2LDC[bgram]=1
# toks.append(tuple(toks[i:i + gram]))
# Compute term frequencies (tf).
for tok in toks:
dic_add(tok2tf, tok)
tok2idf.setdefault(tok, set()).add(index)
N = len(lines)
#compute tf*idf
for tok in tok2tf:
Nt = len(tok2idf[tok])
tf = 1.0*tok2tf[tok]
idf = log(N/Nt+0.01)
weight = tf*idf
tok2tfidf[tok] = weight
tok2idf[tok] = idf
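# Worked example (illustrative numbers): a token occurring 4 times in a corpus of
# N = 100 lines and appearing in Nt = 10 distinct lines gets tf = 4.0,
# idf = log(100/10 + 0.01) ~= 2.30 (natural log), so its tf-idf weight is ~9.2.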
# Count, for each token, how many distinct bigrams it appears in (its co-occurrence degree, LDC).
for bgram, cnt in bgram2LDC.items():
left = bgram[0]
right = bgram[1]
dic_add(tok2LDC, left)
dic_add(tok2LDC, right)
for tok in tok2tf:
if tok not in tok2LDC:
tok2LDC[tok] = 1
# Select the most frequent words (roughly the top 20%) as the "popularity" vocabulary.
tfidf_tmp = sorted(tok2tfidf.items(), key=lambda x:x[1], reverse=True)
LDC_tmp = sorted(tok2LDC.items(), key=lambda x: x[1], reverse=True)
ratio = N*1.0/len(tfidf_tmp)
# Prefer words that are not too common.
Tth = tfidf_tmp[int(len(tfidf_tmp)*ratio)][1]
Pmax = LDC_tmp[int(len(LDC_tmp)*0.1)][1]
popularity = {tok: freq for tok, freq in tok2tfidf.items() if is_popularity_ok(Tth, Pmax, tok2LDC[tok], freq)}
Rth = tfidf_tmp[int(len(tfidf_tmp)*(1-ratio))][1] # when selecting rarity terms, the term frequency must not be too small
Rmax = LDC_tmp[int(len(LDC_tmp)*(1-ratio))][1]
rarity = {tok: freq for tok, freq in tok2tfidf.items() if is_rarity_ok(Rmax, Rth, 10, tok2LDC[tok], freq, Tth)}
# Find the largest co-occurrence count among all terms.
maxLDC = max(tok2LDC.values())
print 'maxLDC:', maxLDC
cPickle.dump(tok2tf, open('%s/tok2tf.pkl' %dest_dir, 'wb'), -1)
cPickle.dump(tok2idf, open('%s/tok2idf.pkl' %dest_dir, 'wb'), -1)
cPickle.dump(bgram2LDC, open('%s/bgram2LDC.pkl' %dest_dir, 'wb'), -1)
with open('%s/tok2tf.txt' %dest_dir, 'w') as fw:
for tok, tf in tok2tf.items():
fw.write('%s\t%d\n' % (unicode2str(tok), tf))
with open('%s/popularity.txt' % dest_dir, 'w') as fw:
for tok, tfidf in popularity.items():
fw.write('%s\t%d\n' % (unicode2str(tok), tfidf))
with open('%s/rarity.txt' % dest_dir, 'w') as fw:
for tok, tfidf in rarity.items():
fw.write('%s\t%d\n' % (unicode2str(tok), tfidf))
cPickle.dump(tok2tfidf, open('%s/tok2tfidf.pkl' %dest_dir, 'wb'), -1)
with open('%s/tok2tfidf.txt' %dest_dir, 'w') as fw:
for tok, tfidf in tok2tfidf.items():
fw.write('%s\t%s\n' % (unicode2str(tok), str(tfidf)))
#compute LDC
cPickle.dump(tok2LDC, open('%s/tok2LDC.pkl' %dest_dir, 'wb'), -1)
with open('%s/tok2LDC.txt' %dest_dir, 'w') as fw:
for tok, LDC in tok2LDC.items():
fw.write('%s\t%s\n' % (unicode2str(tok), str(LDC)))
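# Hedged usage sketch (editor addition): the corpus path below is hypothetical.
# compute_tfidf expects one "label<TAB>text" document per line and writes the
# tf, tf-idf and LDC dictionaries plus popularity.txt / rarity.txt into dest_dir.
def _build_dictionaries_example():
    corpus_path = 'sample-data/train-6-zh.txt'  # hypothetical location
    compute_tfidf(corpus_path, 'tmp')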
from sklearn.feature_selection import SelectKBest, chi2
import numpy as np
from sklearn.svm import LinearSVC
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
def chi_feature_select(train_file, test_file):
lines = read_text_src(train_file)
lines = [x for x in lines if len(x)>1]
X_train = [line[1] for line in lines]
y_train = [line[0] for line in lines]
lines = read_text_src(test_file)
lines = [x for x in lines if len(x) > 1]
X_test = [line[1] for line in lines]
y_test = [line[0] for line in lines]
vectorizer = TfidfVectorizer(tokenizer=zh_tokenize)#ngram_range=(1,2)
X_train = vectorizer.fit_transform(X_train)
X_test = vectorizer.transform(X_test)
word = vectorizer.get_feature_names()
N = X_train.shape[1]
ch2 = SelectKBest(chi2, k=int(N*0.2)) #.fit_transform(X, y)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
feature_names = [word[i] for i
in ch2.get_support(indices=True)]
#
# for i in feature_names:
# print i.encode('utf-8')
# feature_names = np.asarray(feature_names)
# print feature_names
clf = LinearSVC(penalty="l1", dual=False, tol=1e-3)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
import lda
def test_lda(train_file, out_file):
lines = read_text_src(train_file)
lines = [x for x in lines if len(x) > 1]
X_train = [line[1] for line in lines]
y_train = [line[0] for line in lines]
# lines = read_text_src(test_file)
# lines = [x for x in lines if len(x) > 1]
# X_test = [line[1] for line in lines]
# y_test = [line[0] for line in lines]
vectorizer = CountVectorizer(tokenizer=zh_tokenize) # ngram_range=(1,2)
X_train = vectorizer.fit_transform(X_train)
# X_test = vectorizer.transform(X_test)
vocab = vectorizer.get_feature_names()
model = lda.LDA(n_topics=1, random_state=0, n_iter=1000)
model.fit_transform(X_train, y_train)
topic_word = model.components_ # model.components_ also works
N = len(vocab)
    n_top_words = int(N * 0.2)  # must be an int so it can be used as a slice bound below
with open(out_file, 'w') as fw:
for i, topic_dist in enumerate(topic_word):
topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words + 1):-1]
print '%d:%s' %(i, '\n'.join(topic_words))
data = '\n'.join(topic_words)
data = data.encode('utf-8')
fw.write('%s' %'\n'.join(topic_words))
if __name__=='__main__':
path = '/mnt/UbutunShare/graduate/DataSet/document1.txt'
# path = '/mnt/UbutunShare/graduate/DataSet/Big/C000008.txt'
# path = '/mnt/UbutunShare/graduate/DataSet/Big/C000014.txt'
path = '/mnt/UbutunShare/graduate/DataSet/Dic/tennis_news.txt'
train_file = 'sample-data/train-6-zh.txt'
test_file = 'sample-data/test-6-zh.txt'
# compute_tfidf(path, 'tmp')
# chi_feature_select(train_file, test_file)
test_lda(path, 'tennis.txt')
# test()
# test_chi() | gpl-3.0 |
sangwook236/SWDT | sw_dev/python/rnd/test/language_processing/konlpy_test.py | 2 | 6495 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# REF [site] >>
# https://github.com/konlpy/konlpy
# http://konlpy.org/ko/latest/
# http://konlpy.org/ko/latest/data/
# http://konlpy.org/ko/latest/references/
import os
import konlpy
import nltk
import wordcloud
import matplotlib.pyplot as plt
# REF [site] >> http://konlpy.org/ko/latest/api/konlpy.tag
def simple_kkma_example():
kkma = konlpy.tag.Kkma()
print('kkma.tagset =', kkma.tagset)
konlpy.utils.pprint(kkma.sentences('네, 안녕하세요. 반갑습니다.'))
konlpy.utils.pprint(kkma.nouns('질문이나 건의사항은 깃헙 이슈 트래커에 남겨주세요.'))
konlpy.utils.pprint(kkma.pos('오류보고는 실행환경, 에러메세지와함께 설명을 최대한상세히!^^'))
konlpy.utils.pprint(kkma.pos('오루보고는 실행환경, 에러메세지와함께 설명을 최대한상세히!^^')) # A typo exists.
print(kkma.sentences('그래도 계속 공부합니다. 재밌으니까!'))
print(kkma.nouns('대학에서 DB, 통계학, 이산수학 등을 배웠지만...'))
print(kkma.morphs('공부를 하면할수록 모르는게 많다는 것을 알게 됩니다.'))
print(kkma.pos('다 까먹어버렸네요?ㅋㅋ'))
# REF [site] >> http://konlpy.org/ko/latest/api/konlpy.tag
def simple_hannanum_example():
hannanum = konlpy.tag.Hannanum()
print('hannanum.tagset =', hannanum.tagset)
print(hannanum.nouns('다람쥐 헌 쳇바퀴에 타고파'))
print(hannanum.analyze('롯데마트의 흑마늘 양념 치킨이 논란이 되고 있다.'))
print(hannanum.morphs('롯데마트의 흑마늘 양념 치킨이 논란이 되고 있다.'))
print(hannanum.pos('웃으면 더 행복합니다!'))
# REF [site] >> http://konlpy.org/ko/latest/api/konlpy.tag
def simple_komoran_example():
# REF [file] >> ${konlpy_HOME}/konlpy/data/tagset/komoran.json
"""
In user_dic.txt:
코모란 NNP
오픈소스 NNG
바람과 함께 사라지다 NNP
"""
komoran = konlpy.tag.Komoran(userdic='./user_dic.txt')
print('komoran.tagset =', komoran.tagset)
print(komoran.nouns('오픈소스에 관심 많은 멋진 개발자님들!'))
print(komoran.morphs('우왕 코모란도 오픈소스가 되었어요'))
print(komoran.pos('혹시 바람과 함께 사라지다 봤어?'))
# REF [site] >> http://konlpy.org/ko/latest/api/konlpy.tag
def simple_mecab_example():
# Mecab is not supported on Windows.
mecab = konlpy.tag.Mecab()
print('mecab.tagset =', mecab.tagset)
print(mecab.nouns('우리나라에는 무릎 치료를 잘하는 정형외과가 없는가!'))
print(mecab.morphs('영등포구청역에 있는 맛집 좀 알려주세요.'))
print(mecab.pos('자연주의 쇼핑몰은 어떤 곳인가?'))
# REF [site] >> http://konlpy.org/ko/latest/api/konlpy.tag
def simple_okt_example():
# Twitter() has changed to Okt() since v0.5.0.
okt = konlpy.tag.Okt()
print('okt.tagset =', okt.tagset)
print(okt.phrases('날카로운 분석과 신뢰감 있는 진행으로'))
print(okt.nouns('유일하게 항공기 체계 종합개발 경험을 갖고 있는 KAI는'))
print(okt.morphs('단독입찰보다 복수입찰의 경우'))
print(okt.pos('이것도 되나욬ㅋㅋ'))
print(okt.pos('이것도 되나욬ㅋㅋ', norm=True))
print(okt.pos('이것도 되나욬ㅋㅋ', norm=True, stem=True))
# REF [site] >> http://konlpy.org/ko/latest/api/konlpy.corpus/
def simple_kolaw_corpus_example():
fids = konlpy.corpus.kolaw.fileids()
print(fids)
fobj = konlpy.corpus.kolaw.open(fids[0])
print(fobj.read(140))
c = konlpy.corpus.kolaw.open('constitution.txt').read()
print(c[:10])
# REF [site] >> http://konlpy.org/ko/latest/data/
def simple_kobill_corpus_example():
fids = konlpy.corpus.kobill.fileids()
print(fids)
d = konlpy.corpus.kobill.open('1809890.txt').read()
print(d[:15])
# REF [site] >> https://datascienceschool.net/view-notebook/70ce46db4ced4a999c6ec349df0f4eb0/
def integrate_with_nltk():
okt = konlpy.tag.Okt()
c = konlpy.corpus.kolaw.open('constitution.txt').read()
text = nltk.Text(okt.nouns(c), name='kolaw')
#print(text.vocab())
#print(len(text.vocab().items()))
#text.vocab().plot()
text.plot(30)
plt.show()
# REF [doc] >> "Python 환경에서 한글 형태소 분석기 패키지 KoNLPy 사용법.pdf"
def extract_bigram_or_trigram_with_nltk():
bigram_measures = nltk.collocations.BigramAssocMeasures()
doc = konlpy.corpus.kolaw.open('constitution.txt').read()
pos = konlpy.tag.Kkma().pos(doc)
words = [s for s, t in pos]
tags = [t for s, t in pos]
print('\nCollocations among tagged words:')
finder = nltk.collocations.BigramCollocationFinder.from_words(pos)
konlpy.utils.pprint(finder.nbest(bigram_measures.pmi, 10)) # Top 10 n-grams with highest PMI.
print('\nCollocations among words:')
ignored_words = ['안녕']
finder = nltk.collocations.BigramCollocationFinder.from_words(words)
finder.apply_word_filter(lambda w: len(w) < 2 or w in ignored_words)
finder.apply_freq_filter(3) # Only bigrams that appear 3+ times.
konlpy.utils.pprint(finder.nbest(bigram_measures.pmi, 10))
print('\nCollocations among tags:')
finder = nltk.collocations.BigramCollocationFinder.from_words(tags)
konlpy.utils.pprint(finder.nbest(bigram_measures.pmi, 5))
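# Hedged sketch (editor addition): despite its name, the function above only
# builds bigram collocations; a trigram variant over the same tokenised words
# could look like this.
def extract_trigram_with_nltk():
    trigram_measures = nltk.collocations.TrigramAssocMeasures()
    doc = konlpy.corpus.kolaw.open('constitution.txt').read()
    words = [s for s, t in konlpy.tag.Kkma().pos(doc)]
    finder = nltk.collocations.TrigramCollocationFinder.from_words(words)
    finder.apply_freq_filter(3)  # Only trigrams that appear 3+ times.
    konlpy.utils.pprint(finder.nbest(trigram_measures.pmi, 10))  # Top 10 by PMI.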
# REF [site] >> https://datascienceschool.net/view-notebook/70ce46db4ced4a999c6ec349df0f4eb0/
def integrate_with_wordcloud():
if 'posix' == os.name:
system_font_dir_path = '/usr/share/fonts'
font_base_dir_path = '/home/sangwook/work/font'
else:
system_font_dir_path = 'C:/Windows/Fonts'
font_base_dir_path = 'D:/work/font'
font_filepath = font_base_dir_path + '/kor/gulim.ttf'
okt = konlpy.tag.Okt()
c = konlpy.corpus.kolaw.open('constitution.txt').read()
text = nltk.Text(okt.nouns(c), name='kolaw')
wc = wordcloud.WordCloud(width=1000, height=600, background_color='white', font_path=font_filepath)
plt.imshow(wc.generate_from_frequencies(text.vocab()))
plt.axis('off')
plt.show()
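# Hedged sketch (editor addition): to keep the rendered cloud instead of only
# displaying it, WordCloud can also write a PNG file; the font and output paths
# below are illustrative only.
def save_wordcloud_example(font_filepath, out_path='kolaw_wordcloud.png'):
    okt = konlpy.tag.Okt()
    c = konlpy.corpus.kolaw.open('constitution.txt').read()
    text = nltk.Text(okt.nouns(c), name='kolaw')
    wc = wordcloud.WordCloud(width=1000, height=600, background_color='white',
                             font_path=font_filepath)
    wc.generate_from_frequencies(text.vocab())
    wc.to_file(out_path)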
def main():
# Initialize the Java virtual machine (JVM).
#konlpy.jvm.init_jvm(jvmpath=None, max_heap_size=1024)
#--------------------
#simple_kkma_example()
#simple_hannanum_example()
#simple_komoran_example()
#simple_mecab_example() # Error.
#simple_okt_example()
#simple_kolaw_corpus_example()
#simple_kobill_corpus_example()
#--------------------
#integrate_with_nltk()
extract_bigram_or_trigram_with_nltk()
#integrate_with_wordcloud()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-3.0 |
cloud-fan/spark | python/pyspark/pandas/tests/test_series_datetime.py | 15 | 10917 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class SeriesDateTimeTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf1(self):
date1 = pd.Series(pd.date_range("2012-1-1 12:45:31", periods=3, freq="M"))
date2 = pd.Series(pd.date_range("2013-3-11 21:45:00", periods=3, freq="W"))
return pd.DataFrame(dict(start_date=date1, end_date=date2))
@property
def pd_start_date(self):
return self.pdf1["start_date"]
@property
def ks_start_date(self):
return ps.from_pandas(self.pd_start_date)
def check_func(self, func):
self.assert_eq(func(self.ks_start_date), func(self.pd_start_date))
def test_timestamp_subtraction(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
# Those fail in certain OSs presumably due to different
# timezone behaviours inherited from C library.
actual = (psdf["end_date"] - psdf["start_date"] - 1).to_pandas()
expected = (pdf["end_date"] - pdf["start_date"]) // np.timedelta64(1, "s") - 1
# self.assert_eq(actual, expected)
actual = (psdf["end_date"] - pd.Timestamp("2012-1-1 12:45:31") - 1).to_pandas()
expected = (pdf["end_date"] - pd.Timestamp("2012-1-1 12:45:31")) // np.timedelta64(
1, "s"
) - 1
# self.assert_eq(actual, expected)
actual = (pd.Timestamp("2013-3-11 21:45:00") - psdf["start_date"] - 1).to_pandas()
expected = (pd.Timestamp("2013-3-11 21:45:00") - pdf["start_date"]) // np.timedelta64(
1, "s"
) - 1
# self.assert_eq(actual, expected)
psdf = ps.DataFrame(
{"a": pd.date_range("2016-12-31", "2017-01-08", freq="D"), "b": pd.Series(range(9))}
)
expected_error_message = "datetime subtraction can only be applied to datetime series."
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"] - psdf["b"]
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"] - 1
with self.assertRaisesRegex(TypeError, expected_error_message):
1 - psdf["a"]
def test_arithmetic_op_exceptions(self):
psser = self.ks_start_date
py_datetime = self.pd_start_date.dt.to_pydatetime()
datetime_index = ps.Index(self.pd_start_date)
for other in [1, 0.1, psser, datetime_index, py_datetime]:
expected_err_msg = "Addition can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser + other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other + psser)
expected_err_msg = "Multiplication can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser * other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other * psser)
expected_err_msg = "True division can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser / other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other / psser)
expected_err_msg = "Floor division can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser // other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other // psser)
expected_err_msg = "Modulo can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser % other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other % psser)
expected_err_msg = "datetime subtraction can only be applied to datetime series."
for other in [1, 0.1]:
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser - other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other - psser)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser - other)
self.assertRaises(NotImplementedError, lambda: py_datetime - psser)
def test_date_subtraction(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf["end_date"].dt.date - psdf["start_date"].dt.date,
(pdf["end_date"].dt.date - pdf["start_date"].dt.date).dt.days,
)
self.assert_eq(
psdf["end_date"].dt.date - datetime.date(2012, 1, 1),
(pdf["end_date"].dt.date - datetime.date(2012, 1, 1)).dt.days,
)
self.assert_eq(
datetime.date(2013, 3, 11) - psdf["start_date"].dt.date,
(datetime.date(2013, 3, 11) - pdf["start_date"].dt.date).dt.days,
)
psdf = ps.DataFrame(
{"a": pd.date_range("2016-12-31", "2017-01-08", freq="D"), "b": pd.Series(range(9))}
)
expected_error_message = "date subtraction can only be applied to date series."
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"].dt.date - psdf["b"]
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"].dt.date - 1
with self.assertRaisesRegex(TypeError, expected_error_message):
1 - psdf["a"].dt.date
@unittest.skip(
"It fails in certain OSs presumably due to different "
"timezone behaviours inherited from C library."
)
def test_div(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
for u in "D", "s", "ms":
duration = np.timedelta64(1, u)
self.assert_eq(
(psdf["end_date"] - psdf["start_date"]) / duration,
(pdf["end_date"] - pdf["start_date"]) / duration,
)
@unittest.skip("It is currently failed probably for the same reason in 'test_subtraction'")
def test_date(self):
self.check_func(lambda x: x.dt.date)
def test_time(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.time)
def test_timetz(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.timetz)
def test_year(self):
self.check_func(lambda x: x.dt.year)
def test_month(self):
self.check_func(lambda x: x.dt.month)
def test_day(self):
self.check_func(lambda x: x.dt.day)
def test_hour(self):
self.check_func(lambda x: x.dt.hour)
def test_minute(self):
self.check_func(lambda x: x.dt.minute)
def test_second(self):
self.check_func(lambda x: x.dt.second)
def test_microsecond(self):
self.check_func(lambda x: x.dt.microsecond)
def test_nanosecond(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.nanosecond)
def test_week(self):
self.check_func(lambda x: x.dt.week)
def test_weekofyear(self):
self.check_func(lambda x: x.dt.weekofyear)
def test_dayofweek(self):
self.check_func(lambda x: x.dt.dayofweek)
def test_weekday(self):
self.check_func(lambda x: x.dt.weekday)
def test_dayofyear(self):
self.check_func(lambda x: x.dt.dayofyear)
def test_quarter(self):
        self.check_func(lambda x: x.dt.quarter)
def test_is_month_start(self):
self.check_func(lambda x: x.dt.is_month_start)
def test_is_month_end(self):
self.check_func(lambda x: x.dt.is_month_end)
def test_is_quarter_start(self):
self.check_func(lambda x: x.dt.is_quarter_start)
def test_is_quarter_end(self):
self.check_func(lambda x: x.dt.is_quarter_end)
def test_is_year_start(self):
self.check_func(lambda x: x.dt.is_year_start)
def test_is_year_end(self):
self.check_func(lambda x: x.dt.is_year_end)
def test_is_leap_year(self):
self.check_func(lambda x: x.dt.is_leap_year)
def test_daysinmonth(self):
self.check_func(lambda x: x.dt.daysinmonth)
def test_days_in_month(self):
self.check_func(lambda x: x.dt.days_in_month)
@unittest.expectedFailure
def test_tz_localize(self):
self.check_func(lambda x: x.dt.tz_localize("America/New_York"))
@unittest.expectedFailure
def test_tz_convert(self):
self.check_func(lambda x: x.dt.tz_convert("America/New_York"))
def test_normalize(self):
self.check_func(lambda x: x.dt.normalize())
def test_strftime(self):
self.check_func(lambda x: x.dt.strftime("%Y-%m-%d"))
def test_round(self):
self.check_func(lambda x: x.dt.round(freq="min"))
self.check_func(lambda x: x.dt.round(freq="H"))
def test_floor(self):
self.check_func(lambda x: x.dt.floor(freq="min"))
self.check_func(lambda x: x.dt.floor(freq="H"))
def test_ceil(self):
self.check_func(lambda x: x.dt.floor(freq="min"))
self.check_func(lambda x: x.dt.floor(freq="H"))
@unittest.skip("Unsupported locale setting")
def test_month_name(self):
self.check_func(lambda x: x.dt.month_name())
self.check_func(lambda x: x.dt.month_name(locale="en_US.UTF-8"))
@unittest.skip("Unsupported locale setting")
def test_day_name(self):
self.check_func(lambda x: x.dt.day_name())
self.check_func(lambda x: x.dt.day_name(locale="en_US.UTF-8"))
def test_unsupported_type(self):
self.assertRaisesRegex(
ValueError, "Cannot call DatetimeMethods on type LongType", lambda: ps.Series([0]).dt
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_series_datetime import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
zzcclp/spark | python/pyspark/pandas/base.py | 6 | 56113 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base and utility classes for pandas-on-Spark objects.
"""
from abc import ABCMeta, abstractmethod
from functools import wraps, partial
from itertools import chain
from typing import Any, Callable, Optional, Sequence, Tuple, Union, cast, TYPE_CHECKING
import numpy as np
import pandas as pd # noqa: F401
from pandas.api.types import is_list_like, CategoricalDtype
from pyspark.sql import functions as F, Column, Window
from pyspark.sql.types import (
DoubleType,
FloatType,
LongType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, Dtype, IndexOpsLike, Label, SeriesOrIndex
from pyspark.pandas.config import get_option, option_context
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_INDEX_NAME,
)
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.accessors import SparkIndexOpsMethods
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.utils import (
combine_frames,
same_anchor,
scol_for,
validate_axis,
ERROR_MESSAGE_CANNOT_COMBINE,
)
from pyspark.pandas.frame import DataFrame
if TYPE_CHECKING:
from pyspark.sql._typing import ColumnOrName # noqa: F401 (SPARK-34943)
from pyspark.pandas.data_type_ops.base import DataTypeOps # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
def should_alignment_for_column_op(self: SeriesOrIndex, other: SeriesOrIndex) -> bool:
from pyspark.pandas.series import Series
if isinstance(self, Series) and isinstance(other, Series):
return not same_anchor(self, other)
else:
return self._internal.spark_frame is not other._internal.spark_frame
def align_diff_index_ops(
func: Callable[..., Column], this_index_ops: SeriesOrIndex, *args: Any
) -> SeriesOrIndex:
"""
Align the `IndexOpsMixin` objects and apply the function.
Parameters
----------
func : The function to apply
this_index_ops : IndexOpsMixin
A base `IndexOpsMixin` object
args : list of other arguments including other `IndexOpsMixin` objects
Returns
-------
`Index` if all `this_index_ops` and arguments are `Index`; otherwise `Series`
"""
from pyspark.pandas.indexes import Index
from pyspark.pandas.series import Series, first_series
cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]
if isinstance(this_index_ops, Series) and all(isinstance(col, Series) for col in cols):
combined = combine_frames(
this_index_ops.to_frame(),
*[cast(Series, col).rename(i) for i, col in enumerate(cols)],
how="full"
)
return column_op(func)(
combined["this"]._psser_for(combined["this"]._internal.column_labels[0]),
*[
combined["that"]._psser_for(label)
for label in combined["that"]._internal.column_labels
]
).rename(this_index_ops.name)
else:
# This could cause as many counts, reset_index calls, joins for combining
# as the number of `Index`s in `args`. So far it's fine since we can assume the ops
# only work between at most two `Index`s. We might need to fix it in the future.
self_len = len(this_index_ops)
if any(len(col) != self_len for col in args if isinstance(col, IndexOpsMixin)):
raise ValueError("operands could not be broadcast together with shapes")
with option_context("compute.default_index_type", "distributed-sequence"):
if isinstance(this_index_ops, Index) and all(isinstance(col, Index) for col in cols):
return Index(
column_op(func)(
this_index_ops.to_series().reset_index(drop=True),
*[
arg.to_series().reset_index(drop=True)
if isinstance(arg, Index)
else arg
for arg in args
]
).sort_index(),
name=this_index_ops.name,
)
elif isinstance(this_index_ops, Series):
this = cast(DataFrame, this_index_ops.reset_index())
that = [
cast(Series, col.to_series() if isinstance(col, Index) else col)
.rename(i)
.reset_index(drop=True)
for i, col in enumerate(cols)
]
combined = combine_frames(this, *that, how="full").sort_index()
combined = combined.set_index(
combined._internal.column_labels[: this_index_ops._internal.index_level]
)
combined.index.names = this_index_ops._internal.index_names
return column_op(func)(
first_series(combined["this"]),
*[
combined["that"]._psser_for(label)
for label in combined["that"]._internal.column_labels
]
).rename(this_index_ops.name)
else:
this = cast(Index, this_index_ops).to_frame().reset_index(drop=True)
that_series = next(col for col in cols if isinstance(col, Series))
that_frame = that_series._psdf[
[
cast(Series, col.to_series() if isinstance(col, Index) else col).rename(i)
for i, col in enumerate(cols)
]
]
combined = combine_frames(this, that_frame.reset_index()).sort_index()
self_index = (
combined["this"].set_index(combined["this"]._internal.column_labels).index
)
other = combined["that"].set_index(
combined["that"]._internal.column_labels[: that_series._internal.index_level]
)
other.index.names = that_series._internal.index_names
return column_op(func)(
self_index,
*[
other._psser_for(label)
for label, col in zip(other._internal.column_labels, cols)
]
).rename(that_series.name)
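# Editor note (hedged illustration, not part of the module's API): operations
# between Series/Index objects anchored to different DataFrames are routed
# through align_diff_index_ops above, and are only allowed when the
# "compute.ops_on_diff_frames" option is enabled, e.g.:
#
#   import pyspark.pandas as ps
#   from pyspark.pandas.config import option_context
#
#   s1 = ps.Series([1, 2, 3])
#   s2 = ps.Series([10, 20, 30])      # a different DataFrame anchor
#   with option_context("compute.ops_on_diff_frames", True):
#       out = s1 + s2                 # aligned by index before the column op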
def booleanize_null(scol: Column, f: Callable[..., Column]) -> Column:
"""
Booleanize Null in Spark Column
"""
comp_ops = [
getattr(Column, "__{}__".format(comp_op))
for comp_op in ["eq", "ne", "lt", "le", "ge", "gt"]
]
if f in comp_ops:
# if `f` is "!=", fill null with True otherwise False
filler = f == Column.__ne__
scol = F.when(scol.isNull(), filler).otherwise(scol)
return scol
def column_op(f: Callable[..., Column]) -> Callable[..., SeriesOrIndex]:
"""
A decorator that wraps APIs taking/returning Spark Column so that pandas-on-Spark Series can be
supported too. If this decorator is used for the `f` function that takes Spark Column and
returns Spark Column, decorated `f` takes pandas-on-Spark Series as well and returns
pandas-on-Spark Series.
:param f: a function that takes Spark Column and returns Spark Column.
:param self: pandas-on-Spark Series
:param args: arguments that the function `f` takes.
"""
@wraps(f)
def wrapper(self: SeriesOrIndex, *args: Any) -> SeriesOrIndex:
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.series import Series
# It is possible for the function `f` takes other arguments than Spark Column.
# To cover this case, explicitly check if the argument is pandas-on-Spark Series and
# extract Spark Column. For other arguments, they are used as are.
cols = [arg for arg in args if isinstance(arg, (Series, Index))]
if all(not should_alignment_for_column_op(self, col) for col in cols):
# Same DataFrame anchors
scol = f(
self.spark.column,
*[arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args]
)
field = InternalField.from_struct_field(
self._internal.spark_frame.select(scol).schema[0],
use_extension_dtypes=any(
isinstance(col.dtype, extension_dtypes) for col in [self] + cols
),
)
if not field.is_extension_dtype:
scol = booleanize_null(scol, f).alias(field.name)
if isinstance(self, Series) or not any(isinstance(col, Series) for col in cols):
index_ops = self._with_new_scol(scol, field=field)
else:
psser = next(col for col in cols if isinstance(col, Series))
index_ops = psser._with_new_scol(scol, field=field)
elif get_option("compute.ops_on_diff_frames"):
index_ops = align_diff_index_ops(f, self, *args)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
if not all(self.name == col.name for col in cols):
index_ops = index_ops.rename(None)
return index_ops
return wrapper
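# Editor note (hedged illustration): column_op turns a Column -> Column function
# into one that also accepts pandas-on-Spark Series/Index while keeping the
# anchoring DataFrame intact, e.g.:
#
#   import pyspark.pandas as ps
#
#   upper = column_op(F.upper)      # F.upper operates on Spark Columns
#   psser = ps.Series(["a", "b"])
#   upper(psser)                    # -> pandas-on-Spark Series ['A', 'B']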
def numpy_column_op(f: Callable[..., Column]) -> Callable[..., SeriesOrIndex]:
@wraps(f)
def wrapper(self: SeriesOrIndex, *args: Any) -> SeriesOrIndex:
# PySpark does not support NumPy type out of the box. For now, we convert NumPy types
# into some primitive types understandable in PySpark.
new_args = []
for arg in args:
# TODO: This is a quick hack to support NumPy type. We should revisit this.
if isinstance(self.spark.data_type, LongType) and isinstance(arg, np.timedelta64):
new_args.append(float(arg / np.timedelta64(1, "s")))
else:
new_args.append(arg)
return column_op(f)(self, *new_args)
return wrapper
class IndexOpsMixin(object, metaclass=ABCMeta):
"""common ops mixin to support a unified interface / docs for Series / Index
    It assumes the following attributes, properties, and functions are defined.
"""
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@property
@abstractmethod
def _psdf(self) -> DataFrame:
pass
@abstractmethod
def _with_new_scol(
self: IndexOpsLike, scol: Column, *, field: Optional[InternalField] = None
) -> IndexOpsLike:
pass
@property
@abstractmethod
def _column_label(self) -> Optional[Label]:
pass
@property
@abstractmethod
def spark(self: IndexOpsLike) -> SparkIndexOpsMethods[IndexOpsLike]:
pass
@property
def _dtype_op(self) -> "DataTypeOps":
from pyspark.pandas.data_type_ops.base import DataTypeOps
return DataTypeOps(self.dtype, self.spark.data_type)
@abstractmethod
def copy(self: IndexOpsLike) -> IndexOpsLike:
pass
# arithmetic operators
def __neg__(self: IndexOpsLike) -> IndexOpsLike:
return self._dtype_op.neg(self)
def __add__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.add(self, other)
def __sub__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.sub(self, other)
def __mul__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.mul(self, other)
def __truediv__(self, other: Any) -> SeriesOrIndex:
"""
__truediv__ has different behaviour between pandas and PySpark for several cases.
        1. When dividing np.inf by zero, PySpark returns null whereas pandas returns np.inf
        2. When dividing a positive number by zero, PySpark returns null whereas pandas returns np.inf
        3. When dividing -np.inf by zero, PySpark returns null whereas pandas returns -np.inf
        4. When dividing a negative number by zero, PySpark returns null whereas pandas returns -np.inf
+-------------------------------------------+
| dividend (divisor: 0) | PySpark | pandas |
|-----------------------|---------|---------|
| np.inf | null | np.inf |
| -np.inf | null | -np.inf |
| 10 | null | np.inf |
| -10 | null | -np.inf |
+-----------------------|---------|---------+
"""
return self._dtype_op.truediv(self, other)
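    # Editor note (hedged illustration of the table above, assuming a live
    # SparkSession named `spark`):
    #   spark.range(1).select(F.lit(10.0) / F.lit(0.0)).collect()  # -> [Row(... = None)]
    #   pd.Series([10.0]) / 0.0                                    # -> 0    inf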
def __mod__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.mod(self, other)
def __radd__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.radd(self, other)
def __rsub__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rsub(self, other)
def __rmul__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rmul(self, other)
def __rtruediv__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rtruediv(self, other)
def __floordiv__(self, other: Any) -> SeriesOrIndex:
"""
__floordiv__ has different behaviour between pandas and PySpark for several cases.
        1. When dividing np.inf by zero, PySpark returns null whereas pandas returns np.inf
        2. When dividing a positive number by zero, PySpark returns null whereas pandas returns np.inf
        3. When dividing -np.inf by zero, PySpark returns null whereas pandas returns -np.inf
        4. When dividing a negative number by zero, PySpark returns null whereas pandas returns -np.inf
+-------------------------------------------+
| dividend (divisor: 0) | PySpark | pandas |
|-----------------------|---------|---------|
| np.inf | null | np.inf |
| -np.inf | null | -np.inf |
| 10 | null | np.inf |
| -10 | null | -np.inf |
+-----------------------|---------|---------+
"""
return self._dtype_op.floordiv(self, other)
def __rfloordiv__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rfloordiv(self, other)
def __rmod__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rmod(self, other)
def __pow__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.pow(self, other)
def __rpow__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rpow(self, other)
def __abs__(self: IndexOpsLike) -> IndexOpsLike:
return self._dtype_op.abs(self)
# comparison operators
def __eq__(self, other: Any) -> SeriesOrIndex: # type: ignore[override]
return self._dtype_op.eq(self, other)
def __ne__(self, other: Any) -> SeriesOrIndex: # type: ignore[override]
return self._dtype_op.ne(self, other)
def __lt__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.lt(self, other)
def __le__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.le(self, other)
def __ge__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.ge(self, other)
def __gt__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.gt(self, other)
def __invert__(self: IndexOpsLike) -> IndexOpsLike:
return self._dtype_op.invert(self)
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
def __and__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.__and__(self, other)
def __or__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.__or__(self, other)
def __rand__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rand(self, other)
def __ror__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.ror(self, other)
def __len__(self) -> int:
return len(self._psdf)
# NDArray Compat
def __array_ufunc__(
self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any
) -> SeriesOrIndex:
from pyspark.pandas import numpy_compat
# Try dunder methods first.
result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
# After that, we try with PySpark APIs.
if result is NotImplemented:
result = numpy_compat.maybe_dispatch_ufunc_to_spark_func(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return cast(SeriesOrIndex, result)
else:
# TODO: support more APIs?
raise NotImplementedError(
"pandas-on-Spark objects currently do not support %s." % ufunc
)
@property
def dtype(self) -> Dtype:
"""Return the dtype object of the underlying data.
Examples
--------
>>> s = ps.Series([1, 2, 3])
>>> s.dtype
dtype('int64')
>>> s = ps.Series(list('abc'))
>>> s.dtype
dtype('O')
>>> s = ps.Series(pd.date_range('20130101', periods=3))
>>> s.dtype
dtype('<M8[ns]')
>>> s.rename("a").to_frame().set_index("a").index.dtype
dtype('<M8[ns]')
"""
return self._internal.data_fields[0].dtype
@property
def empty(self) -> bool:
"""
Returns true if the current object is empty. Otherwise, returns false.
>>> ps.range(10).id.empty
False
>>> ps.range(0).id.empty
True
>>> ps.DataFrame({}, index=list('abc')).index.empty
False
"""
return self._internal.resolved_copy.spark_frame.rdd.isEmpty()
@property
def hasnans(self) -> bool:
"""
Return True if it has any missing values. Otherwise, it returns False.
>>> ps.DataFrame({}, index=list('abc')).index.hasnans
False
>>> ps.Series(['a', None]).hasnans
True
>>> ps.Series([1.0, 2.0, np.nan]).hasnans
True
>>> ps.Series([1, 2, 3]).hasnans
False
>>> (ps.Series([1.0, 2.0, np.nan]) + 1).hasnans
True
>>> ps.Series([1, 2, 3]).rename("a").to_frame().set_index("a").index.hasnans
False
"""
sdf = self._internal.spark_frame
scol = self.spark.column
if isinstance(self.spark.data_type, (DoubleType, FloatType)):
return sdf.select(F.max(scol.isNull() | F.isnan(scol))).collect()[0][0]
else:
return sdf.select(F.max(scol.isNull())).collect()[0][0]
@property
def is_monotonic(self) -> bool:
"""
Return boolean if values in the object are monotonically increasing.
        .. note:: the current implementation of is_monotonic requires shuffling
            and aggregating multiple times to check the order locally and globally,
            which is potentially expensive. In the case of a multi-index, all data are
            transferred to a single node, which can easily cause an out-of-memory error currently.
.. note:: Disable the Spark config `spark.sql.optimizer.nestedSchemaPruning.enabled`
for multi-index if you're using pandas-on-Spark < 1.7.0 with PySpark 3.1.1.
Returns
-------
is_monotonic : bool
Examples
--------
>>> ser = ps.Series(['1/1/2018', '3/1/2018', '4/1/2018'])
>>> ser.is_monotonic
True
>>> df = ps.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']})
>>> df.dates.is_monotonic
False
>>> df.index.is_monotonic
True
>>> ser = ps.Series([1])
>>> ser.is_monotonic
True
>>> ser = ps.Series([])
>>> ser.is_monotonic
True
>>> ser.rename("a").to_frame().set_index("a").index.is_monotonic
True
>>> ser = ps.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
>>> ser.is_monotonic
False
>>> ser.index.is_monotonic
True
Support for MultiIndex
>>> midx = ps.MultiIndex.from_tuples(
... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('y', 'c'),
('y', 'd'),
('z', 'e')],
)
>>> midx.is_monotonic
True
>>> midx = ps.MultiIndex.from_tuples(
... [('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('z', 'a'),
('z', 'b'),
('y', 'c'),
('y', 'd'),
('x', 'e')],
)
>>> midx.is_monotonic
False
"""
return self._is_monotonic("increasing")
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self) -> bool:
"""
Return boolean if values in the object are monotonically decreasing.
        .. note:: the current implementation of is_monotonic_decreasing requires shuffling
            and aggregating multiple times to check the order locally and globally,
            which is potentially expensive. In the case of a multi-index, all data are
            transferred to a single node, which can easily cause an out-of-memory error currently.
.. note:: Disable the Spark config `spark.sql.optimizer.nestedSchemaPruning.enabled`
for multi-index if you're using pandas-on-Spark < 1.7.0 with PySpark 3.1.1.
Returns
-------
is_monotonic : bool
Examples
--------
>>> ser = ps.Series(['4/1/2018', '3/1/2018', '1/1/2018'])
>>> ser.is_monotonic_decreasing
True
>>> df = ps.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']})
>>> df.dates.is_monotonic_decreasing
False
>>> df.index.is_monotonic_decreasing
False
>>> ser = ps.Series([1])
>>> ser.is_monotonic_decreasing
True
>>> ser = ps.Series([])
>>> ser.is_monotonic_decreasing
True
>>> ser.rename("a").to_frame().set_index("a").index.is_monotonic_decreasing
True
>>> ser = ps.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
>>> ser.is_monotonic_decreasing
True
>>> ser.index.is_monotonic_decreasing
False
Support for MultiIndex
>>> midx = ps.MultiIndex.from_tuples(
... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('y', 'c'),
('y', 'd'),
('z', 'e')],
)
>>> midx.is_monotonic_decreasing
False
>>> midx = ps.MultiIndex.from_tuples(
... [('z', 'e'), ('z', 'd'), ('y', 'c'), ('y', 'b'), ('x', 'a')])
>>> midx # doctest: +SKIP
        MultiIndex([('z', 'e'),
                    ('z', 'd'),
                    ('y', 'c'),
                    ('y', 'b'),
                    ('x', 'a')],
)
>>> midx.is_monotonic_decreasing
True
"""
return self._is_monotonic("decreasing")
def _is_locally_monotonic_spark_column(self, order: str) -> Column:
window = (
Window.partitionBy(F.col("__partition_id"))
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-1, -1)
)
if order == "increasing":
return (F.col("__origin") >= F.lag(F.col("__origin"), 1).over(window)) & F.col(
"__origin"
).isNotNull()
else:
return (F.col("__origin") <= F.lag(F.col("__origin"), 1).over(window)) & F.col(
"__origin"
).isNotNull()
def _is_monotonic(self, order: str) -> bool:
assert order in ("increasing", "decreasing")
sdf = self._internal.spark_frame
sdf = (
sdf.select(
F.spark_partition_id().alias(
"__partition_id"
), # Make sure we use the same partition id in the whole job.
F.col(NATURAL_ORDER_COLUMN_NAME),
self.spark.column.alias("__origin"),
)
.select(
F.col("__partition_id"),
F.col("__origin"),
self._is_locally_monotonic_spark_column(order).alias(
"__comparison_within_partition"
),
)
.groupby(F.col("__partition_id"))
.agg(
F.min(F.col("__origin")).alias("__partition_min"),
F.max(F.col("__origin")).alias("__partition_max"),
F.min(F.coalesce(F.col("__comparison_within_partition"), SF.lit(True))).alias(
"__comparison_within_partition"
),
)
)
# Now we're windowing the aggregation results without partition specification.
# The number of rows here will be as the same of partitions, which is expected
# to be small.
window = Window.orderBy(F.col("__partition_id")).rowsBetween(-1, -1)
if order == "increasing":
comparison_col = F.col("__partition_min") >= F.lag(F.col("__partition_max"), 1).over(
window
)
else:
comparison_col = F.col("__partition_min") <= F.lag(F.col("__partition_max"), 1).over(
window
)
sdf = sdf.select(
comparison_col.alias("__comparison_between_partitions"),
F.col("__comparison_within_partition"),
)
ret = sdf.select(
F.min(F.coalesce(F.col("__comparison_between_partitions"), SF.lit(True)))
& F.min(F.coalesce(F.col("__comparison_within_partition"), SF.lit(True)))
).collect()[0][0]
if ret is None:
return True
else:
return ret
@property
def ndim(self) -> int:
"""
Return an int representing the number of array dimensions.
Return 1 for Series / Index / MultiIndex.
Examples
--------
For Series
>>> s = ps.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8])
>>> s.ndim
1
For Index
>>> s.index.ndim
1
For MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [1, 1, 1, 1, 1, 2, 1, 2, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s.index.ndim
1
"""
return 1
def astype(self: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
"""
Cast a pandas-on-Spark object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> ser = ps.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
>>> ser.rename("a").to_frame().set_index("a").index.astype('int64')
Int64Index([1, 2], dtype='int64', name='a')
"""
return self._dtype_op.astype(self, dtype)
def isin(self: IndexOpsLike, values: Sequence[Any]) -> IndexOpsLike:
"""
Check whether `values` are contained in Series or Index.
Return a boolean Series or Index showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test.
Returns
-------
isin : Series (bool dtype) or Index (bool dtype)
Examples
--------
>>> s = ps.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
>>> s.rename("a").to_frame().set_index("a").index.isin(['lama'])
Index([True, False, True, False, True, False], dtype='object', name='a')
"""
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]".format(values_type=type(values).__name__)
)
values = values.tolist() if isinstance(values, np.ndarray) else list(values)
return self._with_new_scol(self.spark.column.isin([SF.lit(v) for v in values]))
def isnull(self: IndexOpsLike) -> IndexOpsLike:
"""
        Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or numpy.NaN, gets mapped to True values.
Everything else gets mapped to False values. Characters such as empty strings '' or
numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
Returns
-------
Series or Index : Mask of bool values for each element in Series
that indicates whether an element is not an NA value.
Examples
--------
>>> ser = ps.Series([5, 6, np.NaN])
>>> ser.isna() # doctest: +NORMALIZE_WHITESPACE
0 False
1 False
2 True
dtype: bool
>>> ser.rename("a").to_frame().set_index("a").index.isna()
Index([False, False, True], dtype='object', name='a')
"""
from pyspark.pandas.indexes import MultiIndex
if isinstance(self, MultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
return self._dtype_op.isnull(self)
isna = isnull
def notnull(self: IndexOpsLike) -> IndexOpsLike:
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True.
Characters such as empty strings '' or numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
NA values, such as None or numpy.NaN, get mapped to False values.
Returns
-------
Series or Index : Mask of bool values for each element in Series
that indicates whether an element is not an NA value.
Examples
--------
Show which entries in a Series are not NA.
>>> ser = ps.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
>>> ser.rename("a").to_frame().set_index("a").index.notna()
Index([True, True, False], dtype='object', name='a')
"""
from pyspark.pandas.indexes import MultiIndex
if isinstance(self, MultiIndex):
raise NotImplementedError("notna is not defined for MultiIndex")
return (~self.isnull()).rename(self.name) # type: ignore
notna = notnull
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Axis = 0) -> bool:
"""
Return whether all elements are True.
        Returns True unless there is at least one element within a series that is
        False or equivalent (e.g. zero or empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
>>> ps.Series([True, True]).all()
True
>>> ps.Series([True, False]).all()
False
>>> ps.Series([0, 1]).all()
False
>>> ps.Series([1, 2, 3]).all()
True
>>> ps.Series([True, True, None]).all()
True
>>> ps.Series([True, False, None]).all()
False
>>> ps.Series([]).all()
True
>>> ps.Series([np.nan]).all()
True
>>> df = ps.Series([True, False, None]).rename("a").to_frame()
>>> df.set_index("a").index.all()
False
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
sdf = self._internal.spark_frame.select(self.spark.column)
col = scol_for(sdf, sdf.columns[0])
# Note that we're ignoring `None`s here for now.
        # `any` and `every` were added as of Spark 3.0
# ret = sdf.select(F.expr("every(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
# Here we use min as its alternative:
ret = sdf.select(F.min(F.coalesce(col.cast("boolean"), SF.lit(True)))).collect()[0][0]
if ret is None:
return True
else:
return ret
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Axis = 0) -> bool:
"""
Return whether any element is True.
        Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
>>> ps.Series([False, False]).any()
False
>>> ps.Series([True, False]).any()
True
>>> ps.Series([0, 0]).any()
False
>>> ps.Series([0, 1, 2]).any()
True
>>> ps.Series([False, False, None]).any()
False
>>> ps.Series([True, False, None]).any()
True
>>> ps.Series([]).any()
False
>>> ps.Series([np.nan]).any()
False
>>> df = ps.Series([True, False, None]).rename("a").to_frame()
>>> df.set_index("a").index.any()
True
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
sdf = self._internal.spark_frame.select(self.spark.column)
col = scol_for(sdf, sdf.columns[0])
# Note that we're ignoring `None`s here for now.
        # `any` and `every` were added as of Spark 3.0
# ret = sdf.select(F.expr("any(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
# Here we use max as its alternative:
ret = sdf.select(F.max(F.coalesce(col.cast("boolean"), SF.lit(False)))).collect()[0][0]
if ret is None:
return False
else:
return ret
# TODO: add frep and axis parameter
def shift(
self: IndexOpsLike, periods: int = 1, fill_value: Optional[Any] = None
) -> IndexOpsLike:
"""
Shift Series/Index by desired number of periods.
        .. note:: the current implementation of shift uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input Series/Index, shifted.
Examples
--------
>>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.Col1.shift(periods=3)
0 NaN
1 NaN
2 NaN
3 10.0
4 20.0
Name: Col1, dtype: float64
>>> df.Col2.shift(periods=3, fill_value=0)
0 0
1 0
2 0
3 13
4 23
Name: Col2, dtype: int64
>>> df.index.shift(periods=3, fill_value=0)
Int64Index([0, 0, 0, 0, 1], dtype='int64')
"""
return self._shift(periods, fill_value).spark.analyzed
def _shift(
self: IndexOpsLike,
periods: int,
fill_value: Any,
*,
part_cols: Sequence["ColumnOrName"] = ()
) -> IndexOpsLike:
if not isinstance(periods, int):
raise TypeError("periods should be an int; however, got [%s]" % type(periods).__name__)
col = self.spark.column
window = (
Window.partitionBy(*part_cols)
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-periods, -periods)
)
lag_col = F.lag(col, periods).over(window)
col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)
return self._with_new_scol(col, field=self._internal.data_fields[0].copy(nullable=True))
# TODO: Update Documentation for Bins Parameter when its supported
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins: None = None,
dropna: bool = True,
) -> "Series":
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : Not Yet Supported
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
Examples
--------
For Series
>>> df = ps.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})
>>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
Name: x, dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE
1.0 0.6
0.0 0.4
Name: x, dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
NaN 1
Name: x, dtype: int64
For Index
>>> idx = ps.Index([3, 1, 2, 3, 4, np.nan])
>>> idx
Float64Index([3.0, 1.0, 2.0, 3.0, 4.0, nan], dtype='float64')
>>> idx.value_counts().sort_index()
1.0 1
2.0 1
3.0 2
4.0 1
dtype: int64
**sort**
        With `sort` set to `False`, the result isn't sorted by the counts.
        >>> idx.value_counts(sort=False).sort_index()
1.0 1
2.0 1
3.0 2
4.0 1
dtype: int64
**normalize**
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> idx.value_counts(normalize=True).sort_index()
1.0 0.2
2.0 0.2
3.0 0.4
4.0 0.2
dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> idx.value_counts(dropna=False).sort_index() # doctest: +SKIP
1.0 1
2.0 1
3.0 2
4.0 1
NaN 1
dtype: int64
For MultiIndex.
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [1, 1, 1, 1, 1, 2, 1, 2, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s.index # doctest: +SKIP
MultiIndex([( 'lama', 'weight'),
( 'lama', 'weight'),
( 'lama', 'weight'),
( 'cow', 'weight'),
( 'cow', 'weight'),
( 'cow', 'length'),
('falcon', 'weight'),
('falcon', 'length'),
('falcon', 'length')],
)
>>> s.index.value_counts().sort_index()
(cow, length) 1
(cow, weight) 2
(falcon, length) 2
(falcon, weight) 1
(lama, weight) 3
dtype: int64
>>> s.index.value_counts(normalize=True).sort_index()
(cow, length) 0.111111
(cow, weight) 0.222222
(falcon, length) 0.222222
(falcon, weight) 0.111111
(lama, weight) 0.333333
dtype: float64
If Index has name, keep the name up.
>>> idx = ps.Index([0, 0, 0, 1, 1, 2, 3], name='pandas-on-Spark')
>>> idx.value_counts().sort_index()
0 3
1 2
2 1
3 1
Name: pandas-on-Spark, dtype: int64
"""
from pyspark.pandas.series import first_series
if bins is not None:
raise NotImplementedError("value_counts currently does not support bins")
if dropna:
sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna()
else:
sdf_dropna = self._internal.spark_frame.select(self.spark.column)
index_name = SPARK_DEFAULT_INDEX_NAME
column_name = self._internal.data_spark_column_names[0]
sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count()
if sort:
if ascending:
sdf = sdf.orderBy(F.col("count"))
else:
sdf = sdf.orderBy(F.col("count").desc())
if normalize:
sum = sdf_dropna.count()
sdf = sdf.withColumn("count", F.col("count") / SF.lit(sum))
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, index_name)],
column_labels=self._internal.column_labels,
data_spark_columns=[scol_for(sdf, "count")],
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal))
def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
            If False, will use the exact algorithm and return the exact number of unique values.
            If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
            for large amounts of data.
Note: This parameter is specific to pandas-on-Spark and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to pandas-on-Spark.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> ps.Series([1, 2, 3, np.nan]).nunique()
3
>>> ps.Series([1, 2, 3, np.nan]).nunique(dropna=False)
4
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> ps.Series([1, 2, 3, np.nan]).nunique(approx=True)
3
>>> idx = ps.Index([1, 1, 2, None])
>>> idx
Float64Index([1.0, 1.0, 2.0, nan], dtype='float64')
>>> idx.nunique()
2
>>> idx.nunique(dropna=False)
3
"""
res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)])
return res.collect()[0][0]
def _nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> Column:
colname = self._internal.data_spark_column_names[0]
count_fn = cast(
Callable[[Column], Column],
partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct,
)
if dropna:
return count_fn(self.spark.column).alias(colname)
else:
return (
count_fn(self.spark.column)
+ F.when(
F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1, 1
).otherwise(0)
).alias(colname)
def take(self: IndexOpsLike, indices: Sequence[int]) -> IndexOpsLike:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
Series
>>> psser = ps.Series([100, 200, 300, 400, 500])
>>> psser
0 100
1 200
2 300
3 400
4 500
dtype: int64
>>> psser.take([0, 2, 4]).sort_index()
0 100
2 300
4 500
dtype: int64
Index
>>> psidx = ps.Index([100, 200, 300, 400, 500])
>>> psidx
Int64Index([100, 200, 300, 400, 500], dtype='int64')
>>> psidx.take([0, 2, 4]).sort_values()
Int64Index([100, 300, 500], dtype='int64')
MultiIndex
>>> psmidx = ps.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "c")])
>>> psmidx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('x', 'c')],
)
>>> psmidx.take([0, 2]) # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'c')],
)
"""
if not is_list_like(indices) or isinstance(indices, (dict, set)):
raise TypeError("`indices` must be a list-like except dict or set")
if isinstance(self, ps.Series):
return cast(IndexOpsLike, self.iloc[indices])
else:
return cast(IndexOpsLike, self._psdf.iloc[indices].index)
def factorize(
self: IndexOpsLike, sort: bool = True, na_sentinel: Optional[int] = -1
) -> Tuple[IndexOpsLike, pd.Index]:
"""
Encode the object as an enumerated type or categorical variable.
This method is useful for obtaining a numeric representation of an
array when all that matters is identifying distinct values.
Parameters
----------
sort : bool, default True
na_sentinel : int or None, default -1
Value to mark "not found". If None, will not drop the NaN
from the uniques of the values.
Returns
-------
codes : Series or Index
A Series or Index that's an indexer into `uniques`.
``uniques.take(codes)`` will have the same values as `values`.
uniques : pd.Index
The unique valid values.
.. note ::
Even if there's a missing value in `values`, `uniques` will
*not* contain an entry for it.
Examples
--------
>>> psser = ps.Series(['b', None, 'a', 'c', 'b'])
>>> codes, uniques = psser.factorize()
>>> codes
0 1
1 -1
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
>>> codes, uniques = psser.factorize(na_sentinel=None)
>>> codes
0 1
1 3
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c', None], dtype='object')
>>> codes, uniques = psser.factorize(na_sentinel=-2)
>>> codes
0 1
1 -2
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
For Index:
>>> psidx = ps.Index(['b', None, 'a', 'c', 'b'])
>>> codes, uniques = psidx.factorize()
>>> codes
Int64Index([1, -1, 0, 2, 1], dtype='int64')
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
"""
from pyspark.pandas.series import first_series
assert (na_sentinel is None) or isinstance(na_sentinel, int)
assert sort is True
if isinstance(self.dtype, CategoricalDtype):
categories = self.dtype.categories
if len(categories) == 0:
scol = SF.lit(None)
else:
kvs = list(
chain(
*[
(SF.lit(code), SF.lit(category))
for code, category in enumerate(categories)
]
)
)
map_scol = F.create_map(*kvs)
scol = map_scol.getItem(self.spark.column)
codes, uniques = self._with_new_scol(
scol.alias(self._internal.data_spark_column_names[0])
).factorize(na_sentinel=na_sentinel)
return codes, uniques.astype(self.dtype)
uniq_sdf = self._internal.spark_frame.select(self.spark.column).distinct()
        # Check the number of uniques and construct a sorted `uniques_list`
max_compute_count = get_option("compute.max_rows")
if max_compute_count is not None:
uniq_pdf = uniq_sdf.limit(max_compute_count + 1).toPandas()
if len(uniq_pdf) > max_compute_count:
raise ValueError(
"Current Series has more then {0} unique values. "
"Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' "
"to more than {0} rows. Note that, before changing the "
"'compute.max_rows', this operation is considerably expensive.".format(
max_compute_count
)
)
else:
uniq_pdf = uniq_sdf.toPandas()
        # pandas maps both NaN and null from Spark to np.nan, so de-duplication is required
uniq_series = first_series(uniq_pdf).drop_duplicates()
uniques_list = uniq_series.tolist()
uniques_list = sorted(uniques_list, key=lambda x: (pd.isna(x), x))
# Constructs `unique_to_code` mapping non-na unique to code
unique_to_code = {}
if na_sentinel is not None:
na_sentinel_code = na_sentinel
code = 0
for unique in uniques_list:
if pd.isna(unique):
if na_sentinel is None:
na_sentinel_code = code
else:
unique_to_code[unique] = code
code += 1
kvs = list(
chain(*([(SF.lit(unique), SF.lit(code)) for unique, code in unique_to_code.items()]))
)
if len(kvs) == 0: # uniques are all missing values
new_scol = SF.lit(na_sentinel_code)
else:
scol = self.spark.column
if isinstance(self.spark.data_type, (FloatType, DoubleType)):
cond = scol.isNull() | F.isnan(scol)
else:
cond = scol.isNull()
map_scol = F.create_map(*kvs)
null_scol = F.when(cond, SF.lit(na_sentinel_code))
new_scol = null_scol.otherwise(map_scol.getItem(scol))
codes = self._with_new_scol(new_scol.alias(self._internal.data_spark_column_names[0]))
if na_sentinel is not None:
# Drops the NaN from the uniques of the values
uniques_list = [x for x in uniques_list if not pd.isna(x)]
uniques = pd.Index(uniques_list)
return codes, uniques
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.base
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.base.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.base tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.base,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
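A minimal standalone sketch (not part of pyspark/pandas/base.py above) of the two Spark-side tricks used in `_nunique` and `factorize`: adjusting a distinct count for nulls and applying a literal value-to-code map. It assumes only a local PySpark installation; the column name `x` and the toy data are illustrative.
from pyspark.sql import SparkSession, functions as F
spark = SparkSession.builder.master("local[1]").appName("base-sketch").getOrCreate()
sdf = spark.createDataFrame([(1.0,), (2.0,), (3.0,), (None,)], "x double")
# nunique(dropna=False): countDistinct ignores nulls, so add 1 whenever a null is present.
exact = F.countDistinct(F.col("x"))
null_bump = F.when(F.count(F.when(F.col("x").isNull(), 1)) >= 1, 1).otherwise(0)
print(sdf.select((exact + null_bump).alias("nunique_keep_nan")).first()[0])  # 4
# nunique(approx=True): HyperLogLog-based estimate, error bounded by the rsd parameter.
print(sdf.select(F.approx_count_distinct(F.col("x"), rsd=0.05)).first()[0])  # ~3
# factorize-style encoding: a literal value->code map applied with getItem; nulls become -1.
codes = F.create_map(F.lit(1.0), F.lit(0), F.lit(2.0), F.lit(1), F.lit(3.0), F.lit(2))
encoded = F.when(F.col("x").isNull(), F.lit(-1)).otherwise(codes.getItem(F.col("x")))
sdf.select(encoded.alias("code")).show()
spark.stop()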
naturali/tensorflow | tensorflow/examples/skflow/digits.py | 9 | 2380 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, cross_validation, metrics
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn import monitors
# Load dataset
digits = datasets.load_digits()
X = digits.images
y = digits.target
# Split it into train / test subsets
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
test_size=0.2,
random_state=42)
# Split X_train again to create validation data
X_train, X_val, y_train, y_val = cross_validation.train_test_split(X_train,
y_train,
test_size=0.2,
random_state=42)
# TensorFlow model using Scikit Flow ops
def conv_model(X, y):
X = tf.expand_dims(X, 3)
features = tf.reduce_max(tf.contrib.layers.conv2d(X, 12, [3, 3]), [1, 2])
features = tf.reshape(features, [-1, 12])
return learn.models.logistic_regression(features, y)
val_monitor = monitors.ValidationMonitor(X_val, y_val, every_n_steps=50)
# Create a classifier, train and predict.
classifier = learn.TensorFlowEstimator(model_fn=conv_model, n_classes=10,
steps=1000, learning_rate=0.05,
batch_size=128)
classifier.fit(X_train, y_train, monitors=[val_monitor])
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Test Accuracy: {0:f}'.format(score))
| apache-2.0 |
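The example above targets the long-removed tf.contrib.learn ("skflow") API. Below is a hedged sketch of the same 8x8-digits conv model written against a modern TensorFlow 2.x / Keras installation; the layer sizes mirror the original (12 3x3 filters, a spatial max reduction, a 10-way softmax head), but it is an assumed translation rather than part of the original example.
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
digits = load_digits()
X = (digits.images / 16.0)[..., np.newaxis].astype("float32")  # shape (n, 8, 8, 1)
X_train, X_test, y_train, y_test = train_test_split(X, digits.target, test_size=0.2, random_state=42)
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(12, 3, activation="relu", input_shape=(8, 8, 1)),
    tf.keras.layers.GlobalMaxPooling2D(),  # mirrors tf.reduce_max over axes [1, 2]
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10, batch_size=128, validation_split=0.2, verbose=0)
print("Test Accuracy:", model.evaluate(X_test, y_test, verbose=0)[1])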
elijah513/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
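A hedged, self-contained check of the same character 1-3 gram + Perceptron pipeline on a tiny in-memory corpus, so it runs without the language data folder passed on the command line; the three toy sentences are assumptions and far too few for a reliable fit.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
toy_docs = [
    "The quick brown fox jumps over the lazy dog",
    "Le renard brun saute par dessus le chien paresseux",
    "Der schnelle braune Fuchs springt ueber den faulen Hund",
]
toy_langs = ["en", "fr", "de"]
toy_clf = Pipeline([
    ("vec", TfidfVectorizer(ngram_range=(1, 3), analyzer="char", use_idf=False)),
    ("clf", Perceptron()),
])
toy_clf.fit(toy_docs, toy_langs)
print(toy_clf.predict(["Ceci est un test de detection de la langue."]))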
Christoph/tag-connect | embeddings/nlp_pipeline.py | 1 | 2809 | from importlib import reload
import spacy
from sklearn.decomposition import TruncatedSVD, PCA, NMF, LatentDirichletAllocation
from sklearn.metrics.pairwise import cosine_similarity
from scipy.stats import wasserstein_distance
from scipy.stats import entropy
import numpy as np
from os import path
import sys
sys.path.append(path.abspath('../methods'))
import embedding
import details
import helpers
import vis
import cluster_analysis  # required by the cluster_analysis.* calls below; assumed to live in ../methods like the other helpers
# nlp = spacy.load('en')
# Load data
(texts, clean, clean_fancy, labels) = helpers.load_reuters_data()
used = clean_fancy
used = clean
len(clean[0])
len(clean_fancy[0])
reload(vis)
# Word vectors
(word_vecs, word_docs, vocab_vecs, vocab) = embedding.HAL(used, True)
(word_vecs, word_docs, vocab_vecs, vocab) = embedding.W2V(used)
# High dimensional clustering dependent document vectors
(cluster_labels, dist, clusterer) = embedding.hsc_auto(word_vecs, vocab_vecs)
(cluster_labels, dist, clusterer) = embedding.hsc_binned(word_vecs, vocab_vecs, "birch", 16)
vis.simMatrixIntersection(embedding.earth_mover_distance(dist), used)
# Document vectors
(vecs, vectorizer) = embedding.count(used)
(vecs, vectorizer) = embedding.tfidf(used)
reduced = TruncatedSVD(20).fit_transform(vecs)
reduced = PCA(3).fit_transform(vecs.toarray())
reduced = NMF(10, beta_loss='frobenius').fit_transform(vecs)
reduced = NMF(50, beta_loss="kullback-leibler", solver="mu").fit_transform(vecs)
reduced = LatentDirichletAllocation(5).fit_transform(vecs)
vecs = reduced
sim = cosine_similarity(vecs)
vis.simMatrixIntersection(sim, used)
vis.scree_plot(sim, vecs, nonlinear=False)
vis.scatter_tsne(vecs, labels)
vis.scatter_mds(vecs, labels)
vis.scatter_svd(vecs, labels)
# Comparing description axis
axis_words = helpers.get_dimension_words(vectorizer, vecs, reduced)
print(axis_words)
reload(cluster_analysis)
reload(vis)
# Cluster analysis
classes = cluster_analysis.prepare_label_data(labels, word_vecs, word_docs)
documents = cluster_analysis.prepare_data(word_vecs, word_docs)
tfidf_documents = cluster_analysis.prepare_tfidf_data(vecs.toarray(), used)
# Algorithms: agglo, km, gauss, aff
# predictions, vocab_labels = cluster_analysis.cluster_space(classes, vocab_vecs, vocab, bins=16, algorithm="aff")
predictions, dist_predictions, vocab_labels = cluster_analysis.cluster_space(documents, vocab_vecs, vocab, bins=16, algorithm="agglo")
predictions, vocab_labels = cluster_analysis.cluster_space_hdb(classes, vocab_vecs, vocab)
vis.cluster_document(dist_predictions, vocab_labels)
vis.multi_histogram(predictions)
sim = embedding.earth_mover_distance(dist_predictions)
wasserstein_distance(predictions["0"], predictions["1"])
[v[0] for v in vocab_labels.items() if v[1] == 2]
set([v[0] for v in vocab_labels.items() if v[1] == 1]).intersection(
[v[0] for v in vocab_labels.items() if v[1] == 14])
| mit |
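The script above depends on local modules (embedding, cluster_analysis, helpers, vis) from ../methods that are not included here. The sketch below reproduces only the library-backed steps -- TF-IDF vectors, an NMF reduction, cosine similarity, and a Wasserstein distance between two document topic profiles -- using the same sklearn/scipy APIs; the toy corpus and component count are assumptions.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.metrics.pairwise import cosine_similarity
from scipy.stats import wasserstein_distance
docs = [
    "oil prices rise as exports fall",
    "crude oil exports and energy prices",
    "central bank raises interest rates",
    "interest rates and inflation policy",
]
tfidf = TfidfVectorizer().fit_transform(docs)
topics = NMF(n_components=2, init="nndsvda", random_state=0).fit_transform(tfidf)
print(cosine_similarity(topics))                   # document-document similarity in topic space
print(wasserstein_distance(topics[0], topics[2]))  # distance between two topic profiles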