"""RCV1 dataset.
"""
# Author: Tom Dupre la Tour
# License: BSD 3 clause
import logging
from os.path import exists, join
from gzip import GzipFile
from io import BytesIO
from contextlib import closing
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from ..utils.fixes import makedirs
from ..externals import joblib
from .svmlight_format import load_svmlight_files
from ..utils import shuffle as shuffle_
URL = ('http://jmlr.csail.mit.edu/papers/volume5/lewis04a/'
'a13-vector-files/lyrl2004_vectors')
URL_topics = ('http://jmlr.csail.mit.edu/papers/volume5/lewis04a/'
'a08-topic-qrels/rcv1-v2.topics.qrels.gz')
logger = logging.getLogger()
def fetch_rcv1(data_home=None, subset='all', download_if_missing=True,
random_state=None, shuffle=False):
"""Load the RCV1 multilabel dataset, downloading it if necessary.
Version: RCV1-v2, vectors, full sets, topics multilabels.
============== =====================
Classes 103
Samples total 804414
Dimensionality 47236
Features real, between 0 and 1
============== =====================
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : string, optional
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
subset: string, 'train', 'test', or 'all', default='all'
Select the dataset to load: 'train' for the training set
(23149 samples), 'test' for the test set (781265 samples),
'all' for both, with the training samples first if shuffle is False.
This follows the official LYRL2004 chronological split.
download_if_missing : boolean, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : scipy csr array, dtype np.float64, shape (804414, 47236)
        About 0.16% of the values are non-zero.
dataset.target : scipy csr array, dtype np.uint8, shape (804414, 103)
Each sample has a value of 1 in its categories, and 0 in others.
        About 3.15% of the values are non-zero.
dataset.sample_id : numpy array, dtype np.uint32, shape (804414,)
Identification number of each sample, as ordered in dataset.data.
dataset.target_names : numpy array, dtype object, length (103)
Names of each target (RCV1 topics), as ordered in dataset.target.
dataset.DESCR : string
Description of the RCV1 dataset.
    References
    ----------
Lewis, D. D., Yang, Y., Rose, T. G., & Li, F. (2004). RCV1: A new
benchmark collection for text categorization research. The Journal of
Machine Learning Research, 5, 361-397.
"""
N_SAMPLES = 804414
N_FEATURES = 47236
N_CATEGORIES = 103
N_TRAIN = 23149
data_home = get_data_home(data_home=data_home)
rcv1_dir = join(data_home, "RCV1")
if download_if_missing:
makedirs(rcv1_dir, exist_ok=True)
samples_path = join(rcv1_dir, "samples.pkl")
sample_id_path = join(rcv1_dir, "sample_id.pkl")
sample_topics_path = join(rcv1_dir, "sample_topics.pkl")
topics_path = join(rcv1_dir, "topics_names.pkl")
# load data (X) and sample_id
if download_if_missing and (not exists(samples_path) or
not exists(sample_id_path)):
file_urls = ["%s_test_pt%d.dat.gz" % (URL, i) for i in range(4)]
file_urls.append("%s_train.dat.gz" % URL)
files = []
for file_url in file_urls:
logger.warning("Downloading %s" % file_url)
with closing(urlopen(file_url)) as online_file:
                # buffer the full file in memory so that GzipFile
                # can work correctly
f = BytesIO(online_file.read())
files.append(GzipFile(fileobj=f))
Xy = load_svmlight_files(files, n_features=N_FEATURES)
# Training data is before testing data
X = sp.vstack([Xy[8], Xy[0], Xy[2], Xy[4], Xy[6]]).tocsr()
sample_id = np.hstack((Xy[9], Xy[1], Xy[3], Xy[5], Xy[7]))
sample_id = sample_id.astype(np.uint32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(sample_id, sample_id_path, compress=9)
else:
X = joblib.load(samples_path)
sample_id = joblib.load(sample_id_path)
# load target (y), categories, and sample_id_bis
if download_if_missing and (not exists(sample_topics_path) or
not exists(topics_path)):
logger.warning("Downloading %s" % URL_topics)
with closing(urlopen(URL_topics)) as online_topics:
f = BytesIO(online_topics.read())
# parse the target file
n_cat = -1
n_doc = -1
doc_previous = -1
y = np.zeros((N_SAMPLES, N_CATEGORIES), dtype=np.uint8)
sample_id_bis = np.zeros(N_SAMPLES, dtype=np.int32)
category_names = {}
for line in GzipFile(fileobj=f, mode='rb'):
line_components = line.decode("ascii").split(u" ")
if len(line_components) == 3:
cat, doc, _ = line_components
if cat not in category_names:
n_cat += 1
category_names[cat] = n_cat
doc = int(doc)
if doc != doc_previous:
doc_previous = doc
n_doc += 1
sample_id_bis[n_doc] = doc
y[n_doc, category_names[cat]] = 1
# Samples in X are ordered with sample_id,
# whereas in y, they are ordered with sample_id_bis.
permutation = _find_permutation(sample_id_bis, sample_id)
y = sp.csr_matrix(y[permutation, :])
        # save category names in a list, in the same order as y
categories = np.empty(N_CATEGORIES, dtype=object)
for k in category_names.keys():
categories[category_names[k]] = k
joblib.dump(y, sample_topics_path, compress=9)
joblib.dump(categories, topics_path, compress=9)
else:
y = joblib.load(sample_topics_path)
categories = joblib.load(topics_path)
if subset == 'all':
pass
elif subset == 'train':
X = X[:N_TRAIN, :]
y = y[:N_TRAIN, :]
sample_id = sample_id[:N_TRAIN]
elif subset == 'test':
X = X[N_TRAIN:, :]
y = y[N_TRAIN:, :]
sample_id = sample_id[N_TRAIN:]
else:
raise ValueError("Unknown subset parameter. Got '%s' instead of one"
" of ('all', 'train', test')" % subset)
if shuffle:
X, y, sample_id = shuffle_(X, y, sample_id, random_state=random_state)
return Bunch(data=X, target=y, sample_id=sample_id,
target_names=categories, DESCR=__doc__)
def _inverse_permutation(p):
"""inverse permutation p"""
n = p.size
s = np.zeros(n, dtype=np.int32)
i = np.arange(n, dtype=np.int32)
np.put(s, p, i) # s[p] = i
return s
def _find_permutation(a, b):
"""find the permutation from a to b"""
t = np.argsort(a)
u = np.argsort(b)
u_ = _inverse_permutation(u)
return t[u_]
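# A minimal sketch (added for illustration, not part of the original module) of
# how the two helpers above are used: given the document ids in the order of
# the topic file (sample_id_bis) and in the order of the data matrix
# (sample_id), _find_permutation returns the index array that reorders the
# rows of y to match X. The toy ids below are hypothetical.
def _example_find_permutation():
    ids_in_topic_file = np.array([7, 3, 9, 1])  # row order of y
    ids_in_data = np.array([1, 9, 3, 7])        # row order of X
    perm = _find_permutation(ids_in_topic_file, ids_in_data)
    # applying the permutation to the first ordering yields the second one
    assert np.array_equal(ids_in_topic_file[perm], ids_in_data)
    return perm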
| {
"content_hash": "cca8e621b5023143bff486ffb798fc55",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 78,
"avg_line_length": 34.84782608695652,
"alnum_prop": 0.5987523393636931,
"repo_name": "cl4rke/scikit-learn",
"id": "090f03560d6ff76b08f061acf03b7a0d7b3fb9cd",
"size": "8015",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/datasets/rcv1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5916687"
},
{
"name": "Shell",
"bytes": "3952"
}
],
"symlink_target": ""
} |
"""Utility functions."""
import getpass
def ask_input(prompt='', is_password=False):
    """Keep asking for input until a non-empty answer is given."""
    while True:
        answer = getpass.getpass() if is_password else input(prompt)
        if answer != '':
            return answer
def ask_multiple_option(options, prefix='Choose between', prompt=': '):
    """Keep asking until a non-empty, in-range option number is given."""
    def exists(index):
        return 0 <= index < len(options)
    while True:
        print(prefix)
        for index, option in enumerate(options):
            print(' {} - {}'.format(index + 1, option))
        answer = input(prompt).strip()
        if answer != '':
            try:
                index = int(answer) - 1
            except ValueError:
                continue
            if exists(index):
                return options[index]
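# A minimal usage sketch (added for illustration, not part of the original
# module); the prompts and option names below are hypothetical.
if __name__ == '__main__':
    username = ask_input('Username: ')
    password = ask_input('Password: ', is_password=True)
    target = ask_multiple_option(['followers', 'following'],
                                 prefix='Choose what to scrape',
                                 prompt='Number: ')
    print('Scraping {} of {} (password of {} characters given)'.format(
        target, username, len(password)))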
| {
"content_hash": "b37e7cd01aa39687c242ac25afcab53a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 76,
"avg_line_length": 28.17241379310345,
"alnum_prop": 0.5679314565483476,
"repo_name": "frabonomi/instagram-followers-scraper",
"id": "013f48340a828b04244452ee35eec7a30e0382a2",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9777"
}
],
"symlink_target": ""
} |
from time import strftime, gmtime
from logging import getLogger
from os.path import dirname, join as path_join
# pylint: disable=F0401
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, FallbackHandler
from tornado.wsgi import WSGIContainer
from tornado.escape import utf8
from paste.deploy.converters import asbool
# pylint: enable=F0401
from turbulenz_local.lib.multiplayer import MultiplayerHandler, MultiplayerStatusHandler, SessionStatusHandler
from turbulenz_local.lib.responsefromfile import ResponseFromFileHandler
from turbulenz_local.handlers.localv1.save import SaveFileHandler
# pylint: disable=R0904
class DevserverWSGIContainer(WSGIContainer):
logger = getLogger('DevserverWSGIContainer')
new_line = b'\r\n'
empty_string = b''
def __call__(self, request):
parts = []
parts_append = parts.append
base_header = strftime('\r\nDate: %a, %d %b %Y %H:%M:%S GMT', gmtime()) + '\r\nServer: tornado\r\n'
if not request.supports_http_1_1():
if request.headers.get('Connection', '').lower() == 'keep-alive':
base_header += 'Connection: Keep-Alive\r\n'
def start_response(status, response_headers, exc_info=None):
parts_append(utf8('HTTP/1.1 ' + status + base_header))
for key, value in response_headers:
parts_append(utf8(key + ': ' + value + '\r\n'))
parts_append(self.new_line)
return None
environ = WSGIContainer.environ(request)
environ['wsgi.multiprocess'] = False # Some EvalException middleware fails if set to True
app_response = self.wsgi_application(environ, start_response)
if not parts:
raise Exception('WSGI app did not call start_response')
if request.method != 'HEAD':
parts.extend(app_response)
if hasattr(app_response, 'close'):
app_response.close()
app_response = None
if hasattr(request, "connection"):
# Now that the request is finished, clear the callback we
# set on the IOStream (which would otherwise prevent the
# garbage collection of the RequestHandler when there
# are keepalive connections)
request.connection.stream.set_close_callback(None)
request.write(self.empty_string.join(parts))
try:
request.finish()
except IOError as e:
self.logger.error('Exception when writing response: %s', str(e))
def _log(self, status_code, request):
pass
class DevserverApplication(Application):
def log_request(self, handler):
pass
# pylint: enable=R0904
def run(wsgi_app, global_conf,
host='0.0.0.0', port='8080',
multiplayer=False,
testing=False):
port = int(port)
multiplayer = asbool(multiplayer)
testing = asbool(testing)
wsgi_app = DevserverWSGIContainer(wsgi_app)
handlers = []
if multiplayer:
handlers.append(('/multiplayer/(.*)/(.*)', MultiplayerHandler))
handlers.append(('/api/v1/multiplayer/status', MultiplayerStatusHandler))
handlers.append(('/api/v1/multiplayer/status/session/(.*)', SessionStatusHandler))
if testing:
raw_response_dir = path_join(dirname(__file__), 'raw-response')
handlers.append(('/raw-response/(.*)',
ResponseFromFileHandler, dict(path=raw_response_dir)))
handlers.append(('/local/v1/save/([^/]+)/(.*)', SaveFileHandler))
handlers.append(('.*', FallbackHandler, dict(fallback=wsgi_app)))
tornado_app = DevserverApplication(handlers, transforms=[])
handlers = None
server = HTTPServer(tornado_app)
server.listen(port, host)
print 'Serving on %s:%u view at http://127.0.0.1:%u' % (host, port, port)
IOLoop.instance().start()
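# A minimal sketch (added for illustration, not part of the original module)
# showing how run() above could be exercised directly with a trivial WSGI
# application; the host and port values are hypothetical, and global_conf is
# not used by run(), so an empty dict suffices.
def _example_devserver():
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello from the devserver\n']
    run(hello_app, {}, host='127.0.0.1', port='8081',
        multiplayer=False, testing=False)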
| {
"content_hash": "47f930f928f1541073c7439b3330fef8",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 110,
"avg_line_length": 34.175438596491226,
"alnum_prop": 0.6519507186858317,
"repo_name": "turbulenz/turbulenz_local",
"id": "d5f3806f46895cbafcf7c730a5935610132861ba",
"size": "3940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turbulenz_local/paste_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "282"
},
{
"name": "CSS",
"bytes": "29719"
},
{
"name": "HTML",
"bytes": "54841"
},
{
"name": "JavaScript",
"bytes": "200107"
},
{
"name": "Python",
"bytes": "459206"
}
],
"symlink_target": ""
} |
"""Create threads to run multiple enqueue ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import tensorflow.python.platform
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import logging
class QueueRunner(object):
"""Holds a list of enqueue operations for a queue, each to be run in a thread.
Queues are a convenient TensorFlow mechanism to compute tensors
asynchronously using multiple threads. For example in the canonical 'Input
Reader' setup one set of threads generates filenames in a queue; a second set
of threads read records from the files, processes them, and enqueues tensors
on a second queue; a third set of threads dequeues these input records to
construct batches and runs them through training operations.
There are several delicate issues when running multiple threads that way:
closing the queues in sequence as the input is exhausted, correctly catching
and reporting exceptions, etc.
The `QueueRunner`, combined with the `Coordinator`, helps handle these issues.
"""
def __init__(self, queue, enqueue_ops):
"""Create a QueueRunner.
On construction the `QueueRunner` adds an op to close the queue. That op
will be run if the enqueue ops raise exceptions.
When you later call the `create_threads()` method, the `QueueRunner` will
create one thread for each op in `enqueue_ops`. Each thread will run its
enqueue op in parallel with the other threads. The enqueue ops do not have
to all be the same op, but it is expected that they all enqueue tensors in
`queue`.
Args:
queue: A `Queue`.
enqueue_ops: List of enqueue ops to run in threads later.
"""
self._queue = queue
self._enqueue_ops = enqueue_ops
# Close when no more will be produced, but pending enqueues should be
# preserved.
self._close_op = self._queue.close()
# Close and cancel pending enqueues since there was an error and we want
# to unblock everything so we can cleanly exit.
self._cancel_op = self._queue.close(cancel_pending_enqueues=True)
# Protect the count of runs to wait for.
self._lock = threading.Lock()
self._runs = 0
# List of exceptions raised by the running threads.
self._exceptions_raised = []
@property
def exceptions_raised(self):
"""Exceptions raised but not handled by the `QueueRunner` threads.
Exceptions raised in queue runner threads are handled in one of two ways
depending on whether or not a `Coordinator` was passed to
`create_threads()`:
* With a `Coordinator`, exceptions are reported to the coordinator and
forgotten by the `QueueRunner`.
* Without a `Coordinator`, exceptions are captured by the `QueueRunner` and
made available in this `exceptions_raised` property.
Returns:
A list of Python `Exception` objects. The list is empty if no exception
was captured. (No exceptions are captured when using a Coordinator.)
"""
return self._exceptions_raised
# pylint: disable=broad-except
def _run(self, sess, enqueue_op, coord=None):
"""Execute the enqueue op in a loop, close the queue in case of error.
Args:
sess: A Session.
enqueue_op: The Operation to run.
coord: Optional Coordinator object for reporting errors and checking
for stop conditions.
"""
decremented = False
try:
while True:
if coord and coord.should_stop():
break
try:
sess.run(enqueue_op)
except errors.OutOfRangeError:
# This exception indicates that a queue was closed.
with self._lock:
self._runs -= 1
decremented = True
if self._runs == 0:
try:
sess.run(self._close_op)
except Exception as e:
# Intentionally ignore errors from close_op.
logging.vlog(1, "Ignored exception: %s", str(e))
return
except Exception as e:
# This catches all other exceptions.
if coord:
coord.request_stop(e)
else:
logging.error("Exception in QueueRunner: %s", str(e))
with self._lock:
self._exceptions_raised.append(e)
raise
finally:
# Make sure we account for all terminations: normal or errors.
if not decremented:
with self._lock:
self._runs -= 1
def _close_on_stop(self, sess, cancel_op, coord):
"""Close the queue when the Coordinator requests stop.
Args:
sess: A Session.
cancel_op: The Operation to run.
coord: Coordinator.
"""
coord.wait_for_stop()
try:
sess.run(cancel_op)
except Exception as e:
# Intentionally ignore errors from cancel_op.
logging.vlog(1, "Ignored exception: %s", str(e))
# pylint: enable=broad-except
def create_threads(self, sess, coord=None, daemon=False, start=False):
"""Create threads to run the enqueue ops.
This method requires a session in which the graph was launched. It creates
a list of threads, optionally starting them. There is one thread for each
op passed in `enqueue_ops`.
The `coord` argument is an optional coordinator, that the threads will use
to terminate together and report exceptions. If a coordinator is given,
this method starts an additional thread to close the queue when the
coordinator requests a stop.
This method may be called again as long as all threads from a previous call
have stopped.
Args:
sess: A `Session`.
coord: Optional `Coordinator` object for reporting errors and checking
stop conditions.
daemon: Boolean. If `True` make the threads daemon threads.
start: Boolean. If `True` starts the threads. If `False` the
caller must call the `start()` method of the returned threads.
Returns:
A list of threads.
Raises:
RuntimeError: If threads from a previous call to `create_threads()` are
still running.
"""
with self._lock:
if self._runs > 0:
raise RuntimeError(
"Threads are already running from a previous call to Threads() "
"for this queue runner.")
self._runs = len(self._enqueue_ops)
self._exceptions_raised = []
ret_threads = [threading.Thread(target=self._run, args=(sess, op, coord))
for op in self._enqueue_ops]
if coord:
ret_threads.append(threading.Thread(target=self._close_on_stop,
args=(sess, self._cancel_op, coord)))
for t in ret_threads:
if daemon:
t.daemon = True
if start:
t.start()
return ret_threads
def add_queue_runner(qr, collection=ops.GraphKeys.QUEUE_RUNNERS):
"""Adds a `QueueRunner` to a collection in the graph.
When building a complex model that uses many queues it is often difficult to
gather all the queue runners that need to be run. This convenience function
allows you to add a queue runner to a well known collection in the graph.
The companion method `start_queue_runners()` can be used to start threads for
all the collected queue runners.
Args:
qr: A `QueueRunner`.
collection: A `GraphKey` specifying the graph collection to add
the queue runner to. Defaults to `GraphKeys.QUEUE_RUNNERS`.
"""
ops.add_to_collection(collection, qr)
def start_queue_runners(sess=None, coord=None, daemon=True, start=True,
collection=ops.GraphKeys.QUEUE_RUNNERS):
"""Starts all queue runners collected in the graph.
This is a companion method to `add_queue_runner()`. It just starts
threads for all queue runners collected in the graph. It returns
the list of all threads.
Args:
sess: `Session` used to run the queue ops. Defaults to the
default session.
coord: Optional `Coordinator` for coordinating the started threads.
daemon: Whether the threads should be marked as `daemons`, meaning
they don't block program exit.
start: Set to `False` to only create the threads, not start them.
collection: A `GraphKey` specifying the graph collection to
get the queue runners from. Defaults to `GraphKeys.QUEUE_RUNNERS`.
Returns:
A list of threads.
"""
if sess is None:
sess = ops.get_default_session()
threads = []
for qr in ops.get_collection(collection):
threads.extend(qr.create_threads(sess, coord=coord, daemon=daemon,
start=start))
return threads
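# A minimal usage sketch (added for illustration, not part of the original
# module), assuming the public wrappers `tf.FIFOQueue`, `tf.train.QueueRunner`,
# `tf.train.Coordinator` and `tf.train.start_queue_runners` are available, as
# in TensorFlow releases of this era.
def _example_queue_runner():
  import tensorflow as tf
  queue = tf.FIFOQueue(capacity=32, dtypes=[tf.float32])
  enqueue_op = queue.enqueue([tf.random_uniform([])])
  # Two threads will run the same enqueue op in parallel.
  qr = tf.train.QueueRunner(queue, [enqueue_op] * 2)
  tf.train.add_queue_runner(qr)
  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run(queue.dequeue()))  # consume one element
    coord.request_stop()
    coord.join(threads)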
| {
"content_hash": "ff4601de8087e2ff9b0ffea81ce88eb2",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 80,
"avg_line_length": 36.552742616033754,
"alnum_prop": 0.672630728385086,
"repo_name": "pavlovml/tensorflow",
"id": "6c91a89ab78e1442e1bec5107ad757e2f882d7b1",
"size": "8663",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/queue_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "127104"
},
{
"name": "C++",
"bytes": "4910453"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "637366"
},
{
"name": "Java",
"bytes": "44388"
},
{
"name": "JavaScript",
"bytes": "5067"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "45213"
},
{
"name": "Python",
"bytes": "2480267"
},
{
"name": "Shell",
"bytes": "4262"
},
{
"name": "TypeScript",
"bytes": "237684"
}
],
"symlink_target": ""
} |
import logging
from pylons import g, request
from tg import flash, redirect, session, config
from openid.consumer import consumer
from allura import model as M
log = logging.getLogger(__name__)
# openid.oidutil outputs all logging to STDERR unless otherwise configured.
# We follow the openid.oidutil instructions to install our own logging hook.
from openid import oidutil
oidutil.log = log.info
def verify_oid(oid_url, failure_redirect=None, return_to=None,
**kw):
'''Step 1 of OID verification -- redirect to provider site'''
log.info('Trying to login via %s', oid_url)
realm = config.get('openid.realm', 'http://localhost:8080/')
return_to = realm + 'auth/' + return_to
oidconsumer = consumer.Consumer(g.oid_session(), g.oid_store)
try:
req = oidconsumer.begin(oid_url)
except consumer.DiscoveryFailure, ex:
log.exception('Error in openid login')
flash(str(ex[0]), 'error')
redirect(failure_redirect)
if req is None: # pragma no cover
flash('No openid services found for <code>%s</code>' % oid_url,
'error')
redirect(failure_redirect)
if req.shouldSendRedirect():
redirect_url = req.redirectURL(
realm, return_to, False)
log.info('Redirecting to %r', redirect_url)
session.save()
redirect(redirect_url)
else:
return dict(kw, form=req.formMarkup(realm, return_to=return_to))
def process_oid(failure_redirect=None):
oidconsumer = consumer.Consumer(g.oid_session(), g.oid_store)
info = oidconsumer.complete(request.params, request.url)
display_identifier = info.getDisplayIdentifier() or info.identity_url
if info.status == consumer.FAILURE and display_identifier:
# In the case of failure, if info is non-None, it is the
# URL that we were verifying. We include it in the error
# message to help the user figure out what happened.
fmt = "Verification of %s failed: %s"
flash(fmt % (display_identifier, info.message), 'error')
redirect(failure_redirect)
elif info.status == consumer.SUCCESS:
        # Success means that the OpenID transaction completed
        # without error.
css_class = 'alert'
# This is a successful verification attempt. If this
# was a real application, we would do our login,
# comment posting, etc. here.
fmt = "You have successfully verified %s as your identity."
message = fmt % display_identifier
if info.endpoint.canonicalID:
# You should authorize i-name users by their canonicalID,
# rather than their more human-friendly identifiers. That
# way their account with you is not compromised if their
# i-name registration expires and is bought by someone else.
message += (" This is an i-name, and its persistent ID is %s"
% info.endpoint.canonicalID )
flash(message, 'info')
elif info.status == consumer.CANCEL:
# cancelled
message = 'Verification cancelled'
flash(message, 'error')
redirect(failure_redirect)
elif info.status == consumer.SETUP_NEEDED:
if info.setup_url:
message = '<a href=%s>Setup needed</a>' % info.setup_url
else:
# This means auth didn't succeed, but you're welcome to try
# non-immediate mode.
message = 'Setup needed'
flash(message, 'error')
redirect(failure_redirect)
else:
# Either we don't understand the code or there is no
# openid_url included with the error. Give a generic
# failure message. The library should supply debug
# information in a log.
message = 'Verification failed.'
flash(message, 'error')
redirect(failure_redirect)
session.save()
oid_obj = M.OpenId.upsert(info.identity_url, display_identifier=display_identifier)
return oid_obj
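def _example_login_flow(oid_url):
    """A hypothetical sketch (added for illustration, not part of the original
    module) of the two-step flow above. In practice the two calls live in two
    separate request handlers: verify_oid() redirects the browser to the
    provider, and process_oid() runs in the handler the provider returns to;
    the URLs used here are invented.
    """
    # step 1: from the login form handler
    verify_oid(oid_url, failure_redirect='/auth/',
               return_to='login_process_oid')
    # step 2: from the '/auth/login_process_oid' handler, after the redirect
    oid_obj = process_oid(failure_redirect='/auth/')
    return oid_obj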
| {
"content_hash": "8476045394dfe9f0f1c849e9b3fafc55",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 87,
"avg_line_length": 42.863157894736844,
"alnum_prop": 0.6453831041257367,
"repo_name": "leotrubach/sourceforge-allura",
"id": "c7e2f633546a07cc726e60484700f3a8d0a5158e",
"size": "4072",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "Allura/allura/lib/oid_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2985957"
},
{
"name": "JavaScript",
"bytes": "650950"
},
{
"name": "Puppet",
"bytes": "2677"
},
{
"name": "Python",
"bytes": "1866436"
},
{
"name": "Ruby",
"bytes": "4109"
},
{
"name": "Shell",
"bytes": "6636"
}
],
"symlink_target": ""
} |
from flask import render_template, request
from websod import app
from websod.models import Integration, Job
from websod.integration import integrations_view, calculate_result, calculate_diff, get_diff
@app.route('/debug')
def debug():
from pprint import pformat
return "<pre>" + pformat(dict(app.config)) + "</pre>"
@app.route('/')
def home():
"""shows integrations time graph and latest integrations with diff """
session = app.db.session
limit = request.args.get('limit', 50, int)
integrations = session.query(Integration).order_by(Integration.id.desc()).limit(limit).all()
integrations_view(integrations)
return render_template('integration_list.html', integrations=integrations,
history=Integration.get_elapsed_history(session))
@app.route('/integration/')
def integration_list():
"""shows all integrations with diff """
session = app.db.session
integrations = session.query(Integration).order_by(Integration.id.desc()).all()
integrations_view(integrations)
return render_template('integration_list.html',
integrations=integrations,
history='')
@app.route('/integration/<int:id_>')
def integration(id_):
"""integration page just show list of jobs with their result"""
integration = app.db.session.query(Integration).get(id_)
# collect the failed jobs
failed_jobs = integration.getJobsByResult("fail")
unstable_jobs = integration.getJobsByResult("unstable")
success_jobs = integration.getJobsByResult("success")
tpl_data = {'integration': integration,
'failed_jobs': sorted(failed_jobs, key=lambda k: k.name),
'unstable_jobs': sorted(unstable_jobs, key=lambda k: k.name),
'success_jobs': sorted(success_jobs, key=lambda k: k.name)}
return render_template('integration.html', **tpl_data)
@app.route('/job/<int:id_>')
def job(id_):
session = app.db.session
the_job = session.query(Job).get(id_)
elapsed_history = the_job.get_elapsed_history(session)
return render_template('job.html', job=the_job, history=elapsed_history)
# this is supposed to be called by sodd's to notify when a job_group is done
@app.route('/group_finished/<int:integration_id>')
def group_finished(integration_id):
session = app.db.session
integration = session.query(Integration).get(integration_id)
    # calculate
try:
calculate_result(integration)
calculate_diff(integration)
except Integration.IntegrationException, exception:
return str(exception)
session.commit()
# post integration
get_diff(integration)
# execute post-integration functions
for setup in app.config['post-integration']:
try:
module_name, fun_name = setup.split(':')
module = __import__(module_name)
function = getattr(module, fun_name)
function(integration)
except Exception, e:
print "Error executing post-integration %s" % setup
print str(e)
# TODO include traceback
print
return integration.result
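def example_post_integration_hook(integration):
    """A minimal sketch (added for illustration, not part of the original
    module) of a post-integration hook. group_finished() above imports each
    'module:function' entry listed in app.config['post-integration'] and calls
    the function with the finished Integration; a real hook would live in its
    own module (e.g. a hypothetical 'hooks:notify' entry) and could send mail,
    ping a chat channel, etc.
    """
    if integration.result == 'fail':
        print "integration %s finished with failures" % integration.id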
| {
"content_hash": "ab217275b1764598dc607cc373d43c70",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 96,
"avg_line_length": 34.86813186813187,
"alnum_prop": 0.6637251812165144,
"repo_name": "schettino72/serveronduty",
"id": "3f1429366e42dc8584421d3fcf0c636e8fb84bd4",
"size": "3173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "websod/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1991"
},
{
"name": "Python",
"bytes": "141677"
}
],
"symlink_target": ""
} |
"""Google Cloud Print Monitor
Send an alert if printers are offline.
Run this script first with the "register" parameter:
sudo -u sma ./google-cloud-print.py register
"""
import getpass
import os
import sys
import webbrowser
import json
from json.decoder import JSONDecodeError
import requests
import time
from simple_monitor_alert.sma import get_var_directory
GRANT_TYPE = 'http://oauth.net/grant_type/device/1.0'
GRANT_TYPE_REFRESH = 'refresh_token'
SCOPE = 'https://www.googleapis.com/auth/cloudprint'
REGISTER_NEW_PROJECT_URL = 'https://console.developers.google.com/iam-admin/projects'
OAUTH_DEVICE_CODE_URL = 'https://accounts.google.com/o/oauth2/device/code'
OAUTH_TOKEN_URL = 'https://www.googleapis.com/oauth2/v4/token'
SEARCH_PRINTS = 'https://www.google.com/cloudprint/search'
var_directory = get_var_directory()
auth_file = os.path.join(var_directory, 'google-cloud-print.json')
if sys.version_info < (3, 0):
input = raw_input
class Auth(requests.auth.AuthBase):
def __init__(self, file=None):
self.file = file or auth_file
        if not os.path.exists(self.file):
            print('You need to execute "register". Look at the instructions.')
            raise SystemExit
self.data = json.load(open(self.file))
def renew(self):
r = requests.post(OAUTH_TOKEN_URL, {'client_secret': self.data['client_secret'],
'client_id': self.data['client_id'], 'grant_type': GRANT_TYPE_REFRESH,
'refresh_token': self.data['refresh_token']})
self.data.update(r.json())
self.save()
def save(self):
json.dump(self.data, open(self.file, 'w'))
def __call__(self, r):
r.headers['Authorization'] = '{token_type} {access_token}'.format(**self.data)
return r
def register():
print('You need to create access credentials to Google Cloud Print.')
print('1. First, go to: {}'.format(REGISTER_NEW_PROJECT_URL))
webbrowser.open(REGISTER_NEW_PROJECT_URL)
print('2. Create a new project.')
print('3. Go to "credentials" and create a new Oauth ID Client".')
print('Credentials url: https://console.developers.google.com/apis/credentials')
print('4. Select "other" in application type. You may need to create an authentication screen before.')
print('5. Now you must have: client ID and client Secret.')
print('Client ID example: 137998618127-d0p9bh4k48gugfh0l50u59okri7bd2xr.apps.googleusercontent.com')
print('Client Secret example: 1kuJGYaCEWpGIcqTP7gNHd5c')
time.sleep(2)
client_id = input('Insert client ID: ')
client_secret = getpass.getpass('Insert Client Secret (hidden): ')
data = {}
while True:
r = requests.post(OAUTH_DEVICE_CODE_URL, {'scope': SCOPE, 'client_id': client_id})
data = r.json()
print('Go to {verification_url} and insert: {user_code}'.format(**data))
print('INSERT CODE ==========> {} <========== INSERT CODE'.format(data['user_code']))
webbrowser.open(data['verification_url'])
time.sleep(2)
print("[!!] Press enter when ready")
input('[Press enter]')
r = requests.post(OAUTH_TOKEN_URL, {'client_secret': client_secret, 'code': data['device_code'],
'client_id': client_id, 'grant_type': GRANT_TYPE})
data = r.json()
if data.get('error'):
print('Error: {error}'.format(**data))
print('Sorry, It will start again.')
else:
print('Great!')
break
data.update({'client_id': client_id, 'client_secret': client_secret})
json.dump(data, open(auth_file, 'w'))
print('Created {}'.format(auth_file))
def get_prints(auth=None):
auth = auth or Auth()
r = None
for i in range(2):
r = requests.get(SEARCH_PRINTS, auth=auth)
if r.status_code == 403 and i == 1:
raise SystemExit('Renew token failed.')
elif r.status_code == 403:
auth.renew()
else:
break
data = r.json()
for printer in data['printers']:
if printer['name'] in ['Save to Google Docs']:
continue
print('printer_status({id}).name = "{displayName}"'.format(**printer))
print('printer_status({id}).expected = "ONLINE"'.format(**printer))
print('printer_status({id}).value = "{connectionStatus}"'.format(**printer))
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'register':
register()
else:
get_prints()
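# Example of the lines get_prints() emits for simple-monitor-alert (the printer
# id and names below are hypothetical; the format comes from the print calls
# above). An alert is presumably raised when .value differs from .expected:
#
#     printer_status(a1b2c3d4).name = "Office printer"
#     printer_status(a1b2c3d4).expected = "ONLINE"
#     printer_status(a1b2c3d4).value = "OFFLINE"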
| {
"content_hash": "7e3281d9ad89c1bad111eb1ab58e75b2",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 114,
"avg_line_length": 38.319327731092436,
"alnum_prop": 0.6182017543859649,
"repo_name": "Nekmo/simple-monitor-alert",
"id": "6f9f35e90705055b51e5a4c58b36fb65170cb5f1",
"size": "4582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitors/google-cloud-print.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90820"
},
{
"name": "Shell",
"bytes": "8075"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
__author__ = 'shafferm'
def sparcc_paper_filter(table):
"""if a observation averages more than 2 reads per sample then keep,
if a sample has more than 500 reads then keep"""
table = table[table.sum(axis=1) > 500]
table = table.loc[:, table.mean(axis=0) > 2]
return table
def min_sample_filter(table, min_samples):
"""remove observations not present in more than a minimum number of samples"""
zeroes_per_column = (table > 0).sum(axis=0)
return table.loc[:, zeroes_per_column > min_samples]
def bh_adjust(pvalues):
"""benjamini-hochberg p-value adjustment stolen from
http://stackoverflow.com/questions/7450957/how-to-implement-rs-p-adjust-in-python
"""
pvalues = np.array(pvalues)
n = pvalues.shape[0]
new_pvalues = np.empty(n)
values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]
values.sort()
values.reverse()
new_values = []
for i, vals in enumerate(values):
rank = n - i
pvalue, index = vals
        new_values.append((float(n) / rank) * pvalue)
for i in xrange(0, int(n)-1):
if new_values[i] < new_values[i+1]:
new_values[i+1] = new_values[i]
for i, vals in enumerate(values):
pvalue, index = vals
new_pvalues[index] = new_values[i]
return new_pvalues
def bonferroni_adjust(pvalues):
pvalues = np.array(pvalues)
n = float(pvalues.shape[0])
new_pvalues = n * pvalues
return new_pvalues
def biom_to_pandas(table):
# convert to pandas dataframe
return pd.DataFrame(np.transpose(table.matrix_data.todense()), index=table.ids(), columns=table.ids(axis="observation"))
def df_to_correls(cor):
header = ['feature1', 'feature2', 'r']
correls = list()
for i in xrange(len(cor.index)):
for j in xrange(i+1, len(cor.index)):
correls.append([str(cor.index[i]), str(cor.index[j]), cor.iat[i, j]])
return pd.DataFrame(correls, columns=header)
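# A small worked example (added for illustration, not part of the original
# module) of the Benjamini-Hochberg adjustment above; the p-values are
# hypothetical.
def _example_bh_adjust():
    raw = [0.01, 0.04, 0.03, 0.005]
    adjusted = bh_adjust(raw)
    # adjusted is approximately [0.02, 0.04, 0.04, 0.02]
    return adjusted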
| {
"content_hash": "2bb78e32dc93343767694aca335acdaf",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 124,
"avg_line_length": 31,
"alnum_prop": 0.641633064516129,
"repo_name": "shafferm/fast_sparCC",
"id": "6a073dc3903e59741213920edad6f99f60832e12",
"size": "1984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sparcc_fast/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20686"
}
],
"symlink_target": ""
} |
import os
import h5py
from scipy.io import loadmat
from fuel.converters.base import fill_hdf5_file, MissingInputFiles
def convert_silhouettes(size, directory, output_directory,
output_file=None):
""" Convert the CalTech 101 Silhouettes Datasets.
Parameters
----------
size : {16, 28}
Convert either the 16x16 or 28x28 sized version of the dataset.
directory : str
Directory in which the required input files reside.
output_file : str
Where to save the converted dataset.
"""
if size not in (16, 28):
raise ValueError('size must be 16 or 28')
if output_file is None:
output_file = 'caltech101_silhouettes{}.hdf5'.format(size)
output_file = os.path.join(output_directory, output_file)
input_file = 'caltech101_silhouettes_{}_split1.mat'.format(size)
input_file = os.path.join(directory, input_file)
if not os.path.isfile(input_file):
raise MissingInputFiles('Required files missing', [input_file])
with h5py.File(output_file, mode="w") as h5file:
mat = loadmat(input_file)
train_features = mat['train_data'].reshape([-1, 1, size, size])
train_targets = mat['train_labels']
valid_features = mat['val_data'].reshape([-1, 1, size, size])
valid_targets = mat['val_labels']
test_features = mat['test_data'].reshape([-1, 1, size, size])
test_targets = mat['test_labels']
data = (
('train', 'features', train_features),
('train', 'targets', train_targets),
('valid', 'features', valid_features),
('valid', 'targets', valid_targets),
('test', 'features', test_features),
('test', 'targets', test_targets),
)
fill_hdf5_file(h5file, data)
for i, label in enumerate(('batch', 'channel', 'height', 'width')):
h5file['features'].dims[i].label = label
for i, label in enumerate(('batch', 'index')):
h5file['targets'].dims[i].label = label
return (output_file,)
def fill_subparser(subparser):
"""Sets up a subparser to convert CalTech101 Silhouettes Database files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `caltech101_silhouettes` command.
"""
subparser.add_argument(
"size", type=int, choices=(16, 28),
help="height/width of the datapoints")
subparser.set_defaults(func=convert_silhouettes)
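# A minimal usage sketch (added for illustration, not part of the original
# module); the paths are hypothetical and the .mat split file must already be
# present in `directory`.
def _example_convert():
    return convert_silhouettes(size=28,
                               directory='/data/downloads',
                               output_directory='/data/fuel',
                               output_file=None)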
| {
"content_hash": "2159b0998e4904cb57dddc953b7b6ec1",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 76,
"avg_line_length": 33.14473684210526,
"alnum_prop": 0.6137356093687971,
"repo_name": "rizar/fuel",
"id": "c8b8328b5b400f62c2d2564c1265a16ec9f7c310",
"size": "2519",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "fuel/converters/caltech101_silhouettes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "284103"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
} |
import logging
from base64 import urlsafe_b64encode
from satosa.context import Context
from satosa.internal import InternalData
from .base import RequestMicroService
from ..exception import SATOSAConfigurationError
from ..exception import SATOSAError
logger = logging.getLogger(__name__)
class CustomRoutingError(SATOSAError):
"""SATOSA exception raised by CustomRouting rules"""
pass
class DecideBackendByTargetIssuer(RequestMicroService):
"""
Select target backend based on the target issuer.
"""
def __init__(self, config:dict, *args, **kwargs):
"""
Constructor.
:param config: microservice configuration loaded from yaml file
:type config: Dict[str, Dict[str, str]]
"""
super().__init__(*args, **kwargs)
self.target_mapping = config['target_mapping']
self.default_backend = config['default_backend']
def process(self, context:Context, data:InternalData):
"""Set context.target_backend based on the target issuer"""
target_issuer = context.get_decoration(Context.KEY_TARGET_ENTITYID)
if not target_issuer:
logger.info('skipping backend decision because no target_issuer was found')
return super().process(context, data)
target_backend = (
self.target_mapping.get(target_issuer)
or self.default_backend
)
report = {
'msg': 'decided target backend by target issuer',
'target_issuer': target_issuer,
'target_backend': target_backend,
}
logger.info(report)
context.target_backend = target_backend
return super().process(context, data)
class DecideBackendByRequester(RequestMicroService):
"""
Select which backend should be used based on who the requester is.
"""
def __init__(self, config, *args, **kwargs):
"""
Constructor.
:param config: mapping from requester identifier to
backend module name under the key 'requester_mapping'
:type config: Dict[str, Dict[str, str]]
"""
super().__init__(*args, **kwargs)
self.requester_mapping = config['requester_mapping']
def process(self, context, data):
"""
Will modify the context.target_backend attribute based on the requester identifier.
:param context: request context
:param data: the internal request
"""
context.target_backend = self.requester_mapping[data.requester]
return super().process(context, data)
class DecideIfRequesterIsAllowed(RequestMicroService):
"""
Decide whether a requester is allowed to send an authentication request to the target entity.
This micro service currently only works when a target entityid is set.
Currently, a target entityid is set only when the `SAMLMirrorFrontend` is
used.
"""
def __init__(self, config, *args, **kwargs):
super().__init__(*args, **kwargs)
for target_entity, rules in config["rules"].items():
conflicting_rules = set(rules.get("deny", [])).intersection(rules.get("allow", []))
if conflicting_rules:
raise SATOSAConfigurationError("Conflicting requester rules for DecideIfRequesterIsAllowed,"
"{} is both denied and allowed".format(conflicting_rules))
# target entity id is base64 url encoded to make it usable in URLs,
# so we convert the rules the use those encoded entity id's instead
self.rules = {self._b64_url(k): v for k, v in config["rules"].items()}
def _b64_url(self, data):
return urlsafe_b64encode(data.encode("utf-8")).decode("utf-8")
def process(self, context, data):
target_entity_id = context.get_decoration(Context.KEY_TARGET_ENTITYID)
if None is target_entity_id:
msg = "{name} can only be used when a target entityid is set".format(
name=self.__class__.__name__
)
logger.error(msg)
raise SATOSAError(msg)
target_specific_rules = self.rules.get(target_entity_id)
# default to allowing everything if there are no specific rules
if not target_specific_rules:
logger.debug("Requester '{}' allowed by default to target entity '{}' due to no entity specific rules".format(
data.requester, target_entity_id
))
return super().process(context, data)
# deny rules takes precedence
deny_rules = target_specific_rules.get("deny", [])
if data.requester in deny_rules:
logger.debug("Requester '{}' is not allowed by target entity '{}' due to deny rules '{}'".format(
data.requester, target_entity_id, deny_rules
))
raise SATOSAError("Requester is not allowed by target provider")
allow_rules = target_specific_rules.get("allow", [])
allow_all = "*" in allow_rules
if data.requester in allow_rules or allow_all:
logger.debug("Requester '{}' allowed by target entity '{}' due to allow rules '{}".format(
data.requester, target_entity_id, allow_rules
))
return super().process(context, data)
logger.debug("Requester '{}' is not allowed by target entity '{}' due to final deny all rule in '{}'".format(
data.requester, target_entity_id, deny_rules
))
raise SATOSAError("Requester is not allowed by target provider")
| {
"content_hash": "8d28bd15702115e48d3fe410acf502ae",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 122,
"avg_line_length": 38.082191780821915,
"alnum_prop": 0.6298561151079136,
"repo_name": "SUNET/SATOSA",
"id": "541b824f1c0383aaa924a39cf618b7375ed9561c",
"size": "5560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/satosa/micro_services/custom_routing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "511690"
},
{
"name": "Shell",
"bytes": "1866"
}
],
"symlink_target": ""
} |
import sys, os
from datetime import datetime
from twilio import __version_info__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
RTD_NEW_THEME = True
# Load the source for autodoc
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), '..')))
# So links to Python default docs work
intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'twilio-python'
copyright = unicode(datetime.utcnow().year) + u', Twilio Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(__version_info__[:2])
# The full version, including alpha/beta/rc tags.
release = '.'.join(__version_info__)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
sys.path.append(os.path.abspath('_themes'))
#html_theme_path = ['_themes']
#html_theme = 'kr'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'twilio-pythondoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'twilio-python.tex', u'twilio-python Documentation',
u'Twilio Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'twilio-python', u'twilio-python Documentation',
[u'Twilio Inc.'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'twilio-python'
epub_author = u'[email protected]'
epub_publisher = u'Twilio Inc.'
epub_copyright = u'2013, Twilio Inc.'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| {
"content_hash": "d553ba8e8fa6b9691bbfe27bd35e9266",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 80,
"avg_line_length": 31.71153846153846,
"alnum_prop": 0.7018799272286234,
"repo_name": "supermanheng21/twilio-python",
"id": "38ff9f8ee98af608af3d097ee6601d3f56f21747",
"size": "8670",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "887"
},
{
"name": "Python",
"bytes": "375426"
}
],
"symlink_target": ""
} |
class HorasInfo:
    def __init__(self):
        self.total = 0
        self.doneFinished = 0
        self.doneReal = 0
        self.blockedTotal = 0
        self.blockedReal = 0
        self.statesH = {}
        self.proyectsH = {}
class SprintStats:
    def __init__(self):
        self.title = ""
        self.startDate = ""
        self.endDate = ""
        self.festivos = []
        self.horasSprint = HorasInfo()
        self.horasSupport = HorasInfo()
    def toDict(self):
        dD = self.__dict__.copy()
        dD["horasSprint"] = self.horasSprint.__dict__
        dD["horasSupport"] = self.horasSupport.__dict__
        return dD
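# A minimal usage sketch (added for illustration, not part of the original
# module); the field values are hypothetical.
if __name__ == '__main__':
    stats = SprintStats()
    stats.title = "Sprint 42"
    stats.startDate = "2016-05-02"
    stats.endDate = "2016-05-13"
    stats.horasSprint.total = 80
    stats.horasSprint.doneFinished = 60
    print(stats.toDict())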
| {
"content_hash": "102f8449e3485112c531092eaefb4f71",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 56,
"avg_line_length": 27.5,
"alnum_prop": 0.5212121212121212,
"repo_name": "grcanosa/code-playground",
"id": "c1cdf7ca944c0ac95a04afbcfde3c3389d1b0605",
"size": "681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrum/redminecsv/sprintstats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6793"
},
{
"name": "JavaScript",
"bytes": "6939"
},
{
"name": "Jupyter Notebook",
"bytes": "10972"
},
{
"name": "Python",
"bytes": "40749"
},
{
"name": "Shell",
"bytes": "838"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
import numpy as np
from bct.utils import BCTParamError, binarize
from bct.utils import pick_four_unique_nodes_quickly
from .clustering import number_of_components
def latmio_dir_connected(R, itr, D=None):
'''
This function "latticizes" a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions. The
function also ensures that the randomized network maintains
connectedness, the ability for every node to reach every other node in
the network. The input network for this function must be connected.
Parameters
----------
R : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
n = len(R)
ind_rp = np.random.permutation(n) # random permutation of nodes
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
# create distance to diagonal matrix if not specified by user
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(R)
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
rewire = True
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
# connectedness condition
if not (np.any((R[a, c], R[d, b], R[d, c])) and
np.any((R[c, a], R[b, d], R[b, a]))):
P = R[(a, c), :].copy()
P[0, b] = 0
P[0, d] = 1
P[1, d] = 0
P[1, b] = 1
PN = P.copy()
PN[0, a] = 1
PN[1, c] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
PN += P
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(PN[0, (b, c)]) and np.any(PN[1, (d, a)]):
break
# end connectedness testing
if rewire: # reassign edges
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])] # reverse random permutation
return Rlatt, R, ind_rp, eff
def latmio_dir(R, itr, D=None):
'''
This function "latticizes" a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
R : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
n = len(R)
ind_rp = np.random.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
# create distance to diagonal matrix if not specified by user
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(R)
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])] # reverse random permutation
return Rlatt, R, ind_rp, eff
def latmio_und_connected(R, itr, D=None):
'''
This function "latticizes" an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks. The function also ensures that the
randomized network maintains connectedness, the ability for every node
to reach every other node in the network. The input network for this
function must be connected.
Parameters
----------
R : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
if number_of_components(R) > 1:
raise BCTParamError("Input is not connected")
n = len(R)
ind_rp = np.random.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1) / 2))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts:
rewire = True
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
if np.random.random() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
# connectedness condition
if not (R[a, c] or R[b, d]):
P = R[(a, d), :].copy()
P[0, b] = 0
P[1, c] = 0
PN = P.copy()
PN[:, d] = 1
PN[:, a] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(P[:, (b, c)]):
break
PN += P
# end connectedness testing
if rewire: # reassign edges
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])]
return Rlatt, R, ind_rp, eff
def latmio_und(R, itr, D=None):
'''
This function "latticizes" an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks.
Parameters
----------
R : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
n = len(R)
ind_rp = np.random.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1) / 2))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts:
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
if np.random.random() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])]
return Rlatt, R, ind_rp, eff
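# Minimal usage sketch for latmio_und on a small random symmetric binary
# matrix; the size, density and itr values are arbitrary.
def _demo_latmio_und():
    A = (np.random.random((16, 16)) < 0.3).astype(float)
    A = np.triu(A, 1)
    A = A + A.T  # symmetric, zero diagonal
    Rlatt, Rrp, ind_rp, eff = latmio_und(A, 10)
    # the degree sequence is preserved (up to the internal node permutation)
    assert sorted(Rlatt.sum(axis=0)) == sorted(A.sum(axis=0))
    return eff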
def makeevenCIJ(n, k, sz_cl):
'''
This function generates a random, directed network with a specified
number of fully connected modules linked together by evenly distributed
remaining random connections.
Parameters
----------
N : int
number of vertices (must be power of 2)
K : int
number of edges
sz_cl : int
size of clusters (must be power of 2)
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
N must be a power of 2.
    A warning is generated if the fully connected modules already contain
    more than K edges; in that case the output contains the clusters only.
    Cluster size is 2^sz_cl.
'''
# compute number of hierarchical levels and adjust cluster size
mx_lvl = int(np.floor(np.log2(n)))
sz_cl -= 1
# make a stupid little template
t = np.ones((2, 2)) * 2
# check n against the number of levels
Nlvl = 2**mx_lvl
if Nlvl != n:
print("Warning: n must be a power of 2")
n = Nlvl
# create hierarchical template
for lvl in range(1, mx_lvl):
s = 2**(lvl + 1)
CIJ = np.ones((s, s))
grp1 = range(int(s / 2))
grp2 = range(int(s / 2), s)
ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
CIJ.flat[ix1] = t # numpy indexing is teh sucks :(
CIJ.flat[ix2] = t
CIJ += 1
t = CIJ.copy()
CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))
# assign connection probabilities
CIJp = (CIJ >= (mx_lvl - sz_cl))
# determine nr of non-cluster connections left and their possible positions
rem_k = k - np.size(np.where(CIJp.flatten()))
if rem_k < 0:
print("Warning: K is too small, output matrix contains clusters only")
return CIJp
a, b = np.where(np.logical_not(CIJp + np.eye(n)))
# assign remK randomly dstributed connections
rp = np.random.permutation(len(a))
a = a[rp[:rem_k]]
b = b[rp[:rem_k]]
for ai, bi in zip(a, b):
CIJp[ai, bi] = 1
return np.array(CIJp, dtype=int)
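# Minimal usage sketch for makeevenCIJ; 32 nodes, 700 edges and sz_cl=4
# (16-node clusters) are arbitrary values that satisfy the power-of-2
# requirements.
def _demo_makeevenCIJ():
    CIJ = makeevenCIJ(32, 700, 4)
    assert CIJ.shape == (32, 32)
    assert not np.any(np.diag(CIJ))  # no self-connections
    return CIJ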
def makefractalCIJ(mx_lvl, E, sz_cl):
'''
This function generates a directed network with a hierarchical modular
organization. All modules are fully connected and connection density
decays as 1/(E^n), with n = index of hierarchical level.
Parameters
----------
mx_lvl : int
number of hierarchical levels, N = 2^mx_lvl
E : int
connection density fall off per level
sz_cl : int
size of clusters (must be power of 2)
Returns
-------
CIJ : NxN np.ndarray
connection matrix
K : int
number of connections present in output CIJ
'''
# make a stupid little template
t = np.ones((2, 2)) * 2
# compute N and cluster size
n = 2**mx_lvl
sz_cl -= 1
for lvl in range(1, mx_lvl):
s = 2**(lvl + 1)
CIJ = np.ones((s, s))
grp1 = range(int(s / 2))
grp2 = range(int(s / 2), s)
ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
CIJ.flat[ix1] = t # numpy indexing is teh sucks :(
CIJ.flat[ix2] = t
CIJ += 1
t = CIJ.copy()
CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))
# assign connection probabilities
ee = mx_lvl - CIJ - sz_cl
ee = (ee > 0) * ee
prob = (1 / E**ee) * (np.ones((s, s)) - np.eye(s))
CIJ = (prob > np.random.random((n, n)))
# count connections
k = np.sum(CIJ)
return np.array(CIJ, dtype=int), k
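# Minimal usage sketch for makefractalCIJ; mx_lvl=6 gives a 64-node network,
# and E=3.0, sz_cl=3 are arbitrary illustrative values.
def _demo_makefractalCIJ():
    CIJ, k = makefractalCIJ(6, 3.0, 3)
    assert CIJ.shape == (2 ** 6, 2 ** 6)
    assert not np.any(np.diag(CIJ))  # no self-connections
    return CIJ, k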
def makerandCIJdegreesfixed(inv, outv):
'''
This function generates a directed random network with a specified
in-degree and out-degree sequence.
Parameters
----------
inv : Nx1 np.ndarray
in-degree vector
outv : Nx1 np.ndarray
out-degree vector
Returns
-------
CIJ : NxN np.ndarray
Notes
-----
Necessary conditions include:
length(in) = length(out) = n
sum(in) = sum(out) = k
in(i), out(i) < n-1
in(i) + out(j) < n+2
in(i) + out(i) < n
No connections are placed on the main diagonal
The algorithm used in this function is not, technically, guaranteed to
terminate. If a valid distribution of in and out degrees is provided,
this function will find it in bounded time with probability
1-(1/(2*(k^2))). This turns out to be a serious problem when
computing infinite degree matrices, but offers good performance
otherwise.
'''
n = len(inv)
k = np.sum(inv)
in_inv = np.zeros((k,))
out_inv = np.zeros((k,))
i_in = 0
i_out = 0
for i in range(n):
in_inv[i_in:i_in + inv[i]] = i
out_inv[i_out:i_out + outv[i]] = i
i_in += inv[i]
i_out += outv[i]
CIJ = np.eye(n)
edges = np.array((out_inv, in_inv[np.random.permutation(k)]))
# create CIJ and check for double edges and self connections
for i in range(k):
if CIJ[edges[0, i], edges[1, i]]:
tried = set()
while True:
if len(tried) == k:
raise BCTParamError('Could not resolve the given '
'in and out vectors')
switch = np.random.randint(k)
while switch in tried:
switch = np.random.randint(k)
if not (CIJ[edges[0, i], edges[1, switch]] or
CIJ[edges[0, switch], edges[1, i]]):
CIJ[edges[0, switch], edges[1, switch]] = 0
CIJ[edges[0, switch], edges[1, i]] = 1
if switch < i:
CIJ[edges[0, switch], edges[1, switch]] = 0
CIJ[edges[0, switch], edges[1, i]] = 1
t = edges[1, i]
edges[1, i] = edges[1, switch]
edges[1, switch] = t
break
tried.add(switch)
else:
CIJ[edges[0, i], edges[1, i]] = 1
CIJ -= np.eye(n)
return CIJ
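# Minimal usage sketch for makerandCIJdegreesfixed, assuming a small regular
# degree sequence (every node has in- and out-degree 2) chosen purely for
# illustration.
def _demo_makerandCIJdegreesfixed():
    inv = np.array([2] * 8)
    outv = np.array([2] * 8)
    CIJ = makerandCIJdegreesfixed(inv, outv)
    assert np.all(CIJ.sum(axis=0) == inv)   # in-degrees match
    assert np.all(CIJ.sum(axis=1) == outv)  # out-degrees match
    return CIJ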
def makerandCIJ_dir(n, k):
'''
This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
ix, = np.where(np.logical_not(np.eye(n)).flat)
rp = np.random.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ
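# Minimal usage sketch for makerandCIJ_dir; 20 nodes and 60 edges are
# arbitrary illustrative values.
def _demo_makerandCIJ_dir():
    CIJ = makerandCIJ_dir(20, 60)
    assert int(np.sum(CIJ)) == 60    # exactly K edges are placed
    assert not np.any(np.diag(CIJ))  # none of them on the diagonal
    return CIJ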
def makerandCIJ_und(n, k):
'''
This function generates an undirected random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
Returns
-------
CIJ : NxN np.ndarray
undirected random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
ix, = np.where(np.triu(np.logical_not(np.eye(n))).flat)
rp = np.random.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ
def makeringlatticeCIJ(n, k):
'''
This function generates a directed lattice network with toroidal
    boundary conditions (i.e. with ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
The lattice is made by placing connections as close as possible
to the main diagonal, with wrapping around. No connections are made
on the main diagonal. In/Outdegree is kept approx. constant at K/N.
'''
# initialize
CIJ = np.zeros((n, n))
CIJ1 = np.ones((n, n))
kk = 0
count = 0
seq = range(1, n)
seq2 = range(n - 1, 0, -1)
# fill in
while kk < k:
count += 1
dCIJ = np.triu(CIJ1, seq[count]) - np.triu(CIJ1, seq[count] + 1)
dCIJ2 = np.triu(CIJ1, seq2[count]) - np.triu(CIJ1, seq2[count] + 1)
dCIJ = dCIJ + dCIJ.T + dCIJ2 + dCIJ2.T
CIJ += dCIJ
kk = int(np.sum(CIJ))
# remove excess connections
overby = kk - k
if overby:
i, j = np.where(dCIJ)
rp = np.random.permutation(np.size(i))
for ii in range(overby):
CIJ[i[rp[ii]], j[rp[ii]]] = 0
return CIJ
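# Minimal usage sketch for makeringlatticeCIJ; 16 nodes and 64 edges are
# arbitrary illustrative values.
def _demo_makeringlatticeCIJ():
    CIJ = makeringlatticeCIJ(16, 64)
    assert int(np.sum(CIJ)) == 64    # excess connections are trimmed to K
    assert not np.any(np.diag(CIJ))  # no self-connections
    return CIJ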
def maketoeplitzCIJ(n, k, s):
'''
This function generates a directed network with a Gaussian drop-off in
    edge density with increasing distance from the main diagonal. There are
    no toroidal boundary conditions (i.e. no ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
s : float
        standard deviation of the Gaussian drop-off in edge density
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
from scipy import linalg, stats
pf = stats.norm.pdf(range(1, n), .5, s)
template = linalg.toeplitz(np.append((0,), pf), r=np.append((0,), pf))
template *= (k / np.sum(template))
CIJ = np.zeros((n, n))
itr = 0
while np.sum(CIJ) != k:
CIJ = (np.random.random((n, n)) < template)
itr += 1
if itr > 10000:
raise BCTParamError('Infinite loop was caught generating toeplitz '
'matrix. This means the matrix could not be resolved with the '
'specified parameters.')
return CIJ
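# Minimal usage sketch for maketoeplitzCIJ; 24 nodes, 80 edges and a Gaussian
# drop-off s.d. of 3.0 are arbitrary illustrative values.
def _demo_maketoeplitzCIJ():
    CIJ = maketoeplitzCIJ(24, 80, 3.0)
    assert int(np.sum(CIJ)) == 80  # the loop only exits at exactly K edges
    return CIJ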
def null_model_dir_sign(W, bin_swaps=5, wei_freq=.1):
'''
    This function randomizes a directed network with positive and
negative weights, while preserving the degree and strength
distributions. This function calls randmio_dir.m
Parameters
----------
W : NxN np.ndarray
directed weighted connection matrix
bin_swaps : int
average number of swaps in each edge binary randomization. Default
value is 5. 0 swaps implies no binary randomization.
wei_freq : float
        frequency of weight sorting in weighted randomization. 0<=wei_freq<=1.
wei_freq == 1 implies that weights are sorted at each step.
wei_freq == 0.1 implies that weights sorted each 10th step (faster,
default value)
wei_freq == 0 implies no sorting of weights (not recommended)
Returns
-------
W0 : NxN np.ndarray
randomized weighted connection matrix
R : 4-tuple of floats
Correlation coefficients between strength sequences of input and
output connection matrices, rpos_in, rpos_out, rneg_in, rneg_out
Notes
-----
The value of bin_swaps is ignored when binary topology is fully
connected (e.g. when the network has no negative weights).
Randomization may be better (and execution time will be slower) for
higher values of bin_swaps and wei_freq. Higher values of bin_swaps may
enable a more random binary organization, and higher values of wei_freq
may enable a more accurate conservation of strength sequences.
R are the correlation coefficients between positive and negative
in-strength and out-strength sequences of input and output connection
matrices and are used to evaluate the accuracy with which strengths
were preserved. Note that correlation coefficients may be a rough
measure of strength-sequence accuracy and one could implement more
formal tests (such as the Kolmogorov-Smirnov test) if desired.
'''
W = W.copy()
n = len(W)
np.fill_diagonal(W, 0) # clear diagonal
    Ap = (W > 0) # positive adjmat
    An = (W < 0) # negative adjmat
if np.size(np.where(Ap.flat)) < (n * (n - 1)):
        W_r, _ = randmio_dir_signed(W, bin_swaps)
Ap_r = W_r > 0
An_r = W_r < 0
else:
Ap_r = Ap
An_r = An
W0 = np.zeros((n, n))
for s in (1, -1):
if s == 1:
Acur = Ap
A_rcur = Ap_r
else:
Acur = An
A_rcur = An_r
        Si = np.sum(W * Acur, axis=0) # in-strength for current sign
        So = np.sum(W * Acur, axis=1) # out-strength for current sign
Wv = np.sort(W[Acur].flat) # sorted weights vector
i, j = np.where(A_rcur)
Lij, = np.where(A_rcur.flat) # weights indices
P = np.outer(So, Si)
if wei_freq == 0: # get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij]) # assign corresponding sorted
W0.flat[Lij[Oind]] = s * Wv # weight at this index
else:
wsize = np.size(Wv)
wei_period = np.round(1 / wei_freq) # convert frequency to period
lq = np.arange(wsize, 0, -wei_period, dtype=int)
for m in lq: # iteratively explore at this period
# get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij])
R = np.random.permutation(m)[:np.min((m, wei_period))]
for q, r in enumerate(R):
# choose random index of sorted expected weight
o = Oind[r]
W0.flat[Lij[o]] = s * Wv[r] # assign corresponding weight
# readjust expected weighted probability for i[o],j[o]
f = 1 - Wv[r] / So[i[o]]
P[i[o], :] *= f
                    f = 1 - Wv[r] / Si[j[o]]
                    P[:, j[o]] *= f
                    # readjust out-strength of i[o]
                    So[i[o]] -= Wv[r]
                    # readjust in-strength of j[o]
                    Si[j[o]] -= Wv[r]
O = Oind[R]
# remove current indices from further consideration
Lij = np.delete(Lij, O)
i = np.delete(i, O)
j = np.delete(j, O)
                Wv = np.delete(Wv, R)
rpos_in = np.corrcoef(np.sum(W * (W > 0), axis=0),
np.sum(W0 * (W0 > 0), axis=0))
rpos_ou = np.corrcoef(np.sum(W * (W > 0), axis=1),
np.sum(W0 * (W0 > 0), axis=1))
rneg_in = np.corrcoef(np.sum(-W * (W < 0), axis=0),
np.sum(-W0 * (W0 < 0), axis=0))
rneg_ou = np.corrcoef(np.sum(-W * (W < 0), axis=1),
np.sum(-W0 * (W0 < 0), axis=1))
return W0, (rpos_in[0, 1], rpos_ou[0, 1], rneg_in[0, 1], rneg_ou[0, 1])
def null_model_und_sign(W, bin_swaps=5, wei_freq=.1):
'''
This function randomizes an undirected network with positive and
negative weights, while preserving the degree and strength
distributions. This function calls randmio_und.m
Parameters
----------
W : NxN np.ndarray
undirected weighted connection matrix
bin_swaps : int
average number of swaps in each edge binary randomization. Default
value is 5. 0 swaps implies no binary randomization.
wei_freq : float
        frequency of weight sorting in weighted randomization. 0<=wei_freq<=1.
wei_freq == 1 implies that weights are sorted at each step.
wei_freq == 0.1 implies that weights sorted each 10th step (faster,
default value)
wei_freq == 0 implies no sorting of weights (not recommended)
Returns
-------
W0 : NxN np.ndarray
randomized weighted connection matrix
R : 4-tuple of floats
Correlation coefficients between strength sequences of input and
output connection matrices, rpos_in, rpos_out, rneg_in, rneg_out
Notes
-----
The value of bin_swaps is ignored when binary topology is fully
connected (e.g. when the network has no negative weights).
Randomization may be better (and execution time will be slower) for
higher values of bin_swaps and wei_freq. Higher values of bin_swaps
may enable a more random binary organization, and higher values of
wei_freq may enable a more accurate conservation of strength
sequences.
R are the correlation coefficients between positive and negative
strength sequences of input and output connection matrices and are
used to evaluate the accuracy with which strengths were preserved.
Note that correlation coefficients may be a rough measure of
strength-sequence accuracy and one could implement more formal tests
(such as the Kolmogorov-Smirnov test) if desired.
'''
if not np.all(W == W.T):
raise BCTParamError("Input must be undirected")
W = W.copy()
n = len(W)
np.fill_diagonal(W, 0) # clear diagonal
    Ap = (W > 0) # positive adjmat
    An = (W < 0) # negative adjmat
if np.size(np.where(Ap.flat)) < (n * (n - 1)):
        W_r, _ = randmio_und_signed(W, bin_swaps)
Ap_r = W_r > 0
An_r = W_r < 0
else:
Ap_r = Ap
An_r = An
W0 = np.zeros((n, n))
for s in (1, -1):
if s == 1:
Acur = Ap
A_rcur = Ap_r
else:
Acur = An
A_rcur = An_r
S = np.sum(W * Acur, axis=0) # strengths
Wv = np.sort(W[np.where(np.triu(Acur))]) # sorted weights vector
i, j = np.where(np.triu(A_rcur))
Lij, = np.where(np.triu(A_rcur).flat) # weights indices
P = np.outer(S, S)
if wei_freq == 0: # get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij]) # assign corresponding sorted
W0.flat[Lij[Oind]] = s * Wv # weight at this index
else:
wsize = np.size(Wv)
wei_period = np.round(1 / wei_freq) # convert frequency to period
lq = np.arange(wsize, 0, -wei_period, dtype=int)
for m in lq: # iteratively explore at this period
# get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij])
R = np.random.permutation(m)[:np.min((m, wei_period))]
for q, r in enumerate(R):
# choose random index of sorted expected weight
o = Oind[r]
W0.flat[Lij[o]] = s * Wv[r] # assign corresponding weight
# readjust expected weighted probability for i[o],j[o]
f = 1 - Wv[r] / S[i[o]]
P[i[o], :] *= f
P[:, i[o]] *= f
f = 1 - Wv[r] / S[j[o]]
P[j[o], :] *= f
P[:, j[o]] *= f
# readjust strength of i[o]
S[i[o]] -= Wv[r]
# readjust strength of j[o]
S[j[o]] -= Wv[r]
O = Oind[R]
# remove current indices from further consideration
Lij = np.delete(Lij, O)
i = np.delete(i, O)
j = np.delete(j, O)
Wv = np.delete(Wv, R)
W0 = W0 + W0.T
rpos_in = np.corrcoef(np.sum(W * (W > 0), axis=0),
np.sum(W0 * (W0 > 0), axis=0))
rpos_ou = np.corrcoef(np.sum(W * (W > 0), axis=1),
np.sum(W0 * (W0 > 0), axis=1))
rneg_in = np.corrcoef(np.sum(-W * (W < 0), axis=0),
np.sum(-W0 * (W0 < 0), axis=0))
rneg_ou = np.corrcoef(np.sum(-W * (W < 0), axis=1),
np.sum(-W0 * (W0 < 0), axis=1))
return W0, (rpos_in[0, 1], rpos_ou[0, 1], rneg_in[0, 1], rneg_ou[0, 1])
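# Minimal usage sketch for null_model_und_sign on a small random signed
# symmetric matrix. wei_freq=0 is chosen only to keep the sketch short
# (weights are then assigned purely by rank); the default wei_freq=0.1 is the
# documented choice for real analyses.
def _demo_null_model_und_sign():
    W = np.random.randn(20, 20)
    W = np.triu(W, 1)
    W = W + W.T  # symmetric, zero diagonal, mixed signs
    W0, (rpos_in, rpos_out, rneg_in, rneg_out) = null_model_und_sign(
        W, bin_swaps=5, wei_freq=0)
    assert np.allclose(W0, W0.T)  # the null network is symmetric as well
    return W0, (rpos_in, rpos_out, rneg_in, rneg_out)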
def randmio_dir_connected(R, itr):
'''
This function randomizes a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions. The
function also ensures that the randomized network maintains
connectedness, the ability for every node to reach every other node in
the network. The input network for this function must be connected.
Parameters
----------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
R = R.copy()
n = len(R)
i, j = np.where(R)
k = len(i)
itr *= k
max_attempts = np.round(n * k / (n * (n - 1)))
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
rewire = True
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
# rewiring condition
if not (R[a, d] or R[c, b]):
# connectedness condition
if not (np.any((R[a, c], R[d, b], R[d, c])) and
np.any((R[c, a], R[b, d], R[b, a]))):
P = R[(a, c), :].copy()
P[0, b] = 0
P[0, d] = 1
P[1, d] = 0
P[1, b] = 1
PN = P.copy()
PN[0, a] = 1
PN[1, c] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
PN += P
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(PN[0, (b, c)]) and np.any(PN[1, (d, a)]):
break
# end connectedness testing
if rewire: # reassign edges
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d # reassign edge indices
j[e2] = b
eff += 1
break
att += 1
return R, eff
def randmio_dir(R, itr):
'''
This function randomizes a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
R = R.copy()
n = len(R)
i, j = np.where(R)
k = len(i)
itr *= k
max_attempts = np.round(n * k / (n * (n - 1)))
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
# rewiring condition
if not (R[a, d] or R[c, b]):
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
                j.setflags(write=True)
                j[e1] = d
                j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff
def randmio_und_connected(R, itr):
'''
This function randomizes an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks. The function also ensures that the
randomized network maintains connectedness, the ability for every node
to reach every other node in the network. The input network for this
function must be connected.
    NOTE: the changes made to the BCT MATLAB function of the same name in the
    Jan 2016 release have not been propagated to this function, because they
    substantially decreased time efficiency in this implementation. Expect
    these changes to be merged eventually.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
if number_of_components(R) > 1:
raise BCTParamError("Input is not connected")
R = R.copy()
n = len(R)
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximum number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
rewire = True
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if np.random.random() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
# connectedness condition
if not (R[a, c] or R[b, d]):
P = R[(a, d), :].copy()
P[0, b] = 0
P[1, c] = 0
PN = P.copy()
PN[:, d] = 1
PN[:, a] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(P[:, (b, c)]):
break
PN += P
# end connectedness testing
if rewire:
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff
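# Minimal usage sketch for randmio_und_connected. The spanning path makes the
# 16-node input connected by construction (a requirement of this routine);
# the extra 0.2-density edges and itr=10 are arbitrary.
def _demo_randmio_und_connected():
    n = 16
    A = np.zeros((n, n))
    for v in range(n - 1):
        A[v, v + 1] = 1  # spanning path guarantees connectedness
    A[np.random.random((n, n)) < 0.2] = 1
    A = np.triu(A, 1)
    A = A + A.T
    R, eff = randmio_und_connected(A, 10)
    assert number_of_components(R) == 1            # still connected
    assert np.all(R.sum(axis=0) == A.sum(axis=0))  # degrees preserved
    return R, eff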
def randmio_dir_signed(R, itr):
'''
This function randomizes a directed weighted network with positively
and negatively signed connections, while preserving the positive and
    negative degree distributions. In weighted networks, the function
    preserves the out-strength but not the in-strength distributions.
    Parameters
    ----------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
R = R.copy()
n = len(R)
itr *= n * (n - 1)
#maximal number of rewiring attempts per iter
max_attempts = n
#actual number of successful rewirings
eff = 0
#print(itr)
for it in range(itr):
#print(it)
att = 0
while att <= max_attempts:
#select four distinct vertices
a, b, c, d = pick_four_unique_nodes_quickly(n)
#a, b, c, d = np.random.choice(n, 4)
#a, b, c, d = np.random.permutation(4)
r0_ab = R[a, b]
r0_cd = R[c, d]
r0_ad = R[a, d]
r0_cb = R[c, b]
#print(np.sign(r0_ab), np.sign(r0_ad))
#rewiring condition
if ( np.sign(r0_ab) == np.sign(r0_cd) and
np.sign(r0_ad) == np.sign(r0_cb) and
np.sign(r0_ab) != np.sign(r0_ad)):
R[a, d] = r0_ab
R[a, b] = r0_ad
R[c, b] = r0_cd
R[c, d] = r0_cb
eff += 1
break
att += 1
#print(eff)
return R, eff
def randmio_und(R, itr):
'''
This function randomizes an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
R = R.copy()
n = len(R)
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximum number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1, e2 = np.random.randint(k, size=(2,))
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if np.random.random() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff
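# Minimal usage sketch for randmio_und on a weighted matrix: the per-node
# binary degrees are preserved while strengths generally are not. The size,
# density and itr values are arbitrary.
def _demo_randmio_und():
    W = np.random.random((16, 16)) * (np.random.random((16, 16)) < 0.4)
    W = np.triu(W, 1)
    W = W + W.T
    R, eff = randmio_und(W, 10)
    assert np.all((R > 0).sum(axis=0) == (W > 0).sum(axis=0))
    return R, eff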
def randmio_und_signed(R, itr):
'''
This function randomizes an undirected weighted network with positive
and negative weights, while simultaneously preserving the degree
distribution of positive and negative weights. The function does not
preserve the strength distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
        randomized network
    eff : int
        number of actual rewirings carried out
    '''
R = R.copy()
n = len(R)
itr *= int(n * (n -1) / 2)
max_attempts = int(np.round(n / 2))
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts:
a, b, c, d = pick_four_unique_nodes_quickly(n)
r0_ab = R[a, b]
r0_cd = R[c, d]
r0_ad = R[a, d]
r0_cb = R[c, b]
#rewiring condition
if ( np.sign(r0_ab) == np.sign(r0_cd) and
np.sign(r0_ad) == np.sign(r0_cb) and
np.sign(r0_ab) != np.sign(r0_ad)):
R[a, d] = R[d, a] = r0_ab
R[a, b] = R[b, a] = r0_ad
R[c, b] = R[b, c] = r0_cd
R[c, d] = R[d, c] = r0_cb
eff += 1
break
att += 1
return R, eff
def randomize_graph_partial_und(A, B, maxswap):
'''
A = RANDOMIZE_GRAPH_PARTIAL_UND(A,B,MAXSWAP) takes adjacency matrices A
and B and attempts to randomize matrix A by performing MAXSWAP
rewirings. The rewirings will avoid any spots where matrix B is
nonzero.
Parameters
----------
A : NxN np.ndarray
undirected adjacency matrix to randomize
B : NxN np.ndarray
mask; edges to avoid
maxswap : int
number of rewirings
Returns
-------
A : NxN np.ndarray
randomized matrix
Notes
-----
1. Graph may become disconnected as a result of rewiring. Always
important to check.
2. A can be weighted, though the weighted degree sequence will not be
preserved.
3. A must be undirected.
'''
A = A.copy()
i, j = np.where(np.triu(A, 1))
i.setflags(write=True)
j.setflags(write=True)
m = len(i)
nswap = 0
while nswap < maxswap:
while True:
e1, e2 = np.random.randint(m, size=(2,))
while e1 == e2:
e2 = np.random.randint(m)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if np.random.random() > .5:
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (A[a, d] or A[c, b] or B[a, d] or B[c, b]): # avoid specified ixes
A[a, d] = A[a, b]
A[a, b] = 0
A[d, a] = A[b, a]
A[b, a] = 0
A[c, b] = A[c, d]
A[c, d] = 0
A[b, c] = A[d, c]
A[d, c] = 0
j[e1] = d
j[e2] = b # reassign edge indices
nswap += 1
return A
def randomizer_bin_und(R, alpha):
'''
This function randomizes a binary undirected network, while preserving
the degree distribution. The function directly searches for rewirable
edge pairs (rather than trying to rewire edge pairs at random), and
hence avoids long loops and works especially well in dense matrices.
Parameters
----------
A : NxN np.ndarray
binary undirected connection matrix
alpha : float
fraction of edges to rewire
Returns
-------
R : NxN np.ndarray
randomized network
'''
R = binarize(R, copy=True) # binarize
if not np.all(R == R.T):
raise BCTParamError(
'randomizer_bin_und only takes undirected matrices')
ax = len(R)
    nr_poss_edges = (ax * ax - ax) / 2 # find maximum possible edges
savediag = np.diag(R)
np.fill_diagonal(R, np.inf) # replace diagonal with high value
# if there are more edges than non-edges, invert the matrix to reduce
# computation time. "invert" means swap meaning of 0 and 1, not matrix
# inversion
i, j = np.where(np.triu(R, 1))
k = len(i)
if k > nr_poss_edges / 2:
swap = True
R = np.logical_not(R)
np.fill_diagonal(R, np.inf)
i, j = np.where(np.triu(R, 1))
k = len(i)
else:
swap = False
# exclude fully connected nodes
fullnodes = np.where((np.sum(np.triu(R, 1), axis=0) +
np.sum(np.triu(R, 1), axis=1).T) == (ax - 1))
if np.size(fullnodes):
R[fullnodes, :] = 0
R[:, fullnodes] = 0
np.fill_diagonal(R, np.inf)
i, j = np.where(np.triu(R, 1))
k = len(i)
if k == 0 or k >= (nr_poss_edges - 1):
raise BCTParamError("No possible randomization")
for it in range(k):
if np.random.random() > alpha:
continue # rewire alpha% of edges
a = i[it]
b = j[it] # it is the chosen edge from a<->b
alliholes, = np.where(R[:, a] == 0) # find where each end can connect
alljholes, = np.where(R[:, b] == 0)
# we can only use edges with connection to neither node
i_intersect = np.intersect1d(alliholes, alljholes)
# find which of these nodes are connected
ii, jj = np.where(R[np.ix_(i_intersect, i_intersect)])
# if there is an edge to switch
if np.size(ii):
# choose one randomly
nummates = np.size(ii)
mate = np.random.randint(nummates)
# randomly orient the second edge
if np.random.random() > .5:
c = i_intersect[ii[mate]]
d = i_intersect[jj[mate]]
else:
d = i_intersect[ii[mate]]
c = i_intersect[jj[mate]]
# swap the edges
R[a, b] = 0
R[c, d] = 0
R[b, a] = 0
R[d, c] = 0
R[a, c] = 1
R[b, d] = 1
R[c, a] = 1
R[d, b] = 1
# update the edge index (this is inefficient)
for m in range(k):
if i[m] == d and j[m] == c:
i.setflags(write=True)
j.setflags(write=True)
i[it] = c
j[m] = b
elif i[m] == c and j[m] == d:
i.setflags(write=True)
j.setflags(write=True)
j[it] = c
i[m] = b
# restore fullnodes
if np.size(fullnodes):
R[fullnodes, :] = 1
R[:, fullnodes] = 1
# restore inversion
if swap:
R = np.logical_not(R)
# restore diagonal
np.fill_diagonal(R, 0)
    R += np.diag(savediag)
return np.array(R, dtype=int)
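# Minimal usage sketch for randomizer_bin_und; alpha=0.5 asks for roughly half
# of the edges of an arbitrary random binary undirected matrix to be rewired.
def _demo_randomizer_bin_und():
    A = (np.random.random((20, 20)) < 0.3).astype(float)
    A = np.triu(A, 1)
    A = A + A.T
    R = randomizer_bin_und(A, 0.5)
    assert np.all(R == R.T)              # symmetry is preserved
    assert int(R.sum()) == int(A.sum())  # so is the number of edges
    return R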
| {
"content_hash": "e4243320d80eb2a203ee9a9498caabf3",
"timestamp": "",
"source": "github",
"line_count": 1718,
"max_line_length": 100,
"avg_line_length": 31.352735739231665,
"alnum_prop": 0.48917644437843455,
"repo_name": "boomsbloom/dtm-fmri",
"id": "74f03993eec3db326dfd5add9f3e019f8d9170cf",
"size": "53864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DTM/for_gensim/lib/python2.7/site-packages/bct/algorithms/reference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "413670"
},
{
"name": "C++",
"bytes": "262666"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "14725"
},
{
"name": "HTML",
"bytes": "555708"
},
{
"name": "JavaScript",
"bytes": "23921"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "Matlab",
"bytes": "36260"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "46698963"
},
{
"name": "R",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "11728"
},
{
"name": "TeX",
"bytes": "18567"
}
],
"symlink_target": ""
} |
def extractThenewscifiCom(item):
'''
DISABLED
Parser for 'thenewscifi.com'
'''
if 'film' in item['tags']:
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | {
"content_hash": "9d7a1181a97c7b79e2e7579f24e525ec",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 104,
"avg_line_length": 24.75,
"alnum_prop": 0.6279461279461279,
"repo_name": "fake-name/ReadableWebProxy",
"id": "44d3985e04eabaa406347c57dd4e6038d08cc13b",
"size": "594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractThenewscifiCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""
Policy based configuration of libvirt objects
This module provides helper APIs for populating the config.py
classes based on common operational needs / policies
"""
from nova.pci import utils as pci_utils
def set_vif_guest_frontend_config(conf, mac, model, driver):
"""Populate a LibvirtConfigGuestInterface instance
with guest frontend details.
"""
conf.mac_addr = mac
if model is not None:
conf.model = model
if driver is not None:
conf.driver_name = driver
def set_vif_host_backend_bridge_config(conf, brname, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for a software bridge.
"""
conf.net_type = "bridge"
conf.source_dev = brname
if tapname:
conf.target_dev = tapname
conf.script = ""
def set_vif_host_backend_ethernet_config(conf, tapname):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an externally configured
host device.
NB use of this configuration is discouraged by
libvirt project and will mark domains as 'tainted'.
"""
conf.net_type = "ethernet"
conf.target_dev = tapname
conf.script = ""
def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an OpenVSwitch bridge.
"""
conf.net_type = "bridge"
conf.source_dev = brname
conf.vporttype = "openvswitch"
conf.add_vport_param("interfaceid", interfaceid)
if tapname:
conf.target_dev = tapname
conf.script = ""
def set_vif_host_backend_802qbg_config(conf, devname, managerid,
typeid, typeidversion,
instanceid, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbg device.
"""
conf.net_type = "direct"
conf.source_dev = devname
conf.source_mode = "vepa"
conf.vporttype = "802.1Qbg"
conf.add_vport_param("managerid", managerid)
conf.add_vport_param("typeid", typeid)
conf.add_vport_param("typeidversion", typeidversion)
conf.add_vport_param("instanceid", instanceid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_802qbh_config(conf, net_type, devname, profileid,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbh device.
"""
conf.net_type = net_type
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:
conf.source_dev = devname
conf.model = None
conf.vporttype = "802.1Qbh"
conf.add_vport_param("profileid", profileid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hw_veb(conf, net_type, devname, vlan,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an device that supports hardware
virtual ethernet bridge.
"""
conf.net_type = net_type
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:
conf.source_dev = devname
conf.model = None
conf.vlan = vlan
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_direct_config(conf, devname):
"""Populate a LibvirtConfigGuestInterface instance
with direct Interface.
"""
conf.net_type = "direct"
conf.source_mode = "passthrough"
conf.source_dev = devname
conf.model = "virtio"
def set_vif_host_backend_vhostuser_config(conf, mode, path):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for vhostuser socket.
"""
conf.net_type = "vhostuser"
conf.vhostuser_type = "unix"
conf.vhostuser_mode = mode
conf.vhostuser_path = path
def set_vif_bandwidth_config(conf, inst_type):
"""Config vif inbound/outbound bandwidth limit. parameters are
set in instance_type_extra_specs table, key is in the format
quota:vif_inbound_average.
"""
bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak',
'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak',
'vif_outbound_burst']
for key, value in inst_type.get('extra_specs', {}).iteritems():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in bandwidth_items:
setattr(conf, scope[1], value)
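# Minimal illustration of how set_vif_bandwidth_config reads quota:* extra
# specs. The _FakeConf class and the flavor dict below are stand-ins invented
# for this sketch; real callers pass a LibvirtConfigGuestInterface and an
# instance-type/flavor object.
def _example_set_vif_bandwidth_config():
    class _FakeConf(object):
        pass
    conf = _FakeConf()
    inst_type = {'extra_specs': {'quota:vif_inbound_average': '1024',
                                 'quota:vif_outbound_peak': '2048',
                                 'other:ignored': 'x'}}
    set_vif_bandwidth_config(conf, inst_type)
    # only recognized quota:vif_* keys are copied onto the config object
    return conf.vif_inbound_average, conf.vif_outbound_peak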
| {
"content_hash": "6140a100c9cbe235b20b65a640cf3b21",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 77,
"avg_line_length": 31.162337662337663,
"alnum_prop": 0.6526359658262137,
"repo_name": "affo/nova",
"id": "716067c232a078b0d9285a7a7b234b3ee58d4120",
"size": "5409",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/designer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "15659662"
},
{
"name": "Shell",
"bytes": "20716"
}
],
"symlink_target": ""
} |
import os
import signal
import subprocess
import time
import json
import re
import logging
import db_devices as model
import udev_query
import datetime
import uuid
def runpreloadcommand(cmd, timeout):
    process = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    processRc = None
    counter = 0
    # poll instead of calling communicate() inside the loop: communicate()
    # blocks until the process exits, which would defeat the timeout handling
    while True:
        counter += 1
        time.sleep(1)
        process.poll()
        processRc = process.returncode
        if processRc is not None:
            break
        if counter == timeout:
            os.kill(process.pid, signal.SIGQUIT)
        if counter > timeout:
            os.kill(process.pid, signal.SIGKILL)
            processRc = -9
            break
    stdout, stderr = process.communicate()
    return (processRc, stdout, stderr)
def lsblk():
log = logging.getLogger("lsblk")
wantedFields = ["NAME","KNAME","MOUNTPOINT","PARTUUID","SERIAL","FSTYPE","RM","SIZE","FSTYPE","UUID","OWNER","GROUP","MODE","WWN","VENDOR","MAJ:MIN"]
lkfields = ",".join(wantedFields)
command = "lsblk --output %s --pairs" % (lkfields)
#log.debug("command=%s" % (command))
processRc,stdout,stderr = runpreloadcommand(command,10)
#log.debug("stdout=%s" % (stdout))
output = {}
for line in stdout.split("\n"):
parsedKetValue = {}
for key_value in re.split(r'[ ](?=[A-Z]+\b)', line):
key_value_list = re.split(r'=', key_value)
if len(key_value_list) != 2:
continue
parsedKetValue[str(key_value_list[0])] = str(key_value_list[1]).strip('"')
if not 'KNAME' in parsedKetValue.keys():
continue
output[parsedKetValue['KNAME']] = parsedKetValue
#json_line = str(output)
#print json_line
#parsedJson = json.loads(json_line)
#print json.dumps(output,sort_keys=True, indent=4)
return output
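# Minimal illustration of the KEY="value" parsing used in lsblk() above. The
# sample line is invented for this sketch; real input comes from
# `lsblk --output ... --pairs`.
def _example_parse_lsblk_pair_line():
    line = 'NAME="sda1" KNAME="sda1" MOUNTPOINT="/boot" SIZE="512M" RM="0"'
    parsed = {}
    for key_value in re.split(r'[ ](?=[A-Z]+\b)', line):
        key, _, value = key_value.partition('=')
        parsed[key] = value.strip('"')
    return parsed  # e.g. {'KNAME': 'sda1', 'MOUNTPOINT': '/boot', ...}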
def updatdatabase(session=None):
did_something = False
log = logging.getLogger("updatdatabase")
blocks_known = set()
query_block = session.query(model.Block)
if query_block.count() == 0:
log.warning('No Blocks found')
for block in query_block:
if block.devName == None:
continue
blocks_known.add(block.devName)
blockdevices = lsblk()
blockfound = set(blockdevices.keys())
blocks_discoverd = blockfound.difference(blocks_known)
blocks_lost = blocks_known.difference(blockfound)
id_update_type = None
id_update = None
if len(blocks_discoverd) > 0:
find_existing = session.query(model.job_namespace,
).\
filter(model.job_namespace.name == "kname_new")
if find_existing.count() == 0:
newjob_namespace = model.job_namespace()
newjob_namespace.name = "kname_new"
session.add(newjob_namespace)
session.commit()
find_existing = session.query(model.job_namespace,).\
filter(model.job_namespace.name == "kname_new")
id_update_type = int(find_existing.first().id)
else:
id_update_type = int(find_existing.first().id)
find_existing = session.query(model.job_def).\
filter(model.job_def.fk_type == id_update_type)
if find_existing.count() == 0:
newjob_def = model.job_def()
newjob_def.fk_type = id_update_type
newjob_def.cmdln_template = "updatdatabase"
newjob_def.cmdln_paramters = "updatdatabase"
session.add(newjob_def)
session.commit()
find_existing = session.query(model.job_def).\
filter(model.job_def.fk_type == id_update_type)
id_update = int(find_existing.first().id)
find_existing = session.query(model.job_execution).\
filter(model.job_execution.fk_update == id_update)
if find_existing.count() == 0:
newjob_def = model.job_execution()
newjob_def.fk_update = id_update
newjob_def.cmdln = "here is is"
newjob_def.created = datetime.datetime.now()
newjob_def.uuid = str(uuid.uuid1())
session.add(newjob_def)
session.commit()
for device_key in blocks_discoverd:
find_existing = session.query(model.job_execution).\
filter(model.Block.devName == device_key).\
filter(model.job_def.fk_type == model.job_namespace.id).\
filter("lsblk" == model.job_namespace.name).\
filter(model.job_execution.fk_update == model.job_namespace.id)
for i in find_existing:
log.debug("i=%s" % (i))
#log.debug("device_details=%s" % (device_details))
device_details = blockdevices[device_key]
devName = device_details.get("KNAME")
if devName == None:
continue
newImage = model.Block()
newImage.devName = devName
#log.debug("newImage.devName=%s" % (newImage.devName))
newImage.idVendor = device_details.get("ID_VENDOR")
newImage.idProduct = device_details.get("DEVNAME")
newImage.devicenodes_major = device_details.get("MAJOR")
newImage.devicenodes_minor = device_details.get("MINOR")
newImage.device_removable = device_details.get("RM")
newImage.mountpoint = device_details.get('MOUNTPOINT')
newImage.fk_update = id_update
session.add(newImage)
session.commit()
did_something = True
if did_something:
find_existing = session.query(model.job_namespace,
).\
filter(model.job_namespace.name == "kname_new")
if find_existing.count() == 0:
newjob_namespace = model.job_namespace()
newjob_namespace.name = "kname_new"
session.add(newjob_namespace)
find_existing = session.query(model.job_def,
).\
filter(model.job_def.fk_type ==model.job_namespace.id).\
filter(model.job_namespace.name == "kname_new")
if find_existing.count() == 0:
index = None
find_type = session.query(model.job_namespace,
).\
filter(model.job_namespace.name == "kname_new")
if find_type.count() == 0:
newjob_namespace = model.job_namespace()
newjob_namespace.name = "kname_new"
session.add(newjob_namespace)
session.commit()
find_type = session.query(model.job_namespace,
).\
filter(model.job_namespace.name == "kname_new")
typeID = int(find_type.one().id)
newjob_namespace = model.job_def()
newjob_namespace.fk_type = typeID
newjob_namespace.name = "kname_new"
session.add(newjob_namespace)
session.commit()
find_existing = session.query(model.job_def,
).\
filter(model.job_def.fk_type ==model.job_namespace.id).\
filter(model.job_namespace.name == "kname_new")
for device_key in blocks_discoverd:
find_existing = session.query(model.job_execution).\
filter(model.Block.devName == device_key).\
filter(model.job_def.fk_type == model.job_namespace.id).\
filter("lsblk" == model.job_namespace.name).\
filter(model.job_execution.fk_update == model.job_namespace.id)
for device_key in blocks_lost:
log.warning('Code not complete')
return blockdevices
| {
"content_hash": "de6dba322cb8bd01efc9c478f5912812",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 153,
"avg_line_length": 37.791044776119406,
"alnum_prop": 0.5864928909952607,
"repo_name": "osynge/pmpman",
"id": "78940f2b1eaf7a855efd92716cac872a39a95a3c",
"size": "7596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pmpmanager/lsblk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "277717"
},
{
"name": "Shell",
"bytes": "1045"
}
],
"symlink_target": ""
} |
import os
import pygame
from pygame.locals import *
currentmusic = ""
def load_image(name, colorkey=None):
fullname = os.path.join('img', name)
try:
image = pygame.image.load(fullname)
except pygame.error, message:
print 'Cannot load image:', fullname
raise SystemExit, message
image = image.convert()
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0, 0))
image.set_colorkey(colorkey, RLEACCEL)
return image, image.get_rect()
def load_music(name):
    global currentmusic
    fullname = os.path.join('snd', name)
    currentmusic = fullname
try:
music = pygame.mixer.music.load (fullname)
except pygame.error, message:
print 'Cannot load music:', fullname
raise SystemExit, message
pygame.mixer.music.set_volume(0.3)
pygame.mixer.music.play(-1)
def load_sound(name):
fullname = os.path.join('snd',name);
try:
sound = pygame.mixer.Sound(fullname)
sound.set_volume(0.5)
except pygame.error, message:
print 'Cannot load sound:', fullname
raise SystemExit, message
return sound
def stop_music():
pygame.mixer.music.stop()
| {
"content_hash": "006da312b69f436a040153654a8649ff",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 50,
"avg_line_length": 26.466666666666665,
"alnum_prop": 0.6389588581024349,
"repo_name": "borgaster/SpaceWarsEvolved",
"id": "9dd4ce1c4caabedaaecffbbc848a911b7d81ea5b",
"size": "1191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138920"
}
],
"symlink_target": ""
} |
import numpy
import scipy
import unittest
from redis import Redis
from nearpy.storage import MemoryStorage, RedisStorage
class TestStorage(unittest.TestCase):
def setUp(self):
self.memory = MemoryStorage()
self.redis_object = Redis(host='localhost',
port=6379, db=0)
self.redis_storage = RedisStorage(self.redis_object)
def test_memory_storage(self):
x = numpy.random.randn(100, 1)
bucket_key = '23749283743928748'
x_data = ['one', 'two', 'three']
self.memory.store_vector('testHash', bucket_key, x, x_data)
X = self.memory.get_bucket('testHash', bucket_key)
self.assertEqual(len(X), 1)
y = X[0][0]
y_data = X[0][1]
self.assertEqual(len(y), len(x))
self.assertEqual(type(x), type(y))
for k in range(100):
self.assertEqual(y[k], x[k])
self.assertEqual(type(y_data), type(x_data))
self.assertEqual(len(y_data), len(x_data))
for k in range(3):
self.assertEqual(y_data[k], x_data[k])
self.memory.clean_all_buckets()
self.assertEqual(self.memory.get_bucket('testHash', bucket_key), [])
def test_redis_storage(self):
self.redis_storage.clean_all_buckets()
x = numpy.random.randn(100, 1)
bucket_key = '23749283743928748'
x_data = ['one', 'two', 'three']
self.redis_storage.store_vector('testHash', bucket_key, x, x_data)
X = self.redis_storage.get_bucket('testHash', bucket_key)
self.assertEqual(len(X), 1)
y = X[0][0]
y_data = X[0][1]
self.assertEqual(len(y), len(x))
self.assertEqual(type(x), type(y))
for k in range(100):
self.assertEqual(y[k], x[k])
self.assertEqual(type(y_data), type(x_data))
self.assertEqual(len(y_data), len(x_data))
for k in range(3):
self.assertEqual(y_data[k], x_data[k])
self.redis_storage.clean_all_buckets()
self.assertEqual(self.redis_storage.get_bucket('testHash',
bucket_key), [])
def test_redis_storage_sparse(self):
self.redis_storage.clean_all_buckets()
x = scipy.sparse.rand(100, 1, density=0.1)
bucket_key = '23749283743928748'
x_data = ['one', 'two', 'three']
self.redis_storage.store_vector('testHash', bucket_key, x, x_data)
X = self.redis_storage.get_bucket('testHash', bucket_key)
self.assertEqual(len(X), 1)
y = X[0][0]
y_data = X[0][1]
self.assertEqual(type(x), type(y))
self.assertEqual(x.shape[0], y.shape[0])
self.assertEqual(x.shape[1], y.shape[1])
self.assertTrue((y - x).sum() == 0.0)
self.assertEqual(type(y_data), type(x_data))
self.assertEqual(len(y_data), len(x_data))
for k in range(3):
self.assertEqual(y_data[k], x_data[k])
self.redis_storage.clean_all_buckets()
self.assertEqual(self.redis_storage.get_bucket('testHash',
bucket_key), [])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "e5d8194daa18b264f5cfff122c5f7a07",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 76,
"avg_line_length": 38.54216867469879,
"alnum_prop": 0.5642388246326977,
"repo_name": "nguyenhoan1988/NearPy",
"id": "43a66faadac637da880801df7594aad2cece8c07",
"size": "4320",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nearpy/tests/storage_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "183109"
}
],
"symlink_target": ""
} |
"""Defines SortedList class."""
import bisect
class SortedList(object):
"""Maintains a list of sorted items, with fast trimming
using less-than/greater-than comparison."""
def __init__(self, donor=list()):
"""Initialize the object with a copy of the donor list, sorted."""
self._list = sorted(donor[:])
def add(self, item):
"""Add item to the list while maintaining sorted order."""
bisect.insort_left(self._list, item)
def getCountLT(self, item):
"""Return number of elements less than *item*."""
index = bisect.bisect_left(self._list, item)
return index
def getCountGT(self, item):
"""Return number of elements greater than *item*."""
index = bisect.bisect_right(self._list, item)
return len(self._list) - index
def removeLT(self, item):
"""Trim off any elements less than *item*.
Return number of elements trimmed."""
count = self.getCountLT(item)
self._list = self._list[count:]
return count
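# Minimal usage sketch for SortedList; the values below are arbitrary.
if __name__ == '__main__':
    slist = SortedList([5, 1, 9, 3])
    slist.add(4)                      # list is now [1, 3, 4, 5, 9]
    assert slist.getCountLT(4) == 2   # 1 and 3
    assert slist.getCountGT(4) == 2   # 5 and 9
    assert slist.removeLT(4) == 2     # trims 1 and 3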
| {
"content_hash": "4c2e5219c69dcd01ac7c290143392e5a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 31.87878787878788,
"alnum_prop": 0.6178707224334601,
"repo_name": "vmlaker/coils",
"id": "3d91f7f4c893bf6c9fe92784cceb0ef519b98add",
"size": "1052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coils/SortedList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2351"
},
{
"name": "Python",
"bytes": "28619"
}
],
"symlink_target": ""
} |
from .copy_source import CopySource
class AzureDataLakeStoreSource(CopySource):
"""A copy activity Azure Data Lake source.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param source_retry_count: Source retry count. Type: integer (or
Expression with resultType integer).
:type source_retry_count: object
:param source_retry_wait: Source retry wait. Type: string (or Expression
with resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type source_retry_wait: object
:param type: Constant filled by server.
:type type: str
:param recursive: If true, files under the folder path will be read
recursively. Default is true. Type: boolean (or Expression with resultType
boolean).
:type recursive: object
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'source_retry_count': {'key': 'sourceRetryCount', 'type': 'object'},
'source_retry_wait': {'key': 'sourceRetryWait', 'type': 'object'},
'type': {'key': 'type', 'type': 'str'},
'recursive': {'key': 'recursive', 'type': 'object'},
}
def __init__(self, additional_properties=None, source_retry_count=None, source_retry_wait=None, recursive=None):
super(AzureDataLakeStoreSource, self).__init__(additional_properties=additional_properties, source_retry_count=source_retry_count, source_retry_wait=source_retry_wait)
self.recursive = recursive
self.type = 'AzureDataLakeStoreSource'
| {
"content_hash": "be6d5c0a7ae1e84cb50c1b19e5ed12dc",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 175,
"avg_line_length": 42.925,
"alnum_prop": 0.6557949912638322,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "ddd9319b43d59a3c28d8879fccb7ea20b228ff57",
"size": "2191",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_data_lake_store_source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
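# A hedged construction sketch for the model above. It assumes the
# azure-mgmt-datafactory package (which ships this class and its CopySource
# base) is installed and exposes the class at the import path below; the
# argument values are purely illustrative.
from azure.mgmt.datafactory.models import AzureDataLakeStoreSource
source = AzureDataLakeStoreSource(
    source_retry_count=3,           # illustrative retry settings
    source_retry_wait='00:00:30',
    recursive=True)
print(source.type)       # 'AzureDataLakeStoreSource' (constant set in __init__)
print(source.recursive)  # True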
import arcpy
app_found = 'NOT_SET'
toolbox10xSuffix = "_10.3"
def GetApplication():
'''Return app environment as: ARCMAP, ARCGIS_PRO, OTHER'''
global app_found
if app_found != 'NOT_SET':
return app_found
try:
from arcpy import mp
except ImportError:
try:
from arcpy import mapping
mxd = arcpy.mapping.MapDocument("CURRENT")
app_found = "ARCMAP"
return app_found
except:
app_found = "OTHER"
return app_found
try:
aprx = arcpy.mp.ArcGISProject('CURRENT')
app_found = "ARCGIS_PRO"
return app_found
except:
app_found = "OTHER"
return app_found
| {
"content_hash": "d0a08c6412f4733889e737aedb994cd0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 62,
"avg_line_length": 24,
"alnum_prop": 0.5569444444444445,
"repo_name": "pshowalter/solutions-geoprocessing-toolbox",
"id": "0678e11706067cdc9222e0d6fa3a8aa3273e010f",
"size": "1912",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "clearing_operations/scripts/Utilities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6218"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "HTML",
"bytes": "220035"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "Python",
"bytes": "807424"
}
],
"symlink_target": ""
} |
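# A minimal usage sketch for the utility above; it only runs inside an ArcGIS
# Python environment where arcpy imports successfully.
app = GetApplication()           # probes arcpy.mp, then arcpy.mapping
print(app)                       # 'ARCGIS_PRO', 'ARCMAP', or 'OTHER'
print(GetApplication() == app)   # True -- the result is cached in app_found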
"""Module containing HBase installation and cleanup functions.
HBase is a scalable NoSQL database built on Hadoop.
https://hbase.apache.org/
"""
import functools
import os
import posixpath
from perfkitbenchmarker import data
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import hadoop
HBASE_VERSION = '1.0.2'
HBASE_URL = ('http://www.us.apache.org/dist/hbase/hbase-{0}/'
'hbase-{0}-bin.tar.gz').format(HBASE_VERSION)
DATA_FILES = ['hbase/hbase-site.xml.j2', 'hbase/regionservers.j2',
'hbase/hbase-env.sh.j2']
HBASE_DIR = posixpath.join(vm_util.VM_TMP_DIR, 'hbase')
HBASE_BIN = posixpath.join(HBASE_DIR, 'bin')
HBASE_CONF_DIR = posixpath.join(HBASE_DIR, 'conf')
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
for resource in DATA_FILES:
data.ResourcePath(resource)
def _Install(vm):
vm.Install('hadoop')
vm.Install('curl')
vm.RemoteCommand(('mkdir {0} && curl -L {1} | '
'tar -C {0} --strip-components=1 -xzf -').format(
HBASE_DIR, HBASE_URL))
def YumInstall(vm):
"""Installs HBase on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs HBase on the VM."""
_Install(vm)
def _RenderConfig(vm, master_ip, zk_ips, regionserver_ips):
# Use the same heap configuration as Cassandra
memory_mb = vm.total_memory_kb // 1024
hbase_memory_mb = max(min(memory_mb // 2, 1024),
min(memory_mb // 4, 8192))
context = {
'master_ip': master_ip,
'worker_ips': regionserver_ips,
'zk_quorum_ips': zk_ips,
'hadoop_private_key': hadoop.HADOOP_PRIVATE_KEY,
'hbase_memory_mb': hbase_memory_mb,
'scratch_dir': vm.GetScratchDir(),
}
for file_name in DATA_FILES:
file_path = data.ResourcePath(file_name)
remote_path = posixpath.join(HBASE_CONF_DIR,
os.path.basename(file_name))
if file_name.endswith('.j2'):
vm.RenderTemplate(file_path, os.path.splitext(remote_path)[0], context)
else:
vm.RemoteCopy(file_path, remote_path)
def ConfigureAndStart(master, regionservers, zk_nodes):
"""Configure HBase on a cluster.
Args:
master: VM. Master VM.
regionservers: List of VMs.
"""
vms = [master] + regionservers
def LinkNativeLibraries(vm):
vm.RemoteCommand(('mkdir {0}/lib/native && '
'ln -s {1} {0}/lib/native/Linux-amd64-64').format(
HBASE_DIR,
posixpath.join(hadoop.HADOOP_DIR, 'lib', 'native')))
vm_util.RunThreaded(LinkNativeLibraries, vms)
fn = functools.partial(_RenderConfig, master_ip=master.internal_ip,
zk_ips=[vm.internal_ip for vm in zk_nodes],
regionserver_ips=[regionserver.internal_ip
for regionserver in regionservers])
vm_util.RunThreaded(fn, vms)
master.RemoteCommand('{0} dfs -mkdir /hbase'.format(
posixpath.join(hadoop.HADOOP_BIN, 'hdfs')))
master.RemoteCommand(posixpath.join(HBASE_BIN, 'start-hbase.sh'))
def Stop(master):
"""Stop HBase.
Args:
master: VM. Master VM.
"""
master.RemoteCommand(posixpath.join(HBASE_BIN, 'stop-hbase.sh'))
| {
"content_hash": "d9331d745282813d4bc0d1bf08cc67e8",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 29.883928571428573,
"alnum_prop": 0.6340005975500448,
"repo_name": "mateusz-blaszkowski/PerfKitBenchmarker",
"id": "d8f80047d95eef1f030ebe42cd763f264a9fe2fc",
"size": "3957",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/linux_packages/hbase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "1282006"
},
{
"name": "Shell",
"bytes": "23160"
}
],
"symlink_target": ""
} |
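# To make the heap sizing in _RenderConfig above concrete, here is a small
# standalone sketch of the same arithmetic (not PerfKitBenchmarker code): the
# heap is the larger of half the VM memory capped at 1024 MB and a quarter of
# the VM memory capped at 8192 MB.
def hbase_heap_mb(total_memory_kb):
    memory_mb = total_memory_kb // 1024
    return max(min(memory_mb // 2, 1024), min(memory_mb // 4, 8192))
print(hbase_heap_mb(4 * 1024 * 1024))    # 4 GB VM  -> 1024 MB heap
print(hbase_heap_mb(16 * 1024 * 1024))   # 16 GB VM -> 4096 MB heap
print(hbase_heap_mb(64 * 1024 * 1024))   # 64 GB VM -> 8192 MB heap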
"""
Handle the Pagination for a Controller.
"""
class Pagination(object):
"""
The base implementation of Pagination.
    __init__ defines max, offset and count.
"""
def __init__(self, max_result,
offset_key="offset",
count_key="count"):
self.max = max_result
self.offset_key = offset_key
self.count_key = count_key
def paginate(self, request):
"""
        Return an offset, a count and the request kwargs without
        pagination parameters.
"""
request_kwargs = request.values.to_dict()
offset = int(request_kwargs.pop(self.offset_key, 0))
count = int(request_kwargs.pop(self.count_key, self.max))
if count > self.max:
count = self.max
return offset, count, request_kwargs
def get_metadata(self, total=0, offset=0, count=0, **filters):
meta = {self.offset_key: offset,
self.count_key: count,
"total_count": total,
"filters": {}
}
        for k, v in filters.items():
meta["filters"][k] = v
if offset == 0:
meta['previous'] = "null"
else:
meta["previous"] = offset - count
if meta["previous"] < 0:
meta["previous"] = 0
if meta['previous'] != "null":
meta["previous"] = "?{0}={1}".format(self.offset_key,
meta["previous"])
meta["next"] = offset + count
if meta["next"] > total:
meta["next"] = "null"
if meta['next'] != "null":
meta["next"] = "?{0}={1}".format(self.offset_key,
meta["next"])
return meta
| {
"content_hash": "a32534f1464656e1902724157cad766e",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 66,
"avg_line_length": 29.683333333333334,
"alnum_prop": 0.4884896125772038,
"repo_name": "boblefrag/python-rest-api-framework",
"id": "6eebfd930599a148b086ecb87ee72b6f7815ebdf",
"size": "1781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_api_framework/pagination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "83"
},
{
"name": "Python",
"bytes": "90887"
}
],
"symlink_target": ""
} |
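# A short sketch of the metadata produced by the Pagination class above for a
# page in the middle of a 100-row result set; paginate() itself expects a
# Werkzeug-style request object, so only get_metadata is exercised here.
pager = Pagination(max_result=20)
meta = pager.get_metadata(total=100, offset=40, count=20, status="active")
print(meta["previous"])   # ?offset=20
print(meta["next"])       # ?offset=60
print(meta["filters"])    # {'status': 'active'}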
from memoized import memoized
class SupplyInterface(object):
def __init__(self, domain=None):
self.domain = domain
@classmethod
def create_from_location(cls, domain, location):
from corehq.apps.commtrack.helpers import make_supply_point
return make_supply_point(domain, location)
@property
@memoized
def supply_point(self):
from corehq.form_processor.backends.sql.supply import SupplyPointSQL
return SupplyPointSQL
def get_or_create_by_location(self, location):
return self.supply_point.get_or_create_by_location(location)
def get_by_location(self, location):
return self.supply_point.get_by_location(location)
def get_supply_point_ids_by_location(self):
return self.supply_point.get_supply_point_ids_by_location(self.domain)
def get_closed_and_open_by_location_id_and_domain(self, domain, location_id):
"""
This also returns closed supply points.
Please use location.linked_supply_point() instead.
"""
return self.supply_point.get_closed_and_open_by_location_id_and_domain(domain, location_id)
def get_supply_point(self, supply_point_id):
return self.supply_point.get_supply_point(supply_point_id)
def get_supply_points(self, supply_point_ids):
return self.supply_point.get_supply_points(supply_point_ids)
| {
"content_hash": "fa5f8917729a15c148f053cddb30c82d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 99,
"avg_line_length": 34.65,
"alnum_prop": 0.6940836940836941,
"repo_name": "dimagi/commcare-hq",
"id": "60ab0349322932dfed91aed2bd582d65f8d8b804",
"size": "1386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/form_processor/interfaces/supply.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
import csv
FILE_NAME = 'fdic_failed_bank_list.csv'
# write a function to do some exploring with strings
def basic_string_methods(filename):
    # open the csv
    csvfile = open(filename, 'r')
    # create the object that represents the data in the csv file
    reader = csv.reader(csvfile)
    # create a variable to represent the header row
    header_row = next(reader)
    # from the last lesson we know the variable header_row refers to a list
    # let's isolate the string that is 'Acquiring Institution'
    # we'll do this by isolating in the list what is known as the index of the string
    # (the column index below assumes the standard FDIC failed bank list layout)
    acquiring_institution = header_row[4]
    # let's make sure this is a string
    print(type(acquiring_institution))
    # let's get the length of the string
    print(len(acquiring_institution))
    # create a variable to hold our string
    our_string = acquiring_institution
    # let's see how string subscripting works
    # let's print the third character
    print(our_string[2])
    # let's print the first five characters
    print(our_string[:5])
    # let's print everything after the first five characters
    print(our_string[5:])
    # let's capitalize the first letter in the string
    print(our_string.capitalize())
    # let's lowercase the string
    print(our_string.lower())
    # let's uppercase the string
    print(our_string.upper())
    # close the csv file when we're done
    csvfile.close()
basic_string_methods(FILE_NAME)
| {
"content_hash": "b42e3563b327c77cef8d80adb4b9931e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 84,
"avg_line_length": 26.83783783783784,
"alnum_prop": 0.702920443101712,
"repo_name": "tommeagher/pycar14",
"id": "ddbab78873b86d4b2e72bca74041b8f8f0197d98",
"size": "1011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project1/step_5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "81996"
},
{
"name": "Python",
"bytes": "41352"
}
],
"symlink_target": ""
} |
import os
import pickle
import logging
from .DataLoader import DataLoader
from .Dataset import Dataset
def load_dataset(filename, n):
with open(filename, 'rb') as f:
data = pickle.load(f, encoding='latin-1')
logging.info("Loaded {} examples from {}".format(len(data),filename))
if n > -1:
data = data[:n]
logging.info("Using {}".format(len(data)))
dataset = Dataset.from_records(data)
logging.info("Dataset size: {}".format(dataset.bytes))
del data
return dataset
def get_data_loader(filename, n, batch_size):
dataset = load_dataset(filename, n)
data_loader = DataLoader(dataset, batch_size)
return data_loader
def get_train_data_loader(data_dir, n_train, n_valid, batch_size, **kwargs):
train_data_loader = get_data_loader(os.path.join(data_dir, 'train.pkl'), n_train, batch_size)
valid_data_loader = get_data_loader(os.path.join(data_dir, 'valid.pkl'), n_valid, batch_size)
return train_data_loader, valid_data_loader
def get_test_data_loader(data_dir, n_test, batch_size, **kwargs):
test_data_loader = get_data_loader(os.path.join(data_dir, 'valid.pkl'), n_test, batch_size)
return test_data_loader
| {
"content_hash": "ef83663e60d8e495cf51edcc453ff2b8",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 97,
"avg_line_length": 37.21875,
"alnum_prop": 0.6834592779177162,
"repo_name": "isaachenrion/jets",
"id": "2de3574ec555cce2b8b3ab2f6adfe785fdda687c",
"size": "1191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/proteins/data_ops/get_data_loader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11751"
},
{
"name": "Python",
"bytes": "258548"
},
{
"name": "Shell",
"bytes": "6358"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
self.assertEquals(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
self.assertEquals(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
{'a': a.name, 'c': c, 'b': b}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
{'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(DEFG(d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({'d': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {'a': a.name, 'c': c, 'b': b}})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(
sp, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32,
shape=shape,
name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={feed_t: np_array}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={feed_t: np_array}))
# Also check that we can get both back.
out_v, feed_v = sess.run([out_t, feed_t],
feed_dict={feed_t: np_array})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01',
u'\u26a3 unicode', u'\U0001f60e deal with it']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def runTestPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
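    # Note: partial_run_setup() declares every fetch and feed up front; each
    # partial_run() call on the returned handle then supplies only the feeds needed
    # for the tensors it fetches, so r1 and r2 can be evaluated in separate steps
    # while intermediate results are kept alive by the runtime between calls.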
def runTestPartialRunIncomplete(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def runTestConcurrentPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def runTestManyPartialRun(self, sess):
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.multiply(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def runTestRunAndPartialRun(self, sess):
a = constant_op.constant(2.0, dtypes.float32)
b = a * 2
c = b * 3
r1 = sess.run([b, c])
h = sess.partial_run_setup([b, c], [])
r2 = sess.partial_run(h, [b, c])
self.assertEqual(r1, r2)
def runTestPartialRunMissingPlaceholderFeedException(self, sess):
x = array_ops.placeholder(dtypes.float32, shape=())
fetches = [x * 2, x * 3]
handle = sess.partial_run_setup(fetches=fetches, feeds=[])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'You must feed a value for placeholder'):
sess.partial_run(handle, fetches[0])
def testPartialRunDirect(self):
self.runTestPartialRun(session.Session())
def testPartialRunIncompleteDirect(self):
self.runTestPartialRunIncomplete(session.Session())
def testConcurrentPartialRunDirect(self):
self.runTestConcurrentPartialRun(session.Session())
def testManyPartialRunDirect(self):
self.runTestManyPartialRun(session.Session())
def testRunAndPartialRunDirect(self):
self.runTestRunAndPartialRun(session.Session())
def testPartialRunMissingPlaceholderFeedExceptionDirect(self):
self.runTestPartialRunMissingPlaceholderFeedException(session.Session())
def testPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRun(session.Session(server.target))
def testPartialRunIncompleteDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRunIncomplete(session.Session(server.target))
def testConcurrentPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestConcurrentPartialRun(session.Session(server.target))
def testManyPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestManyPartialRun(session.Session(server.target))
def testRunAndPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestRunAndPartialRun(session.Session(server.target))
def testPartialRunMissingPlaceholderFeedExceptionDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRunMissingPlaceholderFeedException(
session.Session(server.target))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
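    # Note: build_cost_model=100 appears to ask the runtime to collect a cost graph
    # once roughly 100 steps have executed, which is why the loop below expects
    # run_metadata.cost_graph to be populated only on the 100th run (step == 99).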
with session.Session(config=config) as sess:
with ops.device('/gpu:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(d, feed_dict={a: 1.0},
options=run_options, run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(
capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
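    # Note on the conversion contract: fetch_fn maps a SquaredTensor to a list of
    # tensors to fetch plus a function that rebuilds a single result from the fetched
    # values; feed_fn1 maps (object, feed value) to a list of (tensor, value) pairs;
    # feed_fn2 maps the object to the underlying tensors that may be fed (used e.g.
    # by partial_run_setup).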
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor,
fetch_fn, feed_fn1, feed_fn2)
with self.test_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(squared_tensor, feed_dict={
squared_tensor : np1 * np1})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
if __name__ == '__main__':
googletest.main()
| {
"content_hash": "df1f4e83d8b2f8e6fbecff1c82f8f666",
"timestamp": "",
"source": "github",
"line_count": 1647,
"max_line_length": 80,
"avg_line_length": 40.335761991499695,
"alnum_prop": 0.6224015173182003,
"repo_name": "SnakeJenny/TensorFlow",
"id": "99402ff2edefee79ea7694891e6adff43872bad6",
"size": "67123",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/client/session_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "136455"
},
{
"name": "C++",
"bytes": "5563037"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "638814"
},
{
"name": "Java",
"bytes": "44388"
},
{
"name": "JavaScript",
"bytes": "5534"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "45325"
},
{
"name": "Python",
"bytes": "2683634"
},
{
"name": "Shell",
"bytes": "6026"
},
{
"name": "TypeScript",
"bytes": "238945"
}
],
"symlink_target": ""
} |
"""Storj API module."""
from binascii import b2a_hex
def ecdsa_to_hex(ecdsa_key):
"""
Return hexadecimal string representation of the ECDSA key.
Args:
ecdsa_key (bytes): ECDSA key.
Raises:
TypeError: if the ECDSA key is not an array of bytes.
Returns:
str: hexadecimal string representation of the ECDSA key.
"""
return '04%s' % b2a_hex(ecdsa_key).decode('ascii')
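# Illustrative usage sketch (not part of the upstream module); the 64-byte value
# below is a placeholder standing in for an uncompressed ECDSA public key body.
if __name__ == '__main__':
    sample_key = b'\x01' * 64
    hex_key = ecdsa_to_hex(sample_key)
    # '04' prefix plus two hex characters per input byte
    assert hex_key.startswith('04') and len(hex_key) == 2 + 2 * len(sample_key)
    print(hex_key)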
| {
"content_hash": "90bba0f7200c8bad7df737c0ea370935",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 22.210526315789473,
"alnum_prop": 0.6374407582938388,
"repo_name": "Miskerest/storj-python-sdk",
"id": "104489d1d461f9f4a11d1710fc1ed8c2f0535522",
"size": "446",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "storj/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2554"
},
{
"name": "Python",
"bytes": "105012"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2022, WAFW00F Developers.
See the LICENSE file for copying permission.
'''
NAME = 'Shadow Daemon (Zecure)'
def is_waf(self):
schemes = [
self.matchContent(r"<h\d{1}>\d{3}.forbidden<.h\d{1}>"),
self.matchContent(r"request forbidden by administrative rules")
]
if all(i for i in schemes):
return True
    return False
| {
"content_hash": "3227a31edb8cabc971c55e37febc7c27",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 71,
"avg_line_length": 23.25,
"alnum_prop": 0.6370967741935484,
"repo_name": "EnableSecurity/wafw00f",
"id": "23e24ad43825f070741e6193923f4d458950fcf5",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wafw00f/plugins/shadowd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "97"
},
{
"name": "Makefile",
"bytes": "339"
},
{
"name": "Python",
"bytes": "109477"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import unittest
from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse
from django.db import connection
from django.forms import EmailField, IntegerField
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.test import SimpleTestCase, TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.html import HTMLParseError, parse_html
from django.test.utils import (CaptureQueriesContext,
IgnoreAllDeprecationWarningsMixin, override_settings)
from django.utils import six
from .models import Person
class SkippingTestCase(TestCase):
def test_skip_unless_db_feature(self):
"A test that might be skipped is actually called."
# Total hack, but it works, just want an attribute that's always true.
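    # (The trick: skipUnlessDBFeature("__class__") checks connection.features.__class__,
    # which is always truthy, so the decorated test runs and its ValueError proves the
    # body was actually executed rather than skipped.)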
@skipUnlessDBFeature("__class__")
def test_func():
raise ValueError
self.assertRaises(ValueError, test_func)
class SkippingClassTestCase(TestCase):
def test_skip_class_unless_db_feature(self):
@skipUnlessDBFeature("__class__")
class NotSkippedTests(unittest.TestCase):
def test_dummy(self):
return
@skipIfDBFeature("__class__")
class SkippedTests(unittest.TestCase):
def test_will_be_skipped(self):
self.fail("We should never arrive here.")
test_suite = unittest.TestSuite()
test_suite.addTest(NotSkippedTests('test_dummy'))
try:
test_suite.addTest(SkippedTests('test_will_be_skipped'))
except unittest.SkipTest:
self.fail("SkipTest should not be raised at this stage")
result = unittest.TextTestRunner(stream=six.StringIO()).run(test_suite)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.skipped), 1)
class AssertNumQueriesTests(TestCase):
urls = 'test_utils.urls'
def test_assert_num_queries(self):
def test_func():
raise ValueError
self.assertRaises(ValueError, self.assertNumQueries, 2, test_func)
def test_assert_num_queries_with_client(self):
person = Person.objects.create(name='test')
self.assertNumQueries(
1,
self.client.get,
"/test_utils/get_person/%s/" % person.pk
)
self.assertNumQueries(
1,
self.client.get,
"/test_utils/get_person/%s/" % person.pk
)
def test_func():
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.assertNumQueries(2, test_func)
class AssertQuerysetEqualTests(TestCase):
def setUp(self):
self.p1 = Person.objects.create(name='p1')
self.p2 = Person.objects.create(name='p2')
def test_ordered(self):
self.assertQuerysetEqual(
Person.objects.all().order_by('name'),
[repr(self.p1), repr(self.p2)]
)
def test_unordered(self):
self.assertQuerysetEqual(
Person.objects.all().order_by('name'),
[repr(self.p2), repr(self.p1)],
ordered=False
)
def test_transform(self):
self.assertQuerysetEqual(
Person.objects.all().order_by('name'),
[self.p1.pk, self.p2.pk],
transform=lambda x: x.pk
)
def test_undefined_order(self):
# Using an unordered queryset with more than one ordered value
# is an error.
with self.assertRaises(ValueError):
self.assertQuerysetEqual(
Person.objects.all(),
[repr(self.p1), repr(self.p2)]
)
# No error for one value.
self.assertQuerysetEqual(
Person.objects.filter(name='p1'),
[repr(self.p1)]
)
class CaptureQueriesContextManagerTests(TestCase):
urls = 'test_utils.urls'
def setUp(self):
self.person_pk = six.text_type(Person.objects.create(name='test').pk)
def test_simple(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
with CaptureQueriesContext(connection) as captured_queries:
pass
self.assertEqual(0, len(captured_queries))
def test_within(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
def test_nested(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.count()
with CaptureQueriesContext(connection) as nested_captured_queries:
Person.objects.count()
self.assertEqual(1, len(nested_captured_queries))
self.assertEqual(2, len(captured_queries))
def test_failure(self):
with self.assertRaises(TypeError):
with CaptureQueriesContext(connection):
raise TypeError
def test_with_client(self):
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 2)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
self.assertIn(self.person_pk, captured_queries[1]['sql'])
class AssertNumQueriesContextManagerTests(TestCase):
urls = 'test_utils.urls'
def test_simple(self):
with self.assertNumQueries(0):
pass
with self.assertNumQueries(1):
Person.objects.count()
with self.assertNumQueries(2):
Person.objects.count()
Person.objects.count()
def test_failure(self):
with self.assertRaises(AssertionError) as exc_info:
with self.assertNumQueries(2):
Person.objects.count()
self.assertIn("1 queries executed, 2 expected", str(exc_info.exception))
self.assertIn("Captured queries were", str(exc_info.exception))
with self.assertRaises(TypeError):
with self.assertNumQueries(4000):
raise TypeError
def test_with_client(self):
person = Person.objects.create(name="test")
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(2):
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
class AssertTemplateUsedContextManagerTests(TestCase):
urls = 'test_utils.urls'
def test_usage(self):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/base.html')
with self.assertTemplateUsed(template_name='template_used/base.html'):
render_to_string('template_used/base.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/include.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/extends.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/base.html')
render_to_string('template_used/base.html')
def test_nested_usage(self):
with self.assertTemplateUsed('template_used/base.html'):
with self.assertTemplateUsed('template_used/include.html'):
render_to_string('template_used/include.html')
with self.assertTemplateUsed('template_used/extends.html'):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/extends.html')
with self.assertTemplateUsed('template_used/base.html'):
with self.assertTemplateUsed('template_used/alternative.html'):
render_to_string('template_used/alternative.html')
render_to_string('template_used/base.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/extends.html')
with self.assertTemplateNotUsed('template_used/base.html'):
render_to_string('template_used/alternative.html')
render_to_string('template_used/base.html')
def test_not_used(self):
with self.assertTemplateNotUsed('template_used/base.html'):
pass
with self.assertTemplateNotUsed('template_used/alternative.html'):
pass
def test_error_message(self):
with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html'):
with self.assertTemplateUsed('template_used/base.html'):
pass
with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html'):
with self.assertTemplateUsed(template_name='template_used/base.html'):
pass
with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html.*template_used/alternative\.html$'):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/alternative.html')
with self.assertRaises(AssertionError) as cm:
response = self.client.get('/test_utils/no_template_used/')
self.assertTemplateUsed(response, 'template_used/base.html')
self.assertEqual(cm.exception.args[0], "No templates used to render the response")
def test_failure(self):
with self.assertRaises(TypeError):
with self.assertTemplateUsed():
pass
with self.assertRaises(AssertionError):
with self.assertTemplateUsed(''):
pass
with self.assertRaises(AssertionError):
with self.assertTemplateUsed(''):
render_to_string('template_used/base.html')
with self.assertRaises(AssertionError):
with self.assertTemplateUsed(template_name=''):
pass
with self.assertRaises(AssertionError):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/alternative.html')
class HTMLEqualTests(TestCase):
def test_html_parser(self):
element = parse_html('<div><p>Hello</p></div>')
self.assertEqual(len(element.children), 1)
self.assertEqual(element.children[0].name, 'p')
self.assertEqual(element.children[0].children[0], 'Hello')
parse_html('<p>')
parse_html('<p attr>')
dom = parse_html('<p>foo')
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.name, 'p')
self.assertEqual(dom[0], 'foo')
def test_parse_html_in_script(self):
parse_html('<script>var a = "<p" + ">";</script>')
parse_html('''
<script>
var js_sha_link='<p>***</p>';
</script>
''')
# script content will be parsed to text
dom = parse_html('''
<script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script>
''')
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>")
def test_self_closing_tags(self):
self_closing_tags = ('br', 'hr', 'input', 'img', 'meta', 'spacer',
'link', 'frame', 'base', 'col')
for tag in self_closing_tags:
dom = parse_html('<p>Hello <%s> world</p>' % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], 'Hello')
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], 'world')
dom = parse_html('<p>Hello <%s /> world</p>' % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], 'Hello')
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], 'world')
def test_simple_equal_html(self):
self.assertHTMLEqual('', '')
self.assertHTMLEqual('<p></p>', '<p></p>')
self.assertHTMLEqual('<p></p>', ' <p> </p> ')
self.assertHTMLEqual(
'<div><p>Hello</p></div>',
'<div><p>Hello</p></div>')
self.assertHTMLEqual(
'<div><p>Hello</p></div>',
'<div> <p>Hello</p> </div>')
self.assertHTMLEqual(
'<div>\n<p>Hello</p></div>',
'<div><p>Hello</p></div>\n')
self.assertHTMLEqual(
'<div><p>Hello\nWorld !</p></div>',
'<div><p>Hello World\n!</p></div>')
self.assertHTMLEqual(
'<div><p>Hello\nWorld !</p></div>',
'<div><p>Hello World\n!</p></div>')
self.assertHTMLEqual(
'<p>Hello World !</p>',
'<p>Hello World\n\n!</p>')
self.assertHTMLEqual('<p> </p>', '<p></p>')
self.assertHTMLEqual('<p/>', '<p></p>')
self.assertHTMLEqual('<p />', '<p></p>')
self.assertHTMLEqual('<input checked>', '<input checked="checked">')
self.assertHTMLEqual('<p>Hello', '<p> Hello')
self.assertHTMLEqual('<p>Hello</p>World', '<p>Hello</p> World')
def test_ignore_comments(self):
self.assertHTMLEqual(
'<div>Hello<!-- this is a comment --> World!</div>',
'<div>Hello World!</div>')
def test_unequal_html(self):
self.assertHTMLNotEqual('<p>Hello</p>', '<p>Hello!</p>')
    self.assertHTMLNotEqual('<p>foo&#20;bar</p>', '<p>foo bar</p>')
    self.assertHTMLNotEqual('<p>foo bar</p>', '<p>foo &#20;bar</p>')
    self.assertHTMLNotEqual('<p>foo nbsp</p>', '<p>foo &nbsp;</p>')
    self.assertHTMLNotEqual('<p>foo #20</p>', '<p>foo &#20;</p>')
self.assertHTMLNotEqual(
'<p><span>Hello</span><span>World</span></p>',
'<p><span>Hello</span>World</p>')
self.assertHTMLNotEqual(
'<p><span>Hello</span>World</p>',
'<p><span>Hello</span><span>World</span></p>')
def test_attributes(self):
self.assertHTMLEqual(
'<input type="text" id="id_name" />',
'<input id="id_name" type="text" />')
self.assertHTMLEqual(
'''<input type='text' id="id_name" />''',
'<input id="id_name" type="text" />')
self.assertHTMLNotEqual(
'<input type="text" id="id_name" />',
'<input type="password" id="id_name" />')
def test_complex_examples(self):
self.assertHTMLEqual(
"""<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""",
"""
<tr><th>
<label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" />
</td></tr>
<tr><th>
<label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" />
</td></tr>
<tr><th>
<label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
</td></tr>
""")
self.assertHTMLEqual(
"""<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p>
This is a valid paragraph
<div> this is a div AFTER the p</div>
</body>
</html>""", """
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p> This is a valid paragraph
<!-- browsers would close the p tag here -->
<div> this is a div AFTER the p</div>
</p> <!-- this is invalid HTML parsing, but it should make no
difference in most cases -->
</body>
</html>""")
def test_html_contain(self):
# equal html contains each other
dom1 = parse_html('<p>foo')
dom2 = parse_html('<p>foo</p>')
self.assertTrue(dom1 in dom2)
self.assertTrue(dom2 in dom1)
dom2 = parse_html('<div><p>foo</p></div>')
self.assertTrue(dom1 in dom2)
self.assertTrue(dom2 not in dom1)
self.assertFalse('<p>foo</p>' in dom2)
self.assertTrue('foo' in dom2)
# when a root element is used ...
dom1 = parse_html('<p>foo</p><p>bar</p>')
dom2 = parse_html('<p>foo</p><p>bar</p>')
self.assertTrue(dom1 in dom2)
dom1 = parse_html('<p>foo</p>')
self.assertTrue(dom1 in dom2)
dom1 = parse_html('<p>bar</p>')
self.assertTrue(dom1 in dom2)
def test_count(self):
# equal html contains each other one time
dom1 = parse_html('<p>foo')
dom2 = parse_html('<p>foo</p>')
self.assertEqual(dom1.count(dom2), 1)
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo</p><p>bar</p>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo foo</p><p>foo</p>')
self.assertEqual(dom2.count('foo'), 3)
dom2 = parse_html('<p class="bar">foo</p>')
self.assertEqual(dom2.count('bar'), 0)
self.assertEqual(dom2.count('class'), 0)
self.assertEqual(dom2.count('p'), 0)
self.assertEqual(dom2.count('o'), 2)
dom2 = parse_html('<p>foo</p><p>foo</p>')
self.assertEqual(dom2.count(dom1), 2)
dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<div><div><p>foo</p></div></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo<p>foo</p></p>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo<p>bar</p></p>')
self.assertEqual(dom2.count(dom1), 0)
def test_parsing_errors(self):
with self.assertRaises(AssertionError):
self.assertHTMLEqual('<p>', '')
with self.assertRaises(AssertionError):
self.assertHTMLEqual('', '<p>')
with self.assertRaises(HTMLParseError):
parse_html('</p>')
def test_contains_html(self):
response = HttpResponse('''<body>
This is a form: <form action="" method="get">
<input type="text" name="Hello" />
</form></body>''')
self.assertNotContains(response, "<input name='Hello' type='text'>")
self.assertContains(response, '<form action="" method="get">')
self.assertContains(response, "<input name='Hello' type='text'>", html=True)
self.assertNotContains(response, '<form action="" method="get">', html=True)
invalid_response = HttpResponse('''<body <bad>>''')
with self.assertRaises(AssertionError):
self.assertContains(invalid_response, '<p></p>')
with self.assertRaises(AssertionError):
self.assertContains(response, '<p "whats" that>')
def test_unicode_handling(self):
response = HttpResponse('<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>')
self.assertContains(response, '<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>', html=True)
class XMLEqualTests(TestCase):
def test_simple_equal(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_unordered(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raise(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal(self):
xml1 = "<elem attr1='a' attr2='c' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLNotEqual(xml1, xml2)
def test_simple_not_equal_raise(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLNotEqual(xml1, xml2)
def test_parsing_errors(self):
xml_unvalid = "<elem attr1='a attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLNotEqual(xml_unvalid, xml2)
def test_comment_root(self):
xml1 = "<?xml version='1.0'?><!-- comment1 --><elem attr1='a' attr2='b' />"
xml2 = "<?xml version='1.0'?><!-- comment2 --><elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
class SkippingExtraTests(TestCase):
fixtures = ['should_not_be_loaded.json']
# HACK: This depends on internals of our TestCase subclasses
def __call__(self, result=None):
# Detect fixture loading by counting SQL queries, should be zero
with self.assertNumQueries(0):
super(SkippingExtraTests, self).__call__(result)
@unittest.skip("Fixture loading should not be performed for skipped tests.")
def test_fixtures_are_skipped(self):
pass
class AssertRaisesMsgTest(SimpleTestCase):
def test_special_re_chars(self):
"""assertRaisesMessage shouldn't interpret RE special chars."""
def func1():
raise ValueError("[.*x+]y?")
self.assertRaisesMessage(ValueError, "[.*x+]y?", func1)
class AssertFieldOutputTests(SimpleTestCase):
def test_assert_field_output(self):
error_invalid = ['Enter a valid email address.']
    self.assertFieldOutput(EmailField, {'a@a.com': 'a@a.com'}, {'aaa': error_invalid})
    self.assertRaises(AssertionError, self.assertFieldOutput, EmailField, {'a@a.com': 'a@a.com'}, {'aaa': error_invalid + ['Another error']})
    self.assertRaises(AssertionError, self.assertFieldOutput, EmailField, {'a@a.com': 'Wrong output'}, {'aaa': error_invalid})
    self.assertRaises(AssertionError, self.assertFieldOutput, EmailField, {'a@a.com': 'a@a.com'}, {'aaa': ['Come on, gimme some well formatted data, dude.']})
def test_custom_required_message(self):
class MyCustomField(IntegerField):
default_error_messages = {
'required': 'This is really required.',
}
self.assertFieldOutput(MyCustomField, {}, {}, empty_value=None)
class DoctestNormalizerTest(IgnoreAllDeprecationWarningsMixin, SimpleTestCase):
def test_normalizer(self):
from django.test.simple import make_doctest
suite = make_doctest("test_utils.doctest_output")
failures = unittest.TextTestRunner(stream=six.StringIO()).run(suite)
self.assertEqual(failures.failures, [])
# for OverrideSettingsTests
def fake_view(request):
pass
class FirstUrls:
urlpatterns = patterns('', url(r'first/$', fake_view, name='first'))
class SecondUrls:
urlpatterns = patterns('', url(r'second/$', fake_view, name='second'))
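# Note: OverrideSettingsTests below exercises ticket #21518. Its two tests reverse()
# names defined in two different URLconfs, so if the URL resolver cache were not
# cleared when ROOT_URLCONF is overridden, whichever test ran second would fail
# with NoReverseMatch.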
class OverrideSettingsTests(TestCase):
"""
#21518 -- If neither override_settings nor a settings_changed receiver
clears the URL cache between tests, then one of these two test methods will
fail.
"""
@override_settings(ROOT_URLCONF=FirstUrls)
def test_first(self):
reverse('first')
@override_settings(ROOT_URLCONF=SecondUrls)
def test_second(self):
reverse('second')
| {
"content_hash": "c1b0d222d4314eb68620dfc5c3c5debb",
"timestamp": "",
"source": "github",
"line_count": 660,
"max_line_length": 162,
"avg_line_length": 37.79848484848485,
"alnum_prop": 0.5974265442738605,
"repo_name": "wfxiang08/django178",
"id": "365341d50f6f8c9c29fb1b58bd20e8cbaddd8e9d",
"size": "24987",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tests/test_utils/tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42829"
},
{
"name": "HTML",
"bytes": "169506"
},
{
"name": "JavaScript",
"bytes": "75783"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "9164014"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from celery import group
from sh import ssh
from tasks import check_available_auth_methods, check_open_port
import libtorrent
import time
import sys
class TorrentScanner(object):
def __init__(self, torrent_file, collection_attempts, collection_duration):
self.torrent_file = torrent_file
self.max_peer_collection_attempts = collection_attempts
self.peer_collection_seconds = collection_duration
self.torrent_ip_list = []
self.open_ip_list = []
self.bruteforce_attempt_list = []
def get_peers_from_swarm(self):
session = libtorrent.session()
session.listen_on(6881, 6891)
info = libtorrent.torrent_info(self.torrent_file)
handler = session.add_torrent({'ti': info, 'save_path': './poc', 'connections_limit': '1000', 'num_want': '2000'})
handler.set_download_limit(2)
handler.set_upload_limit(2)
print 'starting: ', handler.name()
for i in xrange(0, self.max_peer_collection_attempts):
status = handler.status()
time.sleep(self.peer_collection_seconds)
for i in handler.get_peer_info():
if i.ip[0] not in self.torrent_ip_list:
self.torrent_ip_list.append(i.ip[0])
print "\npeers collected: ", len(self.torrent_ip_list)
def cleanup_list(self, l):
return [x for x in l if x is not None]
def find_open_ports(self):
job_list = []
if len(self.torrent_ip_list) > 0:
for ip in self.torrent_ip_list:
job_list.append(check_open_port.s(ip))
jobs = group(job_list)
results = jobs.apply_async()
self.open_ip_list = self.cleanup_list(results.join())
def check_for_weak_auth(self):
job_list = []
for ip in self.open_ip_list:
job_list.append(check_available_auth_methods.s(ip))
jobs = group(job_list)
results = jobs.apply_async()
self.bruteforce_attempt_list = self.cleanup_list(results.join())
print self.bruteforce_attempt_list
def password_bruteforce(self):
pass
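# Illustrative driver sketch (placeholder arguments; assumes a local .torrent file
# and running Celery workers for the tasks imported above).
if __name__ == '__main__':
    scanner = TorrentScanner('example.torrent', collection_attempts=3,
                             collection_duration=30)
    scanner.get_peers_from_swarm()   # gather peer IPs from the swarm
    scanner.find_open_ports()        # fan out port checks through a Celery group
    scanner.check_for_weak_auth()    # enumerate SSH auth methods on open hosts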
| {
"content_hash": "930f3401355cb36a0c16f527586eca3a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 122,
"avg_line_length": 34.07936507936508,
"alnum_prop": 0.6101537028411738,
"repo_name": "danpilch/torrent-ssh-bruteforce",
"id": "bfd94a46160909a791a915b32da14cb4e2676d22",
"size": "2147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "torrent/torrentscanner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4123"
}
],
"symlink_target": ""
} |
'''
Copyright (c) 2017 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
'''
import os
import logging
import struct
from plugins.helpers import spotlight_parser as spotlight_parser
from plugins.helpers.spotlight_filter import create_views_for_ios_db
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
__Plugin_Name = "SPOTLIGHT"
__Plugin_Friendly_Name = "Spotlight"
__Plugin_Version = "1.0"
__Plugin_Description = "Reads spotlight indexes (user, volume, iOS)"
__Plugin_Author = "Yogesh Khatri"
__Plugin_Author_Email = "[email protected]"
__Plugin_Modes = "IOS,MACOS,ARTIFACTONLY"
__Plugin_ArtifactOnly_Usage = "This module reads spotlight's index database file found at: /.Spotlight-V100/Store-V2/<UUID>/store.db and "\
"also '.store.db' at the same location. Since macOS 10.13, there are also spotlight databases for each "\
"user under ~/Library/Metadata/CoreSpotlight/index.spotlightV3/ \niOS spotlight databases are also "\
"parsed. These would be found here: /private/var/mobile/Library/Spotlight/CoreSpotlight/*/index.spotlightV2"
log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object
#---- Do not change the variable names in above section ----#
writer = None
mac_info_obj = None
spotlight_parser.log = logging.getLogger('MAIN.' + __Plugin_Name + '.SPOTLIGHT_PARSER')
def ProcessStoreItem(item, id_as_hex):
'''Reads a single store item and processes it for output. Returns dictionary'''
try:
data_dict = {}
if id_as_hex:
id_hex_str = f'{(item.id & (2**64-1)):X}'
if len(id_hex_str) % 2:
id_hex_str = '0' + id_hex_str
reversed_id = ''
for x in range(-1, -len(id_hex_str), -2):
reversed_id += id_hex_str[x-1] + id_hex_str[x]
data_dict['ID_hex'] = id_hex_str
data_dict['Parent_ID_hex'] = f'{(item.parent_id & (2**64-1)):X}'
data_dict['ID_hex_reversed'] = reversed_id
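            # Illustrative example: id 0x0123456789ABCDEF gives ID_hex
            # '0123456789ABCDEF' and ID_hex_reversed 'EFCDAB8967452301'
            # (the same 8 bytes in reversed, little-endian order).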
else:
data_dict['ID'] = item.id
data_dict['Parent_ID'] = item.parent_id
data_dict['Flags'] = item.flags
data_dict['Date_Updated'] = item.ConvertEpochToUtcDateStr(item.date_updated)
for k, v in list(item.meta_data_dict.items()):
orig_debug = v
if type(v) == list:
if len(v) == 1:
v = v[0]
if type(v) == str:
if v.endswith('\x16\x02'):
v = v[:-2]
else:
v = ', '.join([str(x) for x in v])
data_dict[k] = v
return data_dict
except Exception as ex:
log.exception ("Failed while processing row data before writing")
def ProcessStoreItems(store_items, id_as_hex=False):
global writer
try:
data_list = []
for item in store_items:
data = ProcessStoreItem(item, id_as_hex)
if data:
data_list.append(data)
writer.WriteRows(data_list)
except Exception as ex:
log.exception ("Failed to write row data")
def Get_Column_Info(store):
'''Returns a list of columns with data types for use with writer'''
if store.is_ios_store:
data_info = [ ('ID_hex',DataType.TEXT),('ID_hex_reversed',DataType.TEXT),('Flags',DataType.INTEGER),
('Parent_ID_hex',DataType.TEXT),('Date_Updated',DataType.TEXT) ]
else:
data_info = [ ('ID',DataType.INTEGER),('Flags',DataType.INTEGER),
('Parent_ID',DataType.INTEGER),('Date_Updated',DataType.TEXT) ]
for _, prop in list(store.properties.items()):
# prop = [name, prop_type, value_type]
if prop[0] in ('_kMDXXXX___DUMMY', 'kMDStoreAccumulatedSizes') : continue # skip this
if prop[2] in [0, 2, 6, 7]:
if prop[1] & 2 == 2: # Multiple items
val_type = DataType.TEXT
else:
val_type = DataType.INTEGER
else:
val_type = DataType.TEXT
data_info.append((prop[0], val_type))
return data_info
def CopyOutputParams(output_params):
'''Creates and returns a copy of MacInfo.OutputParams object'''
op_copy = OutputParams()
op_copy.output_path = output_params.output_path
op_copy.write_csv = output_params.write_csv
op_copy.write_tsv = output_params.write_tsv
op_copy.write_sql = output_params.write_sql
op_copy.write_xlsx = output_params.write_xlsx
op_copy.xlsx_writer = output_params.xlsx_writer
op_copy.output_db_path = output_params.output_db_path
op_copy.export_path = output_params.export_path
op_copy.export_log_sqlite = output_params.export_log_sqlite
op_copy.timezone = output_params.timezone
return op_copy
def EnableSqliteDb(output_path, out_params, file_name_prefix):
try:
sqlite_path = os.path.join(output_path, file_name_prefix + "_spotlight.db")
log.info("Creating sqlite db for spotlight output @ {}".format(sqlite_path))
out_params.output_db_path = SqliteWriter.CreateSqliteDb(sqlite_path)
out_params.write_sql = True
return True
except (sqlite3.Error, OSError) as ex:
log.info('Sqlite db could not be created at : ' + sqlite_path)
log.exception('Exception occurred when trying to create Sqlite db')
return False
def GetFileData(path, user):
'''Get entire file data - ios Only'''
global mac_info_obj
data = b''
if mac_info_obj != None:
mac_info_obj.ExportFile(path, __Plugin_Name, user + '_', False)
f = mac_info_obj.Open(path)
if f:
data = f.read()
else:
log.error("Failed to open file {}".format(path))
else: # For single artifact mode
with open(path, 'rb') as f:
data = f.read()
return data
def GetMapDataOffsetHeader(input_folder, id, user):
''' Given an id X, this returns the data from 3 files,
dbStr-X.map.data, dbStr-X.map.header, dbStr-X.map.offsets. It will
search for these files in the input_folder.
Returns tuple (data, offsets, header)
'''
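    # For example, id=1 resolves to dbStr-1.map.data, dbStr-1.map.offsets and
    # dbStr-1.map.header inside input_folder.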
if mac_info_obj == None: # single artifact mode
data_path = os.path.join(input_folder, 'dbStr-{}.map.data'.format(id))
offsets_path = os.path.join(input_folder, 'dbStr-{}.map.offsets'.format(id))
header_path = os.path.join(input_folder, 'dbStr-{}.map.header'.format(id))
else:
data_path = input_folder + '/dbStr-{}.map.data'.format(id)
offsets_path = input_folder + '/dbStr-{}.map.offsets'.format(id)
header_path = input_folder + '/dbStr-{}.map.header'.format(id)
map_data = GetFileData(data_path, user)
offsets_data = GetFileData(offsets_path, user)
header_data = GetFileData(header_path, user)
return (map_data, offsets_data, header_data)
def ProcessStoreDb(input_file_path, input_file, output_path, output_params, items_to_compare, file_name_prefix, limit_output_types=True, no_path_file=False, user=""):
'''Main spotlight store.db processing function
file_name_prefix is used to name the excel sheet or sqlite table, as well as prefix for name of paths_file.
limit_output_types=True will only write to SQLITE, else all output options are honored. This is for faster
processing, as writing to excel is very slow. We will still try to honor user preference if the db is small.
items_to_compare is a dictionary used to compare and only write new items not present already
'''
items = {}
global writer
output_path_full_paths = os.path.join(output_path, file_name_prefix + '_fullpaths.tsv')
output_path_data = os.path.join(output_path, file_name_prefix + '_data.txt')
log.info('Processing ' + input_file_path)
try:
if not os.path.exists(output_path):
log.info("Creating output folder for spotlight at {}".format(output_path))
os.makedirs(output_path)
with open(output_path_data, 'wb') as output_file:
output_paths_file = None
store = spotlight_parser.SpotlightStore(input_file)
if store.is_ios_store: # The properties, categories and indexes must be stored in external files
input_folder = os.path.dirname(input_file_path)
try:
prop_map_data, prop_map_offsets,prop_map_header = GetMapDataOffsetHeader(input_folder, 1, user)
cat_map_data, cat_map_offsets, cat_map_header = GetMapDataOffsetHeader(input_folder, 2, user)
idx_1_map_data, idx_1_map_offsets, idx_1_map_header = GetMapDataOffsetHeader(input_folder, 4, user)
idx_2_map_data, idx_2_map_offsets, idx_2_map_header = GetMapDataOffsetHeader(input_folder, 5, user)
store.ParsePropertiesFromFileData(prop_map_data, prop_map_offsets, prop_map_header)
store.ParseCategoriesFromFileData(cat_map_data, cat_map_offsets, cat_map_header)
store.ParseIndexesFromFileData(idx_1_map_data, idx_1_map_offsets, idx_1_map_header, store.indexes_1)
store.ParseIndexesFromFileData(idx_2_map_data, idx_2_map_offsets, idx_2_map_header, store.indexes_2, has_extra_byte=True)
store.ReadPageIndexesAndOtherDefinitions(True)
except:
log.exception('Failed to find or process one or more dependency files. Cannot proceed!')
return None
##
else:
store.ReadPageIndexesAndOtherDefinitions()
## create db, write table with fields.
out_params = CopyOutputParams(output_params)
if limit_output_types and (store.block0.item_count > 500): # Large db, limit to sqlite output
log.warning('Since the spotlight database is large, only Sqlite output will be written!')
out_params.write_xlsx = False
out_params.write_csv = False
out_params.write_tsv = False
if not out_params.write_sql: # sql is not enabled, must initialize database!
if not EnableSqliteDb(output_path, out_params, file_name_prefix): return None
try:
log.debug ("Trying to write extracted store data for {}".format(file_name_prefix))
data_type_info = Get_Column_Info(store)
writer = DataWriter(out_params, "Spotlight-" + file_name_prefix, data_type_info, input_file_path)
except (sqlite3.Error, ValueError, OSError) as ex:
                log.exception ("Failed to initialize data writer")
return None
total_items_parsed = store.ParseMetadataBlocks(output_file, items, items_to_compare, ProcessStoreItems)
writer.FinishWrites()
if total_items_parsed == 0:
log.debug('Nothing was parsed from this file!')
# create Views in ios/user style db
if store.is_ios_store and (total_items_parsed > 0):
create_views_for_ios_db(writer.sql_writer.filepath, writer.sql_writer.table_name)
# Write Paths db as tsv
if (not store.is_ios_store) and (not no_path_file):
path_type_info = [ ('ID',DataType.INTEGER),('FullPath',DataType.TEXT) ]
fullpath_writer = DataWriter(out_params, "Spotlight-" + file_name_prefix + '-paths', path_type_info, input_file_path)
with open(output_path_full_paths, 'wb') as output_paths_file:
log.info('Inodes and Path information being written to {}'.format(output_path_full_paths))
output_paths_file.write(b"Inode_Number\tFull_Path\r\n")
if items_to_compare:
items_to_compare.update(items) # This updates items_to_compare !
WriteFullPaths(items, items_to_compare, output_paths_file, fullpath_writer)
else:
WriteFullPaths(items, items, output_paths_file, fullpath_writer)
if out_params.write_sql:
CreateViewAndIndexes(data_type_info, fullpath_writer.sql_writer, file_name_prefix)
fullpath_writer.FinishWrites()
return items
except Exception as ex:
log.exception('Exception processing spotlight store db file')
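# Illustrative call sketch (single-artifact style; the paths, table prefix and an
# already initialised OutputParams instance named out_params are assumptions):
#   with open('/evidence/store.db', 'rb') as f:
#       ProcessStoreDb('/evidence/store.db', f, '/cases/out/SPOTLIGHT_DATA', out_params,
#                      None, 'volume-1-store', limit_output_types=True)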
def CreateViewAndIndexes(data_type_info, sql_writer, file_name_prefix):
desired = ['kMDItemContentTypeTree', 'kMDItemContentType', 'kMDItemKind', 'kMDItemMediaTypes',
'_kMDItemOwnerUserID', '_kMDItemOwnerGroupID', 'kMDItemUserCreatedUserHandle', 'kMDItemUserModifiedUserHandle',
'kMDItemUserPrintedUserHandle', '_kMDItemFileName', 'kMDItemDisplayName', 'kMDItemAlternateNames',
'kMDItemTitle', 'kMDItemPhysicalSize', 'kMDItemLogicalSize', 'kMDItemDurationSeconds', 'kMDItemPixelHeight',
'kMDItemPixelWidth', 'kMDItemColorSpace', 'kMDItemWhereFroms', 'kMDItemURL', 'kMDItemSubject',
'kMDItemRecipientEmailAddresses', 'kMDItemPrimaryRecipientEmailAddresses', 'kMDItemAdditionalRecipientEmailAddresses',
'kMDItemHiddenAdditionalRecipientEmailAddresses', 'kMDItemCountry', 'kMDItemCity', 'kMDItemStateOrProvince',
'kMDItemPhoneNumbers', 'kMDItemAuthors', 'kMDItemComment', 'kMDItemAlbum', 'kMDItemComposer',
               'kMDItemMusicalGenre', 'kMDItemRecordingYear', 'kMDItemAcquisitionModel', 'kMDItemExposureProgram',
'kMDItemLatitude', 'kMDItemLongitude', 'kMDItemTimestamp', 'kMDItemGPSDateStamp', '_kMDItemContentChangeDate',
'_kMDItemCreationDate', 'kMDItemContentCreationDate', 'kMDItemContentModificationDate', 'kMDItemDateAdded',
'kMDItemUsedDates', 'kMDItemLastUsedDate', 'kMDItemUseCount', 'kMDItemUserCreatedDate', 'kMDItemUserModifiedDate',
'kMDItemUserPrintedDate', 'kMDItemDownloadedDate', 'kMDItemCFBundleIdentifier', 'kMDItemCreator'
]
columns = []
for prop in desired:
for item in data_type_info:
if item[0] == prop:
columns.append(prop)
break
query = "CREATE VIEW 'SpotlightDataView-{}' AS SELECT s.ID, Flags, Date_Updated, p.FullPath, ".format(file_name_prefix) +\
", ".join(columns) + " FROM 'Spotlight-{}' as s".format(file_name_prefix) +\
" LEFT JOIN 'Spotlight-{}-paths' as p ON s.ID=p.ID WHERE s.ID > 1".format(file_name_prefix)
success, cursor, error_message = sql_writer.RunQuery(query)
if success:
log.info("VIEW 'SpotlightDataView-{}' created for spotlight data in database".format(file_name_prefix) )
else:
log.error("Failed to create VIEW 'SpotlightDataView-{}'".format(file_name_prefix))
log.error("Error was : {}".format(error_message))
# # creating indexes, commented out for now
# log.debug("Trying to add indexes")
# query = "CREATE INDEX '{0}_idx_all' ON 'Spotlight-{0}' ({1})".format(file_name_prefix, ", ".join(columns))
# success, cursor, error_message = sql_writer.RunQuery(query)
# if success:
# log.info("Indexes created for 'Spotlight-{}'".format(file_name_prefix))
# else:
# log.error("Failed to create Indexes 'Spotlight-{}'".format(file_name_prefix))
# log.error("Error was : {}".format(error_message))
# query = "CREATE INDEX '{0}_idx_paths' ON 'Spotlight-{0}-paths' (ID, FullPath)".format(file_name_prefix, ", ".join(columns))
# success, cursor, error_message = sql_writer.RunQuery(query)
# if success:
# log.info("Indexes created for 'Spotlight-{}'".format(file_name_prefix))
# else:
# log.error("Failed to create Indexes 'Spotlight-{}'".format(file_name_prefix))
# log.error("Error was : {}".format(error_message))
def WriteFullPaths(items, all_items, output_paths_file, fullpath_writer):
'''
    Writes the inode number and full path of each item to the paths tsv file and the data writer
    items = dictionary of items to write
    all_items = dictionary of items used to recursively resolve full paths
'''
path_list = []
for k,v in list(items.items()):
name = v[2]
if name:
fullpath = spotlight_parser.RecursiveGetFullPath(v, all_items)
to_write = str(k) + '\t' + fullpath + '\r\n'
output_paths_file.write(to_write.encode('utf-8', 'backslashreplace'))
path_list.append([k, fullpath])
fullpath_writer.WriteRows(path_list)
def DropReadme(output_folder, message, filename='Readme.txt'):
try:
if not os.path.exists(output_folder):
log.info("Creating output folder for {} at {}".format(filename, output_folder))
os.makedirs(output_folder)
output_file_path = os.path.join(output_folder, filename)
with open(output_file_path, 'wb') as output_file:
output_file.write(message.encode('utf-8') + b'\r\n')
except OSError as ex:
log.exception('Exception writing file - {}'.format(filename))
def ReadVolumeConfigPlistFromImage(mac_info, file_path):
success, plist, error = mac_info.ReadPlist(file_path)
if success:
ReadVolumeConfigPlist(plist, mac_info.output_params, file_path)
else:
log.error('Failed to read plist {} \r\nError was: {}'.format(file_path, error))
def ReadVolumeConfigPlist(plist, output_params, file_path):
'''Reads VolumeConfiguration.plist and gets store configurations'''
log.info("Trying to get spotlight configuration from {}".format(file_path))
config_info = [('StoreUUID',DataType.TEXT),('StoreCreationDate',DataType.DATE),
('Version',DataType.TEXT),('IndexVersion',DataType.INTEGER),
('PartialPath',DataType.TEXT),('ConfigurationModificationDate',DataType.DATE),
('ConfigurationModificationVersion',DataType.TEXT),('ConfigurationVolumeUUID',DataType.TEXT),
('Source',DataType.TEXT)
]
configs_list = []
stores = plist.get('Stores', None)
if stores:
log.info (str(len(stores)) + " store(s) found")
for k, v in list(stores.items()):
store_uuid = k
config = [ store_uuid, v.get('CreationDate', None),
v.get('CreationVersion', ''), v.get('IndexVersion', 0),
v.get('PartialPath', ''), plist.get('ConfigurationModificationDate', None),
plist.get('ConfigurationModificationVersion', ''), plist.get('ConfigurationVolumeUUID', ''),
file_path
]
configs_list.append(config)
WriteList("spotlight store configuration", "SpotlightConfig", configs_list, config_info, output_params, file_path)
else:
log.info ("No spotlight stores defined in plist!")
def ProcessStoreAndDotStore(mac_info, store_path_1, store_path_2, prefix):
items_1 = None
items_2 = None
if mac_info.IsValidFilePath(store_path_1):
mac_info.ExportFile(store_path_1, __Plugin_Name, prefix + '_', False)
log.info('Now processing file {} '.format(store_path_1))
# Process store.db here
input_file = mac_info.Open(store_path_1)
output_folder = os.path.join(mac_info.output_params.output_path, 'SPOTLIGHT_DATA', prefix)
if input_file != None:
table_name = prefix + '-store'
log.info("Spotlight data for user='{}' db='{}' will be saved with table/sheet name as {}".format(prefix, 'store.db', table_name))
items_1 = ProcessStoreDb(store_path_1, input_file, output_folder, mac_info.output_params, None, table_name, True, True, prefix)
if mac_info.IsValidFilePath(store_path_2):
mac_info.ExportFile(store_path_2, __Plugin_Name, prefix + '_', False)
log.info('Now processing file {}'.format(store_path_2))
# Process .store.db here
input_file = mac_info.Open(store_path_2)
output_folder = os.path.join(mac_info.output_params.output_path, 'SPOTLIGHT_DATA', prefix)
if input_file != None:
if items_1:
log.info('Only newer items not found in store.db will be written out!')
                DropReadme(output_folder, 'Items already present in store.db were ignored when processing the '\
'.store.db file. Only new or updated items are shown in the .store-DIFF* '\
'files. If you want the complete output, process the exported .store.db '\
'file with mac_apt_single_plugin.py and this plugin')
table_name = prefix + '-.store-DIFF'
log.info("Spotlight store for user='{}' db='{}' will be saved with table/sheet name as {}".format(prefix, '.store.db', table_name))
items_2 = ProcessStoreDb(store_path_2, input_file, output_folder, mac_info.output_params, items_1, table_name, True, True, prefix)
def Process_User_DBs(mac_info):
'''
Process the databases located in /Users/<USER>/Library/Metadata/CoreSpotlight/index.spotlightV3/
Seen in High Sierra (10.13) and above
'''
user_spotlight_store = '{}/Library/Metadata/CoreSpotlight/index.spotlightV3/store.db'
user_spotlight_dot_store = '{}/Library/Metadata/CoreSpotlight/index.spotlightV3/.store.db'
processed_paths = []
for user in mac_info.users:
user_name = user.user_name
if user.home_dir == '/private/var/empty': continue # Optimization, nothing should be here!
        elif user.home_dir == '/private/var/root': user_name = 'root' # Some other users use the same root folder; we will list all such users as 'root', as there is no way to tell
if user.home_dir in processed_paths: continue # Avoid processing same folder twice (some users have same folder! (Eg: root & daemon))
processed_paths.append(user.home_dir)
store_path_1 = user_spotlight_store.format(user.home_dir)
store_path_2 = user_spotlight_dot_store.format(user.home_dir)
ProcessStoreAndDotStore(mac_info, store_path_1, store_path_2, user_name)
def ProcessVolumeStore(mac_info, spotlight_base_path, export_prefix=''):
'''
Process the main Spotlight-V100 database usually found on the volume's root.
'''
spotlight_folder = spotlight_base_path + '/Store-V2/'
vol_config_plist_path = spotlight_base_path + '/VolumeConfiguration.plist'
if mac_info.IsValidFilePath(vol_config_plist_path):
mac_info.ExportFile(vol_config_plist_path, __Plugin_Name, export_prefix, False)
ReadVolumeConfigPlistFromImage(mac_info, vol_config_plist_path)
folders = mac_info.ListItemsInFolder(spotlight_folder, EntryType.FOLDERS)
index = 0
for folder in folders:
index += 1
uuid = folder['name']
store_path_1 = spotlight_folder + uuid + '/store.db'
store_path_2 = spotlight_folder + uuid + '/.store.db'
items_1 = None
items_2 = None
if mac_info.IsValidFilePath(store_path_1):
sub_folder = os.path.join(__Plugin_Name, str(index) + "_" + uuid)
mac_info.ExportFile(store_path_1, sub_folder, '', False)
log.info('Now processing file {} '.format(store_path_1))
# Process store.db here
input_file = mac_info.Open(store_path_1)
output_folder = os.path.join(mac_info.output_params.output_path, 'SPOTLIGHT_DATA', uuid)
if input_file != None:
table_name = ((export_prefix + '_') if export_prefix else '') + str(index) + '-store'
log.info("Spotlight data for uuid='{}' db='{}' will be saved with table/sheet name as {}".format(uuid, 'store.db', table_name))
items_1 = ProcessStoreDb(store_path_1, input_file, output_folder, mac_info.output_params, None, table_name, True, False, '')
else:
log.debug('File not found: {}'.format(store_path_1))
if mac_info.IsValidFilePath(store_path_2):
mac_info.ExportFile(store_path_2, sub_folder, '', False)
log.info('Now processing file {}'.format(store_path_2))
# Process .store.db here
input_file = mac_info.Open(store_path_2)
output_folder = os.path.join(mac_info.output_params.output_path, 'SPOTLIGHT_DATA', uuid)
if input_file != None:
if items_1:
log.info('Only newer items not found in store.db will be written out!')
                    DropReadme(output_folder, 'Items already present in store.db were ignored when processing the '\
'.store.db file. Only new or updated items are shown in the .store-DIFF* '\
'files. If you want the complete output, process the exported .store.db '\
'file with mac_apt_single_plugin.py and this plugin')
table_name = ((export_prefix + '_') if export_prefix else '') + str(index) + '-.store-DIFF'
log.info("Spotlight store for uuid='{}' db='{}' will be saved with table/sheet name as {}".format(uuid, '.store.db', table_name))
items_2 = ProcessStoreDb(store_path_2, input_file, output_folder, mac_info.output_params, items_1, table_name, True, False, '')
else:
log.debug('File not found: {}'.format(store_path_2))
def Plugin_Start(mac_info):
'''Main Entry point function for plugin'''
global mac_info_obj
mac_info_obj = mac_info
Process_User_DBs(mac_info) # Usually small , 10.13+ only
spotlight_base_path = '/.Spotlight-V100'
if mac_info.IsValidFolderPath(spotlight_base_path):
ProcessVolumeStore(mac_info, spotlight_base_path)
else:
# For live/zip volume, Data may need to be accessed here:
spotlight_base_path = '/System/Volumes/Data/.Spotlight-V100'
if mac_info.IsValidFolderPath(spotlight_base_path):
ProcessVolumeStore(mac_info, spotlight_base_path, 'DataVolume')
# For catalina's read-only volume
spotlight_base_path = '/private/var/db/Spotlight-V100/BootVolume'
if mac_info.IsValidFolderPath(spotlight_base_path):
ProcessVolumeStore(mac_info, spotlight_base_path, 'BootVolume')
def Plugin_Start_Standalone(input_files_list, output_params):
log.info("Module Started as standalone")
for input_path in input_files_list:
log.debug("Input file passed was: " + input_path)
if os.path.basename(input_path).lower().endswith('store.db'):
try:
with open(input_path, 'rb') as input_file:
output_folder = os.path.join(output_params.output_path, 'SPOTLIGHT_DATA')
log.info('Now processing file {}'.format(input_path))
ProcessStoreDb(input_path, input_file, output_folder, output_params, None, os.path.basename(input_path), False, False, '')
except (OSError):
log.exception('Failed to open input file ' + input_path)
else:
log.info("Unknown file type: {}".format(os.path.basename()))
def Plugin_Start_Ios(ios_info):
'''Entry point for ios_apt plugin'''
global mac_info_obj
mac_info_obj = ios_info
ios_spotlight_folders = [
'/private/var/mobile/Library/Spotlight/CoreSpotlight/NSFileProtectionComplete/index.spotlightV2',
'/private/var/mobile/Library/Spotlight/CoreSpotlight/NSFileProtectionCompleteUnlessOpen/index.spotlightV2',
'/private/var/mobile/Library/Spotlight/CoreSpotlight/NSFileProtectionCompleteUntilFirstUserAuthentication/index.spotlightV2'
]
for folder in ios_spotlight_folders:
store_path_1 = os.path.join(folder, 'store.db')
store_path_2 = os.path.join(folder, '.store.db')
prefix = folder.split('/')[-2]
ProcessStoreAndDotStore(ios_info, store_path_1, store_path_2, prefix)
if __name__ == '__main__':
print ("This plugin is a part of a framework and does not run independently on its own!") | {
"content_hash": "07c8fca6126cd5c4e7c9c85414f7d0bd",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 180,
"avg_line_length": 55.836223506743735,
"alnum_prop": 0.612063908347424,
"repo_name": "ydkhatri/mac_apt",
"id": "70916e72dca8e24c3eb848f3669d558288de8909",
"size": "28979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/spotlight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Kaitai Struct",
"bytes": "19375"
},
{
"name": "Python",
"bytes": "1315164"
}
],
"symlink_target": ""
} |
from support import submodule
| {
"content_hash": "4f588e954edf25a31a807c72a831116d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.8666666666666667,
"repo_name": "ploxiln/fabric",
"id": "4ceecbf7876ce1ee09c74f09b32030639e89ebb6",
"size": "30",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/support/deep.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "469937"
}
],
"symlink_target": ""
} |
"""
sentry.utils.sourcemaps
~~~~~~~~~~~~~~~~~~~~~~~
Originally based on https://github.com/martine/python-sourcemap
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import bisect
from collections import namedtuple
from urlparse import urljoin
from sentry.utils import json
SourceMap = namedtuple('SourceMap', ['dst_line', 'dst_col', 'src', 'src_line', 'src_col', 'name'])
SourceMapIndex = namedtuple('SourceMapIndex', ['states', 'keys', 'sources', 'content'])
# Mapping of base64 letter -> integer value.
B64 = dict(
(c, i) for i, c in
enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/')
)
def parse_vlq(segment):
"""
Parse a string of VLQ-encoded data.
Returns:
a list of integers.
"""
values = []
cur, shift = 0, 0
for c in segment:
val = B64[c]
# Each character is 6 bits:
# 5 of value and the high bit is the continuation.
val, cont = val & 0b11111, val >> 5
cur += val << shift
shift += 5
if not cont:
# The low bit of the unpacked value is the sign.
cur, sign = cur >> 1, cur & 1
if sign:
cur = -cur
values.append(cur)
cur, shift = 0, 0
if cur or shift:
raise Exception('leftover cur/shift in vlq decode')
return values
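# Illustrative decodings (not part of the original module), assuming the
# standard base64 VLQ scheme used by source maps:
#   parse_vlq('A')  == [0]
#   parse_vlq('C')  == [1]
#   parse_vlq('D')  == [-1]    # the low bit of the unpacked value is the sign
#   parse_vlq('gB') == [16]    # the continuation bit spreads the value over two characters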
def parse_sourcemap(smap):
"""
Given a sourcemap json object, yield SourceMap objects as they are read from it.
"""
sources = smap['sources']
sourceRoot = smap.get('sourceRoot')
names = smap['names']
mappings = smap['mappings']
lines = mappings.split(';')
if sourceRoot:
sources = [
urljoin(sourceRoot, src)
for src in sources
]
dst_col, src_id, src_line, src_col, name_id = 0, 0, 0, 0, 0
for dst_line, line in enumerate(lines):
segments = line.split(',')
dst_col = 0
for segment in segments:
if not segment:
continue
parse = parse_vlq(segment)
dst_col += parse[0]
src = None
name = None
if len(parse) > 1:
src_id += parse[1]
src = sources[src_id]
src_line += parse[2]
src_col += parse[3]
if len(parse) > 4:
name_id += parse[4]
name = names[name_id]
assert dst_line >= 0
assert dst_col >= 0
assert src_line >= 0
assert src_col >= 0
yield SourceMap(dst_line, dst_col, src, src_line, src_col, name)
def sourcemap_to_index(sourcemap):
smap = json.loads(sourcemap)
state_list = []
key_list = []
src_list = set()
content = None
if 'sourcesContent' in smap:
content = {}
for idx, source in enumerate(smap['sources']):
if smap['sourcesContent'][idx]:
content[source] = smap['sourcesContent'][idx].splitlines()
else:
content[source] = []
for state in parse_sourcemap(smap):
state_list.append(state)
key_list.append((state.dst_line, state.dst_col))
src_list.add(state.src)
return SourceMapIndex(state_list, key_list, src_list, content)
def find_source(indexed_sourcemap, lineno, colno):
# error says "line no 1, column no 56"
assert lineno > 0, 'line numbers are 1-indexed'
return indexed_sourcemap.states[bisect.bisect_left(indexed_sourcemap.keys, (lineno - 1, colno)) - 1]
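# Usage sketch (illustrative; variable names are placeholders, not part of the
# original module):
#   index = sourcemap_to_index(raw_sourcemap_text)
#   state = find_source(index, lineno=1, colno=56)
#   # state.src, state.src_line and state.src_col point back into the
#   # original (pre-minification) source file.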
| {
"content_hash": "7745308e8a713a1d1e460ecbd400c5b5",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 104,
"avg_line_length": 26.903703703703705,
"alnum_prop": 0.5619493392070485,
"repo_name": "rdio/sentry",
"id": "065781ec1b2872e1987b7b0212e11841784ea1b3",
"size": "3632",
"binary": false,
"copies": "1",
"ref": "refs/heads/rdio_sentry_6.4.4",
"path": "src/sentry/utils/sourcemaps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "533425"
},
{
"name": "HTML",
"bytes": "258193"
},
{
"name": "JavaScript",
"bytes": "916843"
},
{
"name": "Makefile",
"bytes": "2982"
},
{
"name": "Python",
"bytes": "2881969"
},
{
"name": "Ruby",
"bytes": "8042"
}
],
"symlink_target": ""
} |
"""
Created on Tue Oct 11 09:49:40 2016
@author: dennis
"""
from geometry_msgs.msg import PoseStamped, TwistStamped, Vector3Stamped
from nav_msgs.msg import Path
import rospy
class state():
def __init__(self, lock, driver):
self._lock = lock
self.driver = driver
## states
# pose
self._pose_pub = rospy.Publisher('mavros/setpoint_position/local', PoseStamped, queue_size=10)
self._pose_msg = PoseStamped()
self._pose_state = "posctr"
# vel
self._vel_pub = rospy.Publisher('mavros/setpoint_velocity/cmd_vel', TwistStamped, queue_size=10 )
self._vel_msg = TwistStamped()
self._vel_state = "velctr"
# acc
self._accel_pub = rospy.Publisher('mavros/setpoint_accel/accel', Vector3Stamped, queue_size=10)
self._accel_msg = Vector3Stamped()
self._accel_state = "accelctr"
# path
self._bezier_pub = rospy.Publisher('path/bezier_pt', Path, queue_size=10)
self._bezier_msg = Path()
self._bezier_state = "bezier"
# default initialization
self.set_state("posctr")
# initial desired position: position and orientation
ps = [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0]
self.set_msg(ps)
def set_state(self, arg):
self._lock.acquire()
if arg == "posctr":
self.state = self._pose_state
self.msg = self._pose_msg
self.pub = self._pose_pub
elif arg == "velctr":
self.state = self._vel_state
self.msg = self._vel_msg
self.pub = self._vel_pub
elif arg == "accelctr":
self.state = self._accel_state
self.msg = self._accel_msg
self.pub = self._accel_pub
elif arg == "bezier":
self.state = self._bezier_state
self.msg = self._bezier_msg
self.pub = self._bezier_pub
else:
print "this state is not supported"
self._lock.release()
def set_msg(self, arg):
if self.state == "posctr":
if len(arg) == 7:
self._lock.acquire()
self._pose_msg.pose.position.x = arg[0]
self._pose_msg.pose.position.y = arg[1]
self._pose_msg.pose.position.z = arg[2]
self._pose_msg.pose.orientation.x = arg[3]
self._pose_msg.pose.orientation.y = arg[4]
self._pose_msg.pose.orientation.z = arg[5]
self._pose_msg.pose.orientation.w = arg[6]
self._lock.release()
else:
print "posctr requires array of len 7"
elif self.state == "velctr":
if len(arg) == 3:
self._lock.acquire()
self._vel_msg.twist.linear.x = arg[0]
self._vel_msg.twist.linear.y = arg[1]
self._vel_msg.twist.linear.z = arg[2]
#self._vel_msg.twist.angular.x = arg[3]
#self._vel_msg.twist.angular.y = arg[4]
#self._vel_msg.twist.angular.z = arg[5]
self._lock.release()
else:
print "velctr requires array of len 3"
elif self.state == "accelctr":
if len(arg) == 3:
self._lock.acquire()
self._accel_msg.vector.x = arg[0]
self._accel_msg.vector.y = arg[1]
self._accel_msg.vector.z = arg[2]
                self._lock.release()
else:
print "accelctr requires array of len 3"
elif self.state == "bezier":
if len(arg) == 3:
# initialize
poses = []
# loop through bezier points
for idx, pose in enumerate(arg):
p = PoseStamped()
p.pose.position.x = pose[0]
p.pose.position.y = pose[1]
p.pose.position.z = pose[2]
poses.append(p)
self._lock.acquire()
self._bezier_msg.poses = poses
self._bezier_msg.header.stamp = rospy.get_rostime()
#self._bezier_msg.header.frame_id = "local_origin"
self._lock.release()
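# Usage sketch (illustrative; assumes a threading.Lock and a driver object are
# available in the caller, as elsewhere in this package):
#   ctrl = state(threading.Lock(), driver)
#   ctrl.set_state("velctr")
#   ctrl.set_msg([1.0, 0.0, 0.5])   # x, y, z linear velocity setpoints
#   ctrl.pub.publish(ctrl.msg)      # publishes on mavros/setpoint_velocity/cmd_vel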
| {
"content_hash": "92b510dd7b186e05b52ff47d9d13c101",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 106,
"avg_line_length": 32.12925170068027,
"alnum_prop": 0.45966546686428117,
"repo_name": "Stifael/offboard",
"id": "0a3a8b16f367959aacfd3aafbbac6d471b4c0ba3",
"size": "4745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/state.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "74326"
}
],
"symlink_target": ""
} |
import json
import logging
import time
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.hooks import MySqlHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from collections import OrderedDict
from datetime import date, datetime
from decimal import Decimal
from MySQLdb.constants import FIELD_TYPE
from tempfile import NamedTemporaryFile
class MySqlToGoogleCloudStorageOperator(BaseOperator):
"""
Copy data from MySQL to Google cloud storage in JSON format.
"""
template_fields = ('sql', 'bucket', 'filename', 'schema_filename')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self,
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
mysql_conn_id='mysql_default',
google_cloud_storage_conn_id='google_cloud_storage_default',
delegate_to=None,
*args,
**kwargs):
"""
:param sql: The SQL to execute on the MySQL table.
:type sql: string
:param bucket: The bucket to upload to.
:type bucket: string
:param filename: The filename to use as the object name when uploading
to Google cloud storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: string
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from MySQL.
:type schema_filename: string
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
            filename param docs above). Google cloud storage allows for files
to be a maximum of 4GB. This param allows developers to specify the
file size of the splits.
:type approx_max_file_size_bytes: long
:param mysql_conn_id: Reference to a specific MySQL hook.
:type mysql_conn_id: string
:param google_cloud_storage_conn_id: Reference to a specific Google
cloud storage hook.
:type google_cloud_storage_conn_id: string
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
"""
super(MySqlToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
        self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.mysql_conn_id = mysql_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
def execute(self, context):
cursor = self._query_mysql()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.update(self._write_local_schema_file(cursor))
# Flush all files before uploading
for file_handle in files_to_upload.values():
file_handle.flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for file_handle in files_to_upload.values():
file_handle.close()
def _query_mysql(self):
"""
Queries mysql and returns a cursor to the results.
"""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = map(lambda schema_tuple: schema_tuple[0], cursor.description)
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles = { self.filename.format(file_no): tmp_file_handle }
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats
row = map(self.convert_types, row)
row_dict = dict(zip(schema, row))
# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
json.dump(row_dict, tmp_file_handle)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write('\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
return tmp_file_handles
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema = []
for field in cursor.description:
# See PEP 249 for details about the description tuple.
field_name = field[0]
field_type = self.type_map(field[1])
# Always allow TIMESTAMP to be nullable. MySQLdb returns None types
# for required fields because some MySQL timestamps can't be
# represented by Python's datetime (e.g. 0000-00-00 00:00:00).
field_mode = 'NULLABLE' if field[6] or field_type == 'TIMESTAMP' else 'REQUIRED'
schema.append({
'name': field_name,
'type': field_type,
'mode': field_mode,
})
logging.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
json.dump(schema, tmp_schema_file_handle)
return {self.schema_filename: tmp_schema_file_handle}
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google cloud storage.
"""
hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
scope='https://www.googleapis.com/auth/devstorage.read_write',
delegate_to=self.delegate_to)
for object, tmp_file_handle in files_to_upload.items():
hook.upload(self.bucket, object, tmp_file_handle.name, 'application/json')
@classmethod
def convert_types(cls, value):
"""
Takes a value from MySQLdb, and converts it to a value that's safe for
JSON/Google cloud storage/BigQuery. Dates are converted to UTC seconds.
Decimals are converted to floats.
"""
if type(value) in (datetime, date):
return time.mktime(value.timetuple())
elif isinstance(value, Decimal):
return float(value)
else:
return value
@classmethod
def type_map(cls, mysql_type):
"""
Helper function that maps from MySQL fields to BigQuery fields. Used
when a schema_filename is set.
"""
d = {
FIELD_TYPE.INT24: 'INTEGER',
FIELD_TYPE.TINY: 'INTEGER',
FIELD_TYPE.BIT: 'INTEGER',
FIELD_TYPE.DATETIME: 'TIMESTAMP',
FIELD_TYPE.DECIMAL: 'FLOAT',
FIELD_TYPE.NEWDECIMAL: 'FLOAT',
FIELD_TYPE.DOUBLE: 'FLOAT',
FIELD_TYPE.FLOAT: 'FLOAT',
FIELD_TYPE.LONG: 'INTEGER',
FIELD_TYPE.LONGLONG: 'INTEGER',
FIELD_TYPE.SHORT: 'INTEGER',
FIELD_TYPE.TIMESTAMP: 'TIMESTAMP',
FIELD_TYPE.YEAR: 'INTEGER',
}
return d[mysql_type] if mysql_type in d else 'STRING'
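# Illustrative DAG usage (task id, SQL, bucket, file names and the dag object
# below are placeholders, not part of this module):
#   extract = MySqlToGoogleCloudStorageOperator(
#       task_id='mysql_table_to_gcs',
#       sql='SELECT * FROM my_table',
#       bucket='my-bucket',
#       filename='exports/my_table_{}.json',
#       schema_filename='schemas/my_table.json',
#       dag=dag)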
| {
"content_hash": "99b9d2c442db4063b1895837ef8b85b9",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 101,
"avg_line_length": 41.195238095238096,
"alnum_prop": 0.6086001618310022,
"repo_name": "wxiang7/airflow",
"id": "0eb368e3fa95e8b1f56dd0dd2d4f4aa16f2abb11",
"size": "8651",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/mysql_to_gcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36361"
},
{
"name": "HTML",
"bytes": "101378"
},
{
"name": "JavaScript",
"bytes": "889411"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "941256"
},
{
"name": "Shell",
"bytes": "11821"
}
],
"symlink_target": ""
} |
import os
from mpp.models import SQLTestCase
from mpp.lib.PSQL import PSQL
class runtest(SQLTestCase):
"""
@db_name memory_accounting
"""
sql_dir = 'sql/'
ans_dir = 'answer/'
out_dir = 'output/'
class verify(SQLTestCase):
def gp_version(self):
result = PSQL.run_sql_command( sql_cmd='select version()', flags='-t -q' )
if "HAWQ" in result:
return "hawq"
else:
return "gpdb"
def test_oom_count(self):
prd = "hawq"
if self.gp_version() == "gpdb":
prd = "gp"
# SQL command to find number of processes logging memory usage for single segment seg0 when a 4-slice query runs OOM
search = "%Logging memory usage%"
sql_command = """select count(*) from (select distinct logpid, logslice, logsegment from %s_toolkit.__%s_log_segment_ext where logmessage like '%s' and logtime >= (select logtime from %s_toolkit.__%s_log_master_ext where logmessage like 'statement: select 4 as oom_test;' order by logtime desc limit 1) and logsegment='seg0' group by logpid, logslice, logsegment order by logsegment,logslice desc) as foo;""" % (prd, prd, search, prd, prd)
result = PSQL.run_sql_command( sql_cmd='%s ' %(sql_command), flags='-t -q' )
# Verify that OOM log count is at least the number of slices in the query
self.failUnless(int(result) >= 4, 'OOM log count should be at least the number of slices. OOM log count is ' + result)
def test_dumpusage(self):
# Verify that .mem files are generated when test sets GUC gp_dump_memory_usage to on
MASTER_DATA_DIRECTORY = os.environ['MASTER_DATA_DIRECTORY']
pg_logdir = os.path.join(MASTER_DATA_DIRECTORY, 'pg_log')
mem_file = ''
for f in os.listdir(pg_logdir):
if f.endswith('.mem'):
mem_file = f
self.failUnless(mem_file != '', 'Memory usage files not generated with GUC gp_dump_memory_usage!')
| {
"content_hash": "c5322320cb032376ce94819378bda04e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 448,
"avg_line_length": 47.11904761904762,
"alnum_prop": 0.6306215260232441,
"repo_name": "cjcjameson/gpdb",
"id": "62825aa416b050081a99120ecd73ec37da483cab",
"size": "1979",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "src/test/tinc/tincrepo/resource_management/memory_accounting/scenario/oom_test/runsql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11492"
},
{
"name": "C",
"bytes": "35862596"
},
{
"name": "C++",
"bytes": "3303631"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "736617"
},
{
"name": "HTML",
"bytes": "191406"
},
{
"name": "Java",
"bytes": "268244"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "196275"
},
{
"name": "M4",
"bytes": "104559"
},
{
"name": "Makefile",
"bytes": "437242"
},
{
"name": "Objective-C",
"bytes": "41796"
},
{
"name": "PLSQL",
"bytes": "261677"
},
{
"name": "PLpgSQL",
"bytes": "5198576"
},
{
"name": "Perl",
"bytes": "3901323"
},
{
"name": "Perl 6",
"bytes": "8302"
},
{
"name": "Python",
"bytes": "8753134"
},
{
"name": "Roff",
"bytes": "51338"
},
{
"name": "Ruby",
"bytes": "26724"
},
{
"name": "SQLPL",
"bytes": "3895383"
},
{
"name": "Shell",
"bytes": "554130"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "488779"
}
],
"symlink_target": ""
} |
import tweepy
from checkConnection import is_connected
import re
# The function below hits the Twitter API and returns all the tweets
def twitterData(consumer_key,consumer_secret,access_token,access_token_secret,twitter_id,numbr_of_tweets,since_id):
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
print "Finding out your internet connection"
internet_working = is_connected()
    # handle errors in case of authentication failure or connection error
if internet_working:
print "Internet is fine"
try:
print 'Parsing twitter API data... Wait for a while'
#for reference http://docs.tweepy.org/en/v3.5.0/api.html#API.user_timeline
            all_tweet = api.user_timeline(id=twitter_id, count=numbr_of_tweets, since_id=since_id)  # here tweepy checks the above user keys
print "User Authenticated Successfully"
return all_tweet
except tweepy.TweepError as error:
print error.message
else:
print "Check yout internet connection"
def tweetData(tweet):
    # This function extracts the tweet data (tweet text, id, etc.) from a tweet object.
    # It returns a tuple matching the database insert query.
    # In a future release the database will be updated to save emoji as well.
    ######################## for Emoticon removal from tweet.text ##################
    # Remove unicode emoji so that the MySQL error "1366: Incorrect
    # string value: '\xF0\x9F\x9A\x97' for column 'tweet_text' at row 1" can be avoided.
try:
# Wide UCS-4 build
myre = re.compile(u'['
u'\U0001F300-\U0001F64F'
u'\U0001F680-\U0001F6FF'
u'\u2600-\u26FF\u2700-\u27BF]+',
re.UNICODE)
except re.error:
# Narrow UCS-2 build
myre = re.compile(u'('
u'\ud83c[\udf00-\udfff]|'
u'\ud83d[\udc00-\ude4f\ude80-\udeff]|'
u'[\u2600-\u26FF\u2700-\u27BF])+',
re.UNICODE)
######################## End of Emoticon removal###############################
# myre.sub remove emoticon
# .encode used to convert unicode in to text
tweet_text = (myre.sub('', tweet.text)).encode('utf-8')
tweet_id = tweet.id
tweet_date = tweet.author.created_at.strftime('%Y-%m-%d %H:%M:%S')
user_id = tweet.author.screen_name
screen_name = tweet.author.name
language = tweet.author.lang
location = tweet.author.location.encode('utf-8-sig')
followers_count = tweet.author.followers_count
friends_count = tweet.author.friends_count
time_zone = tweet.author.time_zone
return (tweet_id,tweet_text,user_id,tweet_date,\
language,location,screen_name,followers_count,friends_count,time_zone)
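# Illustrative use of the two helpers above (credentials and the handle are
# placeholders):
#   tweets = twitterData(consumer_key, consumer_secret, access_token,
#                        access_token_secret, '@SomeHandle', 100, None)
#   rows = [tweetData(t) for t in tweets]   # tuples ready for DB insertion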
if __name__ == "__main__":
consumer_key = "HXXXXXXXXXXXXXXXXn"
consumer_secret = "wXXXXXXXXXXXXXXXXXXxk"
access_token = "2XXXXXXXXXXXXXXXXXXXXXXXXhO"
access_token_secret = "qXXXXXXXXXXxZYl"
twitter_id = "@TrafflineDEL"
numbr_of_tweets = 100
    twitterData(consumer_key, consumer_secret, access_token, access_token_secret, twitter_id, numbr_of_tweets, None)  # since_id=None fetches the most recent tweets
| {
"content_hash": "d26ea28b998327db1ff80932cfeb7742",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 134,
"avg_line_length": 41.16216216216216,
"alnum_prop": 0.6749835850295469,
"repo_name": "SkumarAG/twitterFeedExtractor",
"id": "ae81d51cee7435a9c22197b9954b89fd7f451215",
"size": "3046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "files/twitter_feed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9155"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .. import models
import factory
class CaseFactory(factory.django.DjangoModelFactory):
name = factory.Sequence('case-/{0}/'.format)
office = factory.SubFactory('foundation.offices.tests.factories.OfficeFactory')
created_by = factory.SubFactory('foundation.users.tests.factories.UserFactory')
class Meta:
model = models.Case
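# Usage sketch (illustrative): CaseFactory() builds and saves a Case with a
# sequential name such as 'case-/0/', creating the related Office and User
# through the SubFactory declarations above.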
| {
"content_hash": "c57a539a00ccbf1e7fd24a1bd97e5519",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 83,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.7436548223350253,
"repo_name": "ad-m/foundation-manager",
"id": "845f462acc650638b5f4ed90cec6f3e9e99b8238",
"size": "394",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "foundation/cases/tests/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "205007"
},
{
"name": "HTML",
"bytes": "48803"
},
{
"name": "JavaScript",
"bytes": "284166"
},
{
"name": "Python",
"bytes": "155467"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
} |
"""
Deletes an azure function
"""
from common.methods import set_progress
from infrastructure.models import CustomField
from common.methods import generate_string_from_template
import os, json
def run(job, **kwargs):
resource = kwargs.get('resource')
function_name = resource.attributes.get(field__name='azure_function_name').value
resource_group = resource.attributes.get(field__name='resource_group_name').value
set_progress("Deleting function...")
function_delete_command = "az functionapp delete --name {0} --resource-group {1}".format(function_name, resource_group)
os.system(function_delete_command)
return "Success", "The function has succefully been deleted.", "" | {
"content_hash": "d16f4d4c3852dfb2c3be3fd17a6e798e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 123,
"avg_line_length": 39,
"alnum_prop": 0.7421652421652422,
"repo_name": "CloudBoltSoftware/cloudbolt-forge",
"id": "1ff546546ea81bf3b09b5cb9a0106f144774e441",
"size": "702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blueprints/azure_functions/delete.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1665"
},
{
"name": "HTML",
"bytes": "165828"
},
{
"name": "JavaScript",
"bytes": "1871"
},
{
"name": "PowerShell",
"bytes": "5779"
},
{
"name": "Python",
"bytes": "1742154"
},
{
"name": "Shell",
"bytes": "16836"
}
],
"symlink_target": ""
} |
'''
Templates for different types of sentences.
#NAME = name of the character
#NOM = name of second character
#GPR = gender pronoun
#POS = positive talking point or trait
#NEG = negative talking point or trait
#GNN = gender noun
#PNR = Gender reflexive pronoun
#GPN = Gender possessive pronoun
#NOP = Second person gender pronoun
#LOC = setting/location
#DET = determiner
#AMB = ambience
#PREP = preposition
#POS_LOC = a more positive, pleasant location
#NEG_LOC = a more negative location
#POS_AMB = an ambience that goes with a positive location
#NEG_AMB = an ambience that goes with a negative location
#NEUT_LOC = a location with both positive and negative aspects
#NEUT_AMB = ambient words that can be negative or positive or neutral
Possible differentiation of positive and negative settings in order to have
character reactions and thoughts that make sense in terms of them.
'''
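# Illustrative expansion (not part of the original data): the template
#   "#NAME was as #POS as #GPR was #NEG."
# filled for a female character named Alice could render as
#   "Alice was as clever as she was moody."
# using GPR['female'] == 'she' and entries drawn from the POS/NEG lists below.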
GPR = {'male': 'he', 'female': 'she'}
NOP = {'male': 'he', 'female': 'she'}
GNN = {'male': 'man', 'female': 'woman'}
PNR = {'male': 'himself', 'female': 'herself'}
GPN = {'male': 'his', 'female': 'her'}
GPS = {'male': 'him', 'female': 'her'}
POS = [
"great",
"smart",
"cunning",
"kind",
"amiable",
"brilliant",
"aspiring",
"articulate",
"compassionate",
"confident",
"cultured",
"creative",
"innovative",
"dedicated",
"dignified",
"dutiful",
"elegant",
"freethinking",
"gallant",
"flexible",
"focused",
"multi-faceted",
"open",
"meticulous",
"peaceful",
"practical",
"scrupulous",
"strong",
"trustful",
"upright",
"venturesome",
"wise",
"witty",
"suave",
"steadfast",
"respectful",
"responsible",
"principled",
"patient",
"insouciant",
"humble",
"generous",
"gentle",
"adaptive",
"articulate",
"clever",
]
NEG = [
"sloppy",
"mean",
"forceful",
"prideful",
"blunt",
"brutal",
"calculating",
"careless",
"childish",
"conceited",
"fearful",
"foolish",
"gloomy",
"graceless",
"mawkish",
"stingy",
"negligent",
"moody",
"offhand",
"furtive",
"opinionated",
"paranoid",
"presumptuous",
"reactive",
"repressed",
"resentful",
"scornful",
"single-minded",
"superficial",
"superstitious",
"tactless",
"tasteless",
"oppressive",
"domineering",
"venomous",
"unloveable",
"troublesome",
"thoughtless",
"strong-willed",
"self-indulgent",
"fickle",
"erratic",
"dissolute",
"discourteous",
"dishonest",
"destructive",
"demanding",
"cruel",
"crude",
"aloof",
"assertive",
"asocial",
"apathetic",
"barbaric",
"arrogant",
"authoritarian",
]
CHARACTER_DESCRIPTIONS = [
"#NAME was as #POS as #GPR was #POS.",
"#NAME was as #NEG as #GPR was #NEG.",
#"#NOM was as #NEG as #GPR was #NEG.",
#"#NOM was as #POS as #GPR was #POS.",
"#NAME was as #NEG as #GPR was #POS.",
"#NAME was as #POS as #GPR was #NEG.",
#"#NOM was as #NEG as #GPR was #POS.",
#"#NOM was as #POS as #GPR was #NEG.",
#"#NAME was as #POS as #NOM was #NEG.",
#"#NOM was as #POS as #NAME was #NEG.",
"#NAME, a #POS and #POS #GNN, had a knack for getting people to do things.",
"#NAME, as a #NEG #GNN, often got what #GPR wanted.",
"A #POS #GNN, #NAME was widely respected in #GPN social circles.",
"Even #NAME's closest acquaintances steered clear of #GPS because they knew what a #NEG #GNN #GPR was.",
"#NAME, who appeared #POS and #POS, occasionally slipped up and let people see #GPN #NEG side.",
#"#NOM wanted people to think #GPR was only #NEG and #NEG. If people found out #GPR was also #POS, they would never look at #GPR the same.",
"A #POS and #POS #GNN, #NAME was well-liked.",
"Everyone thought #NAME was such a #POS #GNN, but #GPR was quite #NEG as well.",
"A #NEG and #NEG #GNN, #NAME was not well-liked.",
"People admired #NAME for #GPN #POS personality.",
"People didn't like #NAME because of #GPN #NEG character.",
#"#NOM was envious of how #POS #NAME was."
#"#NAME never understood why #NOM was so #NEG.",
#"Some people wouldn't like to be #NEG or #NEG, but #NOM embraced these qualities in #PNR.",
]
CHARACTER_RELATIONSHIPS = [
"#NOM wondered how #NAME came to be such a #NEG and #NEG person.",
"#NOM always looked up to #NAME for #GPR #POS personality."
]
ACTION_PAIRS_BUT = [
'#NAME #ACT #2NAME, but #GPR #2ACT #2NAME.',
# 'As #PADJ as #NAME was #GPR #ACT #2NAME, but realizing what #GPR had '+
# ' done #GPR #2ACT #2NAME.',
]
ACTION_PAIRS_AND = [
'#NAME #ACT #2NAME, and #GPR #2ACT #2NAME.',
]
ACTION_PAIRS_BECAUSE = [
'#NAME #ACT #2NAME, because #NAME #2ACT #2NAME.',
]
ACTION_PAIRS_YET = [
'#NAME #ACT #2NAME, yet #NAME #2ACT #2NAME.',
]
ACTION_PAIRS_SO = [
'#NAME #ACT #2NAME, so #NAME #2ACT #2NAME.',
]
SETTING_DESCRIPTIONS = [
"#PREP the #NEUT_AMB #NEUT_LOC, #NAME waited for #NOM. #GPR knew #NOM would be coming, because #NOP always came at this time.",
"#NAME looked around the #NEG_AMB #NEG_LOC, wondering how #GPR had ended up here.",
"#NAME looked around the #NEUT_AMB #NEUT_LOC, wondering how #GPR had ended up here.",
"#NAME looked around the #POS_AMB #POS_LOC, wondering how #GPR had ended up here.",
"The #POS_LOC was #POS_AMB and #POS_AMB, causing a smile to light #NAME's face.",
"#NAME began to feel weary while looking around the #NEG_AMB, #NEG_AMB #NEG_LOC.",
"Excitement coursed through #NAME's veins as #GPR entered the #POS_LOC.",
"Fear coursed through #NAME's veins as #GPR entered the #NEG_LOC.",
"The #NEG_AMB and #NEG_AMB of the #NEG_LOC made #NAME's knees shake, but #GPR couldn't show fear.",
"#NAME examined #GPN surroundings with an air of indifference. The #NEUT_AMB state of the #NEUT_LOC was neither welcoming nor excluding.",
"AS #NAME entered the #POS_LOC, #GPR was struck by the sound of children's laughter filling the air.",
"The #NEG_AMB #NEG_LOC made #NAME uncomfortable, but #GPR had no option but to stay there.",
"The #POS_LOC always reminded #NAME of home because of how #POS_AMB and #POS_AMB it was.",
"The #NEG_LOC #NAME found #PNR in was #NEG_AMB and #NEG_AMB.",
"The #POS_LOC #NAME found #PNR in was #POS_AMB and #POS_AMB.",
"#NAME quite liked the #POS_AMB #POS_LOC and frequented as often #GPR could",
"#NOM hated #NEG_AMB #NEG_LOC s like the one #NOP currently found #PNR in.",
"#NAME walked confidently through the #AMB #LOC.",
"It was late in the evening when #NAME found #PNR in a #NEUT_AMB #NEUT_LOC.",
"It was late in the evening when #NAME found #PNR in a #NEG_AMB #NEG_LOC.",
"It was late in the evening when #NAME found #PNR in a #POS_AMB #POS_LOC.",
"In the late evening, #NAME found #PNR in a #NEUT_AMB #NEUT_LOC.",
"In the late evening, #NAME found #PNR in a #NEG_AMB #NEG_LOC.",
"In the late evening, #NAME found #PNR in a #POS_AMB #POS_LOC.",
"In the late evening, #NAME encountered #NOM in a #NEUT_AMB #NEUT_LOC.",
"In the late evening, #NAME encountered #NOM in a #NEG_AMB #NEG_LOC.",
"In the late evening, #NAME encountered #NOM in a #POS_AMB #POS_LOC.",
"The #NEUT_LOC was #NEUT_AMB and #NEUT_AMB.",
"In the early morning, people trickled #PREP #DET #POS_LOC. It was #POS_AMB and #POS_AMB.",
"Many people avoided the #NEG_LOC, no one wanted to spend time there.",
"The #NEG_LOC was #NEG_AMB and #NEG_AMB.",
"The #POS_LOC was #POS_AMB and #POS_AMB.",
"Of all the places #NAME expected to find #NOM, #DET #POS_LOC was not one of them.",
"Of all the places #NAME expected to find #NOM, #DET #NEG_LOC was not one of them.",
"Of all the places #NAME expected to find #NOM, #DET #NEUT_LOC was not one of them.",
"#NAME wasn't fond on #NEG_AMB places, like the #NEG_LOC #GPR was currently in.",
"#NAME glanced around, appreciating the #POS_AMB quality of the #POS_LOC #GPR was currently in.",
]
PAIRS_BUT = [
"#ACTOR1 #VERB1 #ACTOR2, but "
]
| {
"content_hash": "160983cdf60c6a6620adda7d11726d3f",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 144,
"avg_line_length": 34.470588235294116,
"alnum_prop": 0.6217698683568991,
"repo_name": "assamite/cc-codecamp16",
"id": "65b9526fdad61467a49b2405d51fee0f01cee34e",
"size": "8204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "54157"
}
],
"symlink_target": ""
} |
"""Support for switch sensor using I2C PCAL9535A chip."""
import logging
from pcal9535a import PCAL9535A
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_INVERT_LOGIC = "invert_logic"
CONF_I2C_ADDRESS = "i2c_address"
CONF_I2C_BUS = "i2c_bus"
CONF_PINS = "pins"
CONF_STRENGTH = "strength"
STRENGTH_025 = "0.25"
STRENGTH_050 = "0.5"
STRENGTH_075 = "0.75"
STRENGTH_100 = "1.0"
DEFAULT_INVERT_LOGIC = False
DEFAULT_I2C_ADDRESS = 0x20
DEFAULT_I2C_BUS = 1
DEFAULT_STRENGTH = STRENGTH_100
_SWITCHES_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PINS): _SWITCHES_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_STRENGTH, default=DEFAULT_STRENGTH): vol.In(
[STRENGTH_025, STRENGTH_050, STRENGTH_075, STRENGTH_100]
),
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),
vol.Optional(CONF_I2C_BUS, default=DEFAULT_I2C_BUS): cv.positive_int,
}
)
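# Illustrative platform configuration, expressed as the dict this schema would
# validate (pin numbers and names are placeholders):
#   {'pins': {0: 'Relay 1', 8: 'Relay 2'},
#    'i2c_address': 0x20, 'i2c_bus': 1,
#    'invert_logic': False, 'strength': '0.5'}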
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PCAL9535A devices."""
invert_logic = config[CONF_INVERT_LOGIC]
i2c_address = config[CONF_I2C_ADDRESS]
bus = config[CONF_I2C_BUS]
pcal = PCAL9535A(bus, i2c_address)
switches = []
pins = config[CONF_PINS]
for pin_num, pin_name in pins.items():
pin = pcal.get_pin(pin_num // 8, pin_num % 8)
switches.append(PCAL9535ASwitch(pin_name, pin, invert_logic))
add_entities(switches)
class PCAL9535ASwitch(SwitchDevice):
"""Representation of a PCAL9535A output pin."""
def __init__(self, name, pin, invert_logic):
"""Initialize the pin."""
self._name = name or DEVICE_DEFAULT_NAME
self._pin = pin
self._pin.inverted = invert_logic
self._pin.input = False
self._state = self._pin.level
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if optimistic updates are used."""
return True
def turn_on(self, **kwargs):
"""Turn the device on."""
self._pin.level = True
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
self._pin.level = False
self._state = False
self.schedule_update_ha_state()
| {
"content_hash": "c919b70315528da32a48ef84559584b8",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 85,
"avg_line_length": 28.352941176470587,
"alnum_prop": 0.646957123098202,
"repo_name": "postlund/home-assistant",
"id": "87c8ced1b0d406319dc84bff3e2b16a74b4ba4a0",
"size": "2892",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/pcal9535a/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
"""@file
@brief driver for sending raw packets to pato
Copyright (c) 2014-2015 Dimitry Kloper <[email protected]>.
All rights reserved.
@page License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of the Pato Project.
"""
import os
import sys
import pdb
import unittest
import serial
localdir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(localdir, '..'))
import pato.protocol as protocol
from util.crc import crc8
from test import logger
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.logger = logger
cls.transport = serial.Serial(port='COM102',
baudrate=9600,
timeout=10)
@classmethod
def tearDownClass(cls):
cls.transport.close()
def test_send_single_ping(self):
packet = [protocol.Cmd.PING, 0, 0]
packet += [crc8(packet), 0]
packet = "".join([chr(c) for c in packet])
pdb.set_trace()
self.transport.write(packet)
| {
"content_hash": "29fd6f331b0d5f26fe2b0c6219796e0f",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 70,
"avg_line_length": 34.25352112676056,
"alnum_prop": 0.7351973684210527,
"repo_name": "kloper/pato",
"id": "d42f072fd1bc69fe2a4fd99e54d3447694aceedf",
"size": "2449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/test_packet.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "14538"
},
{
"name": "C",
"bytes": "147405"
},
{
"name": "C++",
"bytes": "29000"
},
{
"name": "Python",
"bytes": "95196"
}
],
"symlink_target": ""
} |
"""
In this problem set you work with cities infobox data, audit it, come up with a cleaning idea and then clean it up.
If you look at the full city data, you will notice that there are a couple of values that seem to provide
the same information in different formats: "point" seems to be the combination of "wgs84_pos#lat" and "wgs84_pos#long".
However, we do not know if that is the case and should check if they are equivalent.
Finish the function check_loc(). It will receive 3 strings: first the combined value of "point", and then the
"wgs84_pos#" values separately. You have to extract the lat and long values from the "point" and compare
them to the "wgs84_pos#" values and return True or False.
Note that you do not have to fix the values, just determine if they are consistent. To fix them in this case
you would need more information. Feel free to discuss possible strategies for fixing this on the discussion forum.
The rest of the code is just an example of how this function can be used.
Changes to the "process_file" function will not be taken into account.
"""
import csv
import pprint
CITIES = 'cities.csv'
def check_loc(point, lat, longi):
# YOUR CODE HERE
points = point.split(' ')
return points[0] == lat and points[1] == longi
def process_file(filename):
data = []
with open(filename, "r") as f:
reader = csv.DictReader(f)
#skipping the extra matadata
for i in range(3):
l = reader.next()
# processing file
for line in reader:
# calling your function to check the location
result = check_loc(line["point"], line["wgs84_pos#lat"], line["wgs84_pos#long"])
if not result:
print "{}: {} != {} {}".format(line["name"], line["point"], line["wgs84_pos#lat"], line["wgs84_pos#long"])
data.append(line)
return data
def test():
assert check_loc("33.08 75.28", "33.08", "75.28") == True
assert check_loc("44.57833333333333 -91.21833333333333", "44.5783", "-91.2183") == False
if __name__ == "__main__":
test() | {
"content_hash": "98a6c50479f067d92163ce7b2837ff35",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 122,
"avg_line_length": 39.18867924528302,
"alnum_prop": 0.6706788637457872,
"repo_name": "napjon/moocs_solution",
"id": "e33cd0bf93eb922033f87f2ce4c777a099794466",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DataWranglingMongoDB/Lesson3/location.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4046"
},
{
"name": "Jupyter Notebook",
"bytes": "9892144"
},
{
"name": "Matlab",
"bytes": "300578"
},
{
"name": "Python",
"bytes": "441602"
},
{
"name": "R",
"bytes": "7797"
},
{
"name": "Shell",
"bytes": "681"
}
],
"symlink_target": ""
} |
import itertools
import numpy as np
import pytest
from numcodecs.checksum32 import CRC32, Adler32
from numcodecs.tests.common import (check_encode_decode, check_config, check_repr,
check_backwards_compatibility,
check_err_encode_object_buffer)
# mix of dtypes: integer, float, bool, string
# mix of shapes: 1D, 2D, 3D
# mix of orders: C, F
arrays = [
np.arange(1000, dtype='i4'),
np.linspace(1000, 1001, 1000, dtype='f8'),
np.random.normal(loc=1000, scale=1, size=(100, 10)),
np.random.randint(0, 2, size=1000, dtype=bool).reshape(100, 10, order='F'),
np.random.choice([b'a', b'bb', b'ccc'], size=1000).reshape(10, 10, 10)
]
codecs = [CRC32(), Adler32()]
def test_encode_decode():
for codec, arr in itertools.product(codecs, arrays):
check_encode_decode(arr, codec)
def test_errors():
for codec, arr in itertools.product(codecs, arrays):
enc = codec.encode(arr)
with pytest.raises(RuntimeError):
codec.decode(enc[:-1])
def test_config():
for codec in codecs:
check_config(codec)
def test_repr():
check_repr("CRC32()")
check_repr("Adler32()")
def test_backwards_compatibility():
check_backwards_compatibility(CRC32.codec_id, arrays, [CRC32()])
check_backwards_compatibility(Adler32.codec_id, arrays, [Adler32()])
def test_err_encode_object_buffer():
check_err_encode_object_buffer(CRC32())
check_err_encode_object_buffer(Adler32())
| {
"content_hash": "af1929014dfc6a5a5f2ac4ddcf19e27f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 82,
"avg_line_length": 26.70175438596491,
"alnum_prop": 0.6438896189224704,
"repo_name": "zarr-developers/numcodecs",
"id": "2cacc9202a529ead9bc9f89c7dc2bf92e0a13220",
"size": "1522",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "numcodecs/tests/test_checksum32.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "45959"
},
{
"name": "Jupyter Notebook",
"bytes": "16894"
},
{
"name": "Python",
"bytes": "149828"
}
],
"symlink_target": ""
} |
import copy
from . import ElementTree
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding.
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
file = open(href)
if parse == "xml":
data = ElementTree.parse(file).getroot()
else:
data = file.read()
if encoding:
data = data.decode(encoding)
file.close()
return data
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
def include(elem, loader=None):
if loader is None:
loader = default_loader
# look for xinclude elements
i = 0
while i < len(elem):
e = elem[i]
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = e.get("href")
parse = e.get("parse", "xml")
if parse == "xml":
node = loader(href, parse)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
node = copy.copy(node)
if e.tail:
node.tail = (node.tail or "") + e.tail
elem[i] = node
elif parse == "text":
text = loader(href, parse, e.get("encoding"))
if text is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
if i:
node = elem[i-1]
node.tail = (node.tail or "") + text
else:
elem.text = (elem.text or "") + text + (e.tail or "")
del elem[i]
continue
else:
raise FatalIncludeError(
"unknown parse type in xi:include tag (%r)" % parse
)
elif e.tag == XINCLUDE_FALLBACK:
raise FatalIncludeError(
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
include(e, loader)
i = i + 1
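# Usage sketch (illustrative file name): expand xi:include directives in place
# after parsing a document:
#   from xml.etree import ElementTree, ElementInclude
#   tree = ElementTree.parse('document.xml')
#   ElementInclude.include(tree.getroot())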
| {
"content_hash": "40665c28aa65e1e891a2e9e8ac26a806",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 74,
"avg_line_length": 33.634408602150536,
"alnum_prop": 0.5476342710997443,
"repo_name": "MalloyPower/parsing-python",
"id": "d7f85b3217c4abf322a65aaf15583409c0919a57",
"size": "5045",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.1/Lib/xml/etree/ElementInclude.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
"""PPO."""
import collections
import functools
from clu import metric_writers
import deluca.core
import jax
import jax.numpy as jnp
import optax
import time
# pylint:disable=invalid-name
Rollout = collections.namedtuple("Rollout", [
"agent_states", "actions", "losses"
])
@functools.partial(
jax.jit, static_argnums=(
5,
6,
))
def apg_rollout(env, env_start_state, env_init_obs, agent, agent_start_state, unroll_length, loss_func):
"""Comment."""
def rollout_step(inp, counter):
env_state, env_obs, agent_state = inp
agent_next_state, action = agent(agent_state, env_obs)
env_next_state, env_next_obs = env(env_state, action)
loss = loss_func(env_state, env_obs, action, env, counter)
return (env_next_state, env_next_obs,
agent_next_state), (env_next_state, env_next_obs, agent_next_state,
action, loss)
_, t = jax.lax.scan(rollout_step,
(env_start_state, env_init_obs, agent_start_state),
jnp.arange(unroll_length))
return Rollout(t[2], t[3], t[4])
@functools.partial(
jax.jit, static_argnums=(
5,
6,
))
def apg_parallel_rollouts(env, env_start_states, env_init_obs, agent,
agent_start_states, unroll_length, loss_func):
"""Parallelize rollouts."""
# if agent_seeds == None:
# agent_seeds = jnp.arange(num)
# env_start_states, env_init_obs = env.reset()
all_rollouts = jax.vmap(apg_rollout,
(None, 0, 0, None, 0, None, None))(
env, env_start_states, env_init_obs, agent,
agent_start_states, unroll_length, loss_func)
return all_rollouts
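# Note on the vmap above (descriptive comment, not from the original authors):
# in_axes=(None, 0, 0, None, 0, None, None) broadcasts env, agent,
# unroll_length and loss_func while mapping over the leading (batch) axis of
# the environment start states/observations and agent start states, so each
# batch entry yields an independent Rollout.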
def apg_loss(agent, env, env_start_states, env_init_obs, agent_init_states,
unroll_length, loss_func):
"""Serialize the experiences."""
rollouts = apg_parallel_rollouts(env, env_start_states, env_init_obs, agent,
agent_init_states, unroll_length, loss_func)
return rollouts.losses.mean()
@functools.partial(
jax.jit, static_argnums=(
0,
1,
8,
9,
))
def train_chunk(chunk_size, optim, optim_state, agent, env, env_start_states,
env_init_obs, agent_init_states, unroll_length, loss_func):
"""Compilable train step.
Runs an entire epoch of training (i.e. the loop over minibatches within
an epoch is included here for performance reasons).
"""
@jax.jit
def single_step(carry, inp):
agent, optim_state = carry
grad_fn = jax.value_and_grad(apg_loss)
l, grads = grad_fn(agent, env, env_start_states, env_init_obs, agent_init_states, unroll_length, loss_func)
updates, optim_state = optim.update(grads, optim_state, agent)
agent = optax.apply_updates(agent, updates)
return (agent, optim_state), l
(agent, optim_state), losses = jax.lax.scan(single_step, (agent, optim_state), None, length=chunk_size)
return agent, optim_state, losses
def train(env, agent, loss_func, horizon, config, workdir=None):
"""Main training loop.
config
- num_episodes
- episodes_per_eval
- training_env_batch_size
- eval_env_batch_size = 32
- optimizer
- learning_rate
- seed = 1
"""
print(config)
if workdir is not None:
writer = metric_writers.create_default_writer(
logdir=workdir, just_logging=jax.process_index() != 0)
writer.write_hparams(dict(config))
key = jax.random.PRNGKey(config.seed)
key_train_agent, key_eval_agent, key_train_env, key_eval_env, key_train, key = jax.random.split(key, 6)
key_train_envs = jax.random.split(key_train_env, config.training_env_batch_size)
key_train_agents = jax.random.split(key_train_agent, config.training_env_batch_size)
key_eval_envs = jax.random.split(key_eval_env, config.eval_env_batch_size)
key_eval_agents = jax.random.split(key_eval_agent, config.eval_env_batch_size)
#TODO(danielsuo): The following vmap code does not work.
train_env_start_states, train_env_init_obs = jax.vmap(env.init)(key_train_envs)
eval_env_start_states, eval_env_init_obs = jax.vmap(env.init)(key_eval_envs)
print(train_env_start_states)
print(train_env_init_obs)
# qtrain_init_list = list(map(env.init, key_train_envs))
# qtrain_env_start_states = [a for (a,_) in qtrain_init_list]
# qtrain_env_init_obs = [b for (_,b) in qtrain_init_list]
# print(qtrain_env_start_states)
# print(qtrain_env_init_obs)
# eval_init_list = list(map(env.init, key_eval_envs))
# eval_env_start_states = [a for (a,_) in eval_init_list]
# eval_env_init_obs = [b for (_,b) in eval_init_list]
train_agent_start_states = jax.vmap(agent.init)(key_train_agents)
eval_agent_start_states = jax.vmap(agent.init)(key_eval_agents)
if config.optimizer == "Adam":
optim = optax.adam(learning_rate=config.learning_rate)
else:
# default is SGD
optim = optax.sgd(learning_rate=config.learning_rate)
optim_state = optim.init(agent)
for episode in range(0, config.num_episodes, config.episodes_per_eval):
# Eval Step
tt = time.time()
eval_rollouts = apg_parallel_rollouts(env, eval_env_start_states,
eval_env_init_obs, agent,
eval_agent_start_states, horizon,
loss_func)
test_score = eval_rollouts.losses.mean()
print(f"TESTING episode {episode} - score:{test_score} - time:{time.time()-tt}")
# Training Step
tt = time.time()
agent, optim_state, losses = train_chunk(config.episodes_per_eval, optim,
optim_state, agent, env,
train_env_start_states,
train_env_init_obs,
train_agent_start_states, horizon,
loss_func)
done_eps = episode + config.episodes_per_eval - 1
print(f"TRAINING: episode {done_eps} - score:{losses[0]} - time {time.time() - tt}")
if workdir is not None:
for (i, loss) in enumerate(reversed(losses)):
writer.write_scalars(episode+i, {"train_score": loss})
writer.write_scalars(episode, {"test_score": test_score})
return optim_state, agent
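# Usage sketch: `train` above only needs a config object exposing the
# attributes listed in its docstring. A minimal way to build one is shown
# below with ml_collections.ConfigDict; ml_collections is an assumption here,
# and any attribute-style container with the same fields would work. The
# `env`, `agent` and `loss_func` arguments of `train` are not constructed
# here -- they must match the interfaces used by `apg_rollout`.
def example_config():
  """Build a config with the fields `train` expects (sketch only)."""
  from ml_collections import ConfigDict
  config = ConfigDict()
  config.seed = 1
  config.num_episodes = 1000
  config.episodes_per_eval = 10
  config.training_env_batch_size = 16
  config.eval_env_batch_size = 32
  config.optimizer = "Adam"  # anything else falls back to SGD in train()
  config.learning_rate = 1e-3
  return config
# Then, with suitable env/agent/loss_func objects:
#   optim_state, trained_agent = train(env, agent, loss_func, horizon=100,
#                                      config=example_config())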
| {
"content_hash": "35d0fa1ccf0726b5ddd52c9771f365c0",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 111,
"avg_line_length": 37.19298245614035,
"alnum_prop": 0.6177672955974842,
"repo_name": "google/deluca",
"id": "d725c92b0b26536f00837808e7a1b2e7056146f4",
"size": "6944",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "deluca/training/apg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "297090"
}
],
"symlink_target": ""
} |
"""
Multi-dimensional Scaling (MDS)
"""
# author: Nelle Varoquaux <[email protected]>
# Licence: BSD
import numpy as np
import warnings
from ..base import BaseEstimator
from ..metrics import euclidean_distances
from ..utils import check_random_state, check_array
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..isotonic import IsotonicRegression
def _smacof_single(similarities, metric=True, n_components=2, init=None,
max_iter=300, verbose=0, eps=1e-3, random_state=None):
"""
Computes multidimensional scaling using SMACOF algorithm
Parameters
----------
similarities: symmetric ndarray, shape [n * n]
similarities between the points
metric: boolean, optional, default: True
compute metric or nonmetric SMACOF algorithm
n_components: int, optional, default: 2
number of dimension in which to immerse the similarities
overwritten if initial array is provided.
init: {None or ndarray}, optional
if None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array
max_iter: int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose: int, optional, default: 0
level of verbosity
    eps: float, optional, default: 1e-3
        relative tolerance w.r.t stress to declare convergence
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
X: ndarray (n_samples, n_components), float
coordinates of the n_samples points in a n_components-space
stress_: float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
n_iter : int
Number of iterations run.
"""
n_samples = similarities.shape[0]
random_state = check_random_state(random_state)
if similarities.shape[0] != similarities.shape[1]:
raise ValueError("similarities must be a square array (shape=%d)" %
n_samples)
if not np.allclose(similarities, similarities.T):
raise ValueError("similarities must be symmetric")
sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel()
sim_flat_w = sim_flat[sim_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.rand(n_samples * n_components)
X = X.reshape((n_samples, n_components))
else:
        # overrides the n_components parameter
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError("init matrix should be of shape (%d, %d)" %
(n_samples, n_components))
X = init
old_stress = None
ir = IsotonicRegression()
for it in range(max_iter):
# Compute distance and monotonic regression
dis = euclidean_distances(X)
if metric:
disparities = similarities
else:
dis_flat = dis.ravel()
# similarities with 0 are considered as missing values
dis_flat_w = dis_flat[sim_flat != 0]
# Compute the disparities using a monotonic regression
disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
disparities = dis_flat.copy()
disparities[sim_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
(disparities ** 2).sum())
# Compute stress
stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2
# Update X using the Guttman transform
dis[dis == 0] = 1e-5
ratio = disparities / dis
B = - ratio
B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
X = 1. / n_samples * np.dot(B, X)
dis = np.sqrt((X ** 2).sum(axis=1)).sum()
if verbose >= 2:
print('it: %d, stress %s' % (it, stress))
if old_stress is not None:
            if (old_stress - stress / dis) < eps:
if verbose:
print('breaking at iteration %d with stress %s' % (it,
stress))
break
old_stress = stress / dis
return X, stress, it + 1
def smacof(similarities, metric=True, n_components=2, init=None, n_init=8,
n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state=None,
return_n_iter=False):
"""
Computes multidimensional scaling using SMACOF (Scaling by Majorizing a
Complicated Function) algorithm
The SMACOF algorithm is a multidimensional scaling algorithm: it minimizes
    an objective function, the *stress*, using a majorization technique. The
Stress Majorization, also known as the Guttman Transform, guarantees a
monotone convergence of Stress, and is more powerful than traditional
techniques such as gradient descent.
    The SMACOF algorithm for metric MDS can be summarized by the following steps:
1. Set an initial start configuration, randomly or not.
2. Compute the stress
3. Compute the Guttman Transform
4. Iterate 2 and 3 until convergence.
    The nonmetric algorithm adds a monotonic regression step before computing
the stress.
Parameters
----------
similarities : symmetric ndarray, shape (n_samples, n_samples)
similarities between the points
metric : boolean, optional, default: True
compute metric or nonmetric SMACOF algorithm
n_components : int, optional, default: 2
number of dimension in which to immerse the similarities
overridden if initial array is provided.
init : {None or ndarray of shape (n_samples, n_components)}, optional
if None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array
n_init : int, optional, default: 8
Number of time the smacof algorithm will be run with different
initialisation. The final results will be the best output of the
n_init consecutive runs in terms of stress.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose : int, optional, default: 0
level of verbosity
    eps : float, optional, default: 1e-3
        relative tolerance w.r.t stress to declare convergence
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
    X : ndarray (n_samples, n_components)
Coordinates of the n_samples points in a n_components-space
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
n_iter : int
The number of iterations corresponding to the best stress.
Returned only if `return_n_iter` is set to True.
Notes
-----
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
similarities = check_array(similarities)
random_state = check_random_state(random_state)
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
if not n_init == 1:
warnings.warn(
'Explicit initial positions passed: '
'performing only one init of the MDS instead of %d'
% n_init)
n_init = 1
best_pos, best_stress = None, None
if n_jobs == 1:
for it in range(n_init):
pos, stress, n_iter_ = _smacof_single(
similarities, metric=metric,
n_components=n_components, init=init,
max_iter=max_iter, verbose=verbose,
eps=eps, random_state=random_state)
if best_stress is None or stress < best_stress:
best_stress = stress
best_pos = pos.copy()
best_iter = n_iter_
else:
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
delayed(_smacof_single)(
similarities, metric=metric, n_components=n_components,
init=init, max_iter=max_iter, verbose=verbose, eps=eps,
random_state=seed)
for seed in seeds)
positions, stress, n_iters = zip(*results)
best = np.argmin(stress)
best_stress = stress[best]
best_pos = positions[best]
best_iter = n_iters[best]
if return_n_iter:
return best_pos, best_stress, best_iter
else:
return best_pos, best_stress
class MDS(BaseEstimator):
"""Multidimensional scaling
Parameters
----------
metric : boolean, optional, default: True
compute metric or nonmetric SMACOF (Scaling by Majorizing a
Complicated Function) algorithm
n_components : int, optional, default: 2
number of dimension in which to immerse the similarities
overridden if initial array is provided.
n_init : int, optional, default: 4
Number of time the smacof algorithm will be run with different
initialisation. The final results will be the best output of the
n_init consecutive runs in terms of stress.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose : int, optional, default: 0
level of verbosity
    eps : float, optional, default: 1e-3
        relative tolerance w.r.t stress to declare convergence
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
dissimilarity : string
Which dissimilarity measure to use.
Supported are 'euclidean' and 'precomputed'.
Attributes
----------
    embedding_ : array-like, shape (n_samples, n_components)
Stores the position of the dataset in the embedding space
stress_ : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
References
----------
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
def __init__(self, n_components=2, metric=True, n_init=4,
max_iter=300, verbose=0, eps=1e-3, n_jobs=1,
random_state=None, dissimilarity="euclidean"):
self.n_components = n_components
self.dissimilarity = dissimilarity
self.metric = metric
self.n_init = n_init
self.max_iter = max_iter
self.eps = eps
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
@property
def _pairwise(self):
        return self.dissimilarity == "precomputed"
def fit(self, X, init=None, y=None):
"""
Computes the position of the points in the embedding space
Parameters
----------
X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
if dissimilarity='precomputed'
Input data.
        init : {None or ndarray, shape (n_samples, n_components)}, optional
If None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array.
"""
self.fit_transform(X, init=init)
return self
def fit_transform(self, X, init=None, y=None):
"""
Fit the data from X, and returns the embedded coordinates
Parameters
----------
X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
if dissimilarity='precomputed'
Input data.
        init : {None or ndarray, shape (n_samples, n_components)}, optional
If None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array.
"""
if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
warnings.warn("The MDS API has changed. ``fit`` now constructs an"
" dissimilarity matrix from data. To use a custom "
"dissimilarity matrix, set "
"``dissimilarity=precomputed``.")
if self.dissimilarity == "precomputed":
self.dissimilarity_matrix_ = X
elif self.dissimilarity == "euclidean":
self.dissimilarity_matrix_ = euclidean_distances(X)
else:
raise ValueError("Proximity must be 'precomputed' or 'euclidean'."
" Got %s instead" % str(self.dissimilarity))
self.embedding_, self.stress_, self.n_iter_ = smacof(
self.dissimilarity_matrix_, metric=self.metric,
n_components=self.n_components, init=init, n_init=self.n_init,
n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
eps=self.eps, random_state=self.random_state,
return_n_iter=True)
return self.embedding_
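# Usage sketch: how the estimator and function above are typically driven.
# This assumes the module is importable as sklearn.manifold (as in its home
# repository); the toy data and random_state value are arbitrary.
if __name__ == "__main__":
    from sklearn.manifold import MDS, smacof
    from sklearn.metrics.pairwise import euclidean_distances
    import numpy as np

    points = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])

    # Fit on the raw feature matrix; euclidean dissimilarities are computed
    # internally because dissimilarity defaults to "euclidean".
    mds = MDS(n_components=2, random_state=0)
    embedding = mds.fit_transform(points)
    print(embedding.shape, mds.stress_)

    # Or run SMACOF directly on a precomputed dissimilarity matrix.
    pos, stress = smacof(euclidean_distances(points), n_components=2,
                         random_state=0)
    print(pos.shape, stress)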
| {
"content_hash": "ac7247af53c32294847adee419f17d7e",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 79,
"avg_line_length": 36.52997601918465,
"alnum_prop": 0.6248276767544148,
"repo_name": "hitszxp/scikit-learn",
"id": "6be371171fe8bb223d1102ce1f3e6a90824ccfd1",
"size": "15233",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "sklearn/manifold/mds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18556981"
},
{
"name": "C++",
"bytes": "1808980"
},
{
"name": "CSS",
"bytes": "1503"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Makefile",
"bytes": "4897"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5658750"
},
{
"name": "Shell",
"bytes": "8730"
}
],
"symlink_target": ""
} |
import re
import discord
import Data
import deco
import utilities
import misc
import undecorated
import MemeMachine
import owner
client = discord.Client()
@client.event
async def on_ready():
print('Logged in !')
print('------------')
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith(Data.PREFIX):
if message.content.startswith(Data.MEME_REDIRECT):
await MemeMachine.meme_machine(client, message)
return
foo_name = message.content.split()[0][len(Data.PREFIX):]
try:
await deco.foo_dict[foo_name](client, message)
except KeyError:
print("Command function not found, must be a type request !")
pass
if re.fullmatch(Data.MBTI, message.content[len(Data.PREFIX):].lower()):
await undecorated.typeroles(client, message)
@client.event
async def on_member_join(member):
bot_message = client.get_channel('139718812705619968')
join = 'Welcome on this server, {}.\n'.format(member.mention)
join += 'If you know your type, you can set it using /your-type, e.g. /ESFP\n'
    join += 'For a general guideline, please visit #code-of-conduct.\n'
join += 'Otherwise, have fun, and don\'t be shy to say hello, we are a friendly bunch of people :)'
await client.send_message(member.server, join)
await client.send_message(bot_message, '\'{}\' (UID: {}) has joined the server'.format(member.name, member.id))
print('\'{}\' (UID: {}) has joined the server'.format(member.name, member.id))
@client.event
async def on_member_remove(member):
bot_message = client.get_channel('139718812705619968')
await client.send_message(bot_message, '\'{}\' (UID: {}) has left the server'.format(member.name, member.id))
client.run(Data.TOKEN)
| {
"content_hash": "9cc8403b4f2caa3a8218cba5dafa2345",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 115,
"avg_line_length": 32.70175438596491,
"alnum_prop": 0.6652360515021459,
"repo_name": "zenAndroid/zenBot",
"id": "1d31efbfe25f1c2284b99acd525710c6aa2b45d3",
"size": "1864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zenBot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20566"
}
],
"symlink_target": ""
} |
""" discover and run doctests in modules and test files."""
from __future__ import absolute_import
import traceback
import pytest
from _pytest._code.code import TerminalRepr, ReprFileLocation, ExceptionInfo
from _pytest.python import FixtureRequest
def pytest_addoption(parser):
parser.addini('doctest_optionflags', 'option flags for doctests',
type="args", default=["ELLIPSIS"])
group = parser.getgroup("collect")
group.addoption("--doctest-modules",
action="store_true", default=False,
help="run doctests in all .py modules",
dest="doctestmodules")
group.addoption("--doctest-glob",
action="append", default=[], metavar="pat",
help="doctests file matching pattern, default: test*.txt",
dest="doctestglob")
group.addoption("--doctest-ignore-import-errors",
action="store_true", default=False,
help="ignore doctest ImportErrors",
dest="doctest_ignore_import_errors")
def pytest_collect_file(path, parent):
config = parent.config
if path.ext == ".py":
if config.option.doctestmodules:
return DoctestModule(path, parent)
elif _is_doctest(config, path, parent):
return DoctestTextfile(path, parent)
def _is_doctest(config, path, parent):
if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path):
return True
globs = config.getoption("doctestglob") or ['test*.txt']
for glob in globs:
if path.check(fnmatch=glob):
return True
return False
class ReprFailDoctest(TerminalRepr):
def __init__(self, reprlocation, lines):
self.reprlocation = reprlocation
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
self.reprlocation.toterminal(tw)
class DoctestItem(pytest.Item):
def __init__(self, name, parent, runner=None, dtest=None):
super(DoctestItem, self).__init__(name, parent)
self.runner = runner
self.dtest = dtest
self.obj = None
self.fixture_request = None
def setup(self):
if self.dtest is not None:
self.fixture_request = _setup_fixtures(self)
globs = dict(getfixture=self.fixture_request.getfixturevalue)
self.dtest.globs.update(globs)
def runtest(self):
_check_all_skipped(self.dtest)
self.runner.run(self.dtest)
def repr_failure(self, excinfo):
import doctest
if excinfo.errisinstance((doctest.DocTestFailure,
doctest.UnexpectedException)):
doctestfailure = excinfo.value
example = doctestfailure.example
test = doctestfailure.test
filename = test.filename
if test.lineno is None:
lineno = None
else:
lineno = test.lineno + example.lineno + 1
message = excinfo.type.__name__
reprlocation = ReprFileLocation(filename, lineno, message)
checker = _get_checker()
REPORT_UDIFF = doctest.REPORT_UDIFF
if lineno is not None:
lines = doctestfailure.test.docstring.splitlines(False)
# add line numbers to the left of the error message
lines = ["%03d %s" % (i + test.lineno + 1, x)
for (i, x) in enumerate(lines)]
# trim docstring error lines to 10
lines = lines[example.lineno - 9:example.lineno + 1]
else:
lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example']
indent = '>>>'
for line in example.source.splitlines():
lines.append('??? %s %s' % (indent, line))
indent = '...'
if excinfo.errisinstance(doctest.DocTestFailure):
lines += checker.output_difference(example,
doctestfailure.got, REPORT_UDIFF).split("\n")
else:
inner_excinfo = ExceptionInfo(excinfo.value.exc_info)
lines += ["UNEXPECTED EXCEPTION: %s" %
repr(inner_excinfo.value)]
lines += traceback.format_exception(*excinfo.value.exc_info)
return ReprFailDoctest(reprlocation, lines)
else:
return super(DoctestItem, self).repr_failure(excinfo)
def reportinfo(self):
return self.fspath, None, "[doctest] %s" % self.name
def _get_flag_lookup():
import doctest
return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
ELLIPSIS=doctest.ELLIPSIS,
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
ALLOW_UNICODE=_get_allow_unicode_flag(),
ALLOW_BYTES=_get_allow_bytes_flag(),
)
def get_optionflags(parent):
optionflags_str = parent.config.getini("doctest_optionflags")
flag_lookup_table = _get_flag_lookup()
flag_acc = 0
for flag in optionflags_str:
flag_acc |= flag_lookup_table[flag]
return flag_acc
class DoctestTextfile(pytest.Module):
obj = None
def collect(self):
import doctest
# inspired by doctest.testfile; ideally we would use it directly,
# but it doesn't support passing a custom checker
text = self.fspath.read()
filename = str(self.fspath)
name = self.fspath.basename
globs = {'__name__': '__main__'}
optionflags = get_optionflags(self)
runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
checker=_get_checker())
parser = doctest.DocTestParser()
test = parser.get_doctest(text, globs, name, filename, 0)
if test.examples:
yield DoctestItem(test.name, self, runner, test)
def _check_all_skipped(test):
"""raises pytest.skip() if all examples in the given DocTest have the SKIP
option set.
"""
import doctest
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
if all_skipped:
pytest.skip('all tests skipped by +SKIP option')
class DoctestModule(pytest.Module):
def collect(self):
import doctest
if self.fspath.basename == "conftest.py":
module = self.config.pluginmanager._importconftest(self.fspath)
else:
try:
module = self.fspath.pyimport()
except ImportError:
if self.config.getvalue('doctest_ignore_import_errors'):
pytest.skip('unable to import module %r' % self.fspath)
else:
raise
# uses internal doctest module parsing mechanism
finder = doctest.DocTestFinder()
optionflags = get_optionflags(self)
runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
checker=_get_checker())
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests
yield DoctestItem(test.name, self, runner, test)
def _setup_fixtures(doctest_item):
"""
Used by DoctestTextfile and DoctestItem to setup fixture information.
"""
def func():
pass
doctest_item.funcargs = {}
fm = doctest_item.session._fixturemanager
doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func,
cls=None, funcargs=False)
fixture_request = FixtureRequest(doctest_item)
fixture_request._fillfixtures()
return fixture_request
def _get_checker():
"""
Returns a doctest.OutputChecker subclass that takes in account the
ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
to strip b'' prefixes.
Useful when the same doctest should run in Python 2 and Python 3.
An inner class is used to avoid importing "doctest" at the module
level.
"""
if hasattr(_get_checker, 'LiteralsOutputChecker'):
return _get_checker.LiteralsOutputChecker()
import doctest
import re
class LiteralsOutputChecker(doctest.OutputChecker):
"""
Copied from doctest_nose_plugin.py from the nltk project:
https://github.com/nltk/nltk
Further extended to also support byte literals.
"""
_unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
_bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
def check_output(self, want, got, optionflags):
res = doctest.OutputChecker.check_output(self, want, got,
optionflags)
if res:
return True
allow_unicode = optionflags & _get_allow_unicode_flag()
allow_bytes = optionflags & _get_allow_bytes_flag()
if not allow_unicode and not allow_bytes:
return False
else: # pragma: no cover
def remove_prefixes(regex, txt):
return re.sub(regex, r'\1\2', txt)
if allow_unicode:
want = remove_prefixes(self._unicode_literal_re, want)
got = remove_prefixes(self._unicode_literal_re, got)
if allow_bytes:
want = remove_prefixes(self._bytes_literal_re, want)
got = remove_prefixes(self._bytes_literal_re, got)
res = doctest.OutputChecker.check_output(self, want, got,
optionflags)
return res
_get_checker.LiteralsOutputChecker = LiteralsOutputChecker
return _get_checker.LiteralsOutputChecker()
def _get_allow_unicode_flag():
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag('ALLOW_UNICODE')
def _get_allow_bytes_flag():
"""
Registers and returns the ALLOW_BYTES flag.
"""
import doctest
return doctest.register_optionflag('ALLOW_BYTES')
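# Usage sketch for the ALLOW_UNICODE flag handled above. The ini option name,
# the flag names and --doctest-modules come from this plugin; the sample
# module and its contents are hypothetical.
#
#   # pytest.ini
#   [pytest]
#   doctest_optionflags = ELLIPSIS ALLOW_UNICODE
#
#   # sample_module.py -- the same doctest then passes on Python 2, where
#   # greet() reprs as u'hello', and on Python 3, where it reprs as 'hello':
#   def greet():
#       """
#       >>> greet()
#       'hello'
#       """
#       return u'hello'
#
# Collect it with: pytest --doctest-modules sample_module.py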
| {
"content_hash": "6d5eb492a81d72dcf353b6d72f8a8560",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 91,
"avg_line_length": 35.96527777777778,
"alnum_prop": 0.5954817532342151,
"repo_name": "JonathonSonesen/pytest",
"id": "1fd8b419be97b6b8d5ffb7b046e1c093fe3e54f2",
"size": "10358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_pytest/doctest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1230310"
},
{
"name": "Shell",
"bytes": "282"
}
],
"symlink_target": ""
} |
from RMLApp.models import RemindMe
from RMLApp.serializers import RemindMeSerializer
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from Remind_Me_Later.tasks import sendEmail, sendSMS
@api_view(['GET'])
def remindme_list(request, format=None):
if request.method == 'GET':
remindme = RemindMe.objects.all()
serializer = RemindMeSerializer(remindme, many=True)
return Response(serializer.data)
return Response({'status':'error'}, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def remindme_add(request, format=None):
if request.method == 'POST':
serializer = RemindMeSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
#print(serializer.data['id'])
modelid = serializer.data['id']
email = serializer.validated_data['remind_email']
phone = serializer.validated_data['remind_phone']
time = serializer.validated_data['remind_date']
if email:
sendEmail.apply_async( eta=time, kwargs={'id': modelid})
if phone:
sendSMS.apply_async( eta=time, kwargs={'id': modelid})
#print()
#time = datetime(time)
return Response({'status':'ok'},status=status.HTTP_201_CREATED)
return Response({'status':'error','error':serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
return Response({'status':'error'}, status=status.HTTP_400_BAD_REQUEST)
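# Usage sketch: scheduling a reminder through the POST view above from a
# client. The URL path is an assumption (it depends on the project's
# urls.py), and only the fields visible in this view are shown; the
# serializer may require more.
def example_schedule_reminder():
    """Sketch only: POST a reminder; celery runs sendEmail/sendSMS at eta."""
    import requests
    payload = {
        'remind_email': 'user@example.com',
        'remind_phone': '',                     # empty -> no SMS task queued
        'remind_date': '2024-01-01T09:00:00Z',  # becomes the apply_async eta
    }
    return requests.post('http://localhost:8000/remindme/add/', json=payload)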
| {
"content_hash": "134080814510a91f3a707e95cc841dc3",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 105,
"avg_line_length": 39.275,
"alnum_prop": 0.6537237428389561,
"repo_name": "sandeep-daiict/Remind-Me-Later",
"id": "7f8c81a73807a3ff7a5b55dacb377d8a885f764c",
"size": "1571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Remind_Me_Later/RMLApp/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12792"
},
{
"name": "Shell",
"bytes": "176"
}
],
"symlink_target": ""
} |
from BaseHTTPServer import BaseHTTPRequestHandler
import cStringIO
import json
import traceback
import config
from src.music import MusicFinder, MusicPlayer
TEXT_HTML = 'text/html'
APPLICATION_JSON = 'application/json'
APPLICATION_JAVA_SCRIPT = 'application/javascript'
class RemotePlayHttpHandler(BaseHTTPRequestHandler):
_finder = None
_music_player = None
def __init__(self, request, client_address, server):
if self.__class__._finder is None:
self.__class__._finder = MusicFinder(config.MUSIC_DIRECTORY)
if self.__class__._music_player is None:
self.__class__._music_player = MusicPlayer(self.__class__._finder)
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def send_text_response(self, type, output):
self.send_response(200)
self.send_header('Content-type', "%s; charset=utf-8" % type)
self.end_headers()
self.wfile.write(output.getvalue())
def get_html_begins(self):
result = "<html><head><title></title></head><body>"
return result
def get_html_ends(self):
result = "</body></html>"
return result
def do_GET(self):
try:
output = cStringIO.StringIO()
response_type = TEXT_HTML
if self.path == '/':
output.write(self.get_html_begins())
output.write('<p>Remote Play Index</p>')
output.write(self.get_html_ends())
if self.path.startswith("/html/"):
html_file = self.path.replace('/html/', '', 1)
with open('html/%s.htm' % html_file) as handle:
content = handle.read()
output.write(content)
if self.path == "/js":
response_type = APPLICATION_JAVA_SCRIPT
with open('./js/rp.js') as handle:
content = handle.read()
output.write(content)
if self.path.startswith("/play/"):
music_id = self.path.replace('/play/', '', 1)
self.__class__._music_player.play_from_id(music_id)
if self.path.startswith("/set_volume/"):
volume = self.path.replace('/set_volume/', '', 1)
self.__class__._music_player.set_volume(float(volume) / 100.0)
if self.path == '/play':
self.__class__._music_player.play()
if self.path == '/stop':
self.__class__._music_player.stop()
if self.path == '/pause':
self.__class__._music_player.pause()
if self.path == '/resume':
self.__class__._music_player.resume()
if self.path == '/play_next':
self.__class__._music_player.play_next()
if self.path == '/play_prev':
self.__class__._music_player.play_prev()
if self.path == '/current':
response_type = APPLICATION_JSON
response = {
'volume': self.__class__._music_player.get_volume(),
'position': self.__class__._music_player.get_position(),
'title': self.__class__._music_player.current_title(),
'artist': self.__class__._music_player.current_artist(),
'album': self.__class__._music_player.current_album(),
'length': self.__class__._music_player.current_length()
}
output.write(json.dumps(response))
if self.path == '/list':
response_type = APPLICATION_JSON
musics = self.__class__._finder.list_musics()
self.__class__._music_player.set_list(musics)
output.write(json.dumps(musics))
self.send_text_response(response_type, output)
except:
traceback.print_exc()
self.send_error(500, 'The server failed: %s' % self.path) | {
"content_hash": "d655415c55934629a64ec6a9c7e711d1",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 35.723214285714285,
"alnum_prop": 0.5321169707573107,
"repo_name": "paulocanedo/remote_play",
"id": "872fcecd774a5fe2f841bc59644fc9f8a848565f",
"size": "4001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "582"
},
{
"name": "Python",
"bytes": "16294"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('supplier_partner', '0003_auto_20180215_0942'),
]
operations = [
migrations.AlterField(
model_name='buyer',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='partner',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='supplier',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| {
"content_hash": "362b7c02e4f4886306973f97c8b44489",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 111,
"avg_line_length": 31.73076923076923,
"alnum_prop": 0.5890909090909091,
"repo_name": "cdaf/cbe",
"id": "97ab1121b744fbb001af0c0c302109dc78cf3955",
"size": "874",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cbe/cbe/supplier_partner/migrations/0004_auto_20210617_2350.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2292"
},
{
"name": "HTML",
"bytes": "3112"
},
{
"name": "PowerShell",
"bytes": "20448"
},
{
"name": "Python",
"bytes": "241197"
}
],
"symlink_target": ""
} |
import nose.tools
from nose import SkipTest
from nose.plugins.attrib import attr
from tests.common import WpsTestClient, assert_response_success
@attr('online')
def test_wps_analogs():
raise SkipTest
wps = WpsTestClient()
datainputs = "[experiment=NCEP;dateSt=1958-07-15;dateEn=1958-12-31]"
resp = wps.get(service='wps', request='execute', version='1.0.0', identifier='analogs',
datainputs=datainputs)
assert_response_success(resp)
| {
"content_hash": "e3cefec17b6674f2aacbcd348ff6b12b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 91,
"avg_line_length": 29.6875,
"alnum_prop": 0.7094736842105264,
"repo_name": "sradanov/flyingpigeon",
"id": "3b923c2fcc2d091d540d7feb95e0f79e0526c15d",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_wps_analogs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5904"
},
{
"name": "C",
"bytes": "167437"
},
{
"name": "C++",
"bytes": "185682"
},
{
"name": "CMake",
"bytes": "80300"
},
{
"name": "FORTRAN",
"bytes": "34034612"
},
{
"name": "Groff",
"bytes": "129542"
},
{
"name": "Jupyter Notebook",
"bytes": "1005031"
},
{
"name": "M4",
"bytes": "182454"
},
{
"name": "Makefile",
"bytes": "807377"
},
{
"name": "PAWN",
"bytes": "6378"
},
{
"name": "Pascal",
"bytes": "102662"
},
{
"name": "Python",
"bytes": "259095"
},
{
"name": "R",
"bytes": "49639"
},
{
"name": "Shell",
"bytes": "1311673"
},
{
"name": "TeX",
"bytes": "339616"
}
],
"symlink_target": ""
} |
import abc
import weakref
import asyncio
class Person:
def __init__(self, id, name):
self.id = id
self.name = name
@asyncio.coroutine
def get_name(self):
return self.name
class IncomingMessage:
def __init__(self, protocol, conversation, author, text, timestamp=None, attachments=[]):
self.protocol = weakref.ref(protocol)
self.conversation = conversation
self.author = author
self.text = text
self.timestamp = timestamp
self.attachments = attachments
self.handled = None
class Message:
def __init__(self, text, attachments=[]):
self.text = text
self.attachments = attachments
class Conversation(abc.ABC):
def __init__(self, protocol, title):
self.protocol = weakref.ref(protocol)
self.title = title
@abc.abstractmethod
@asyncio.coroutine
def send(self, msg):
pass
@asyncio.coroutine
def reply(self, *args, **kwargs):
yield from self.send(Message(*args, **kwargs))
| {
"content_hash": "61f58a32693b9636fa60686f2e3996a4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 93,
"avg_line_length": 23.266666666666666,
"alnum_prop": 0.6217765042979942,
"repo_name": "Slko/Slot",
"id": "217b157442e9e0f2aa52c1cd5fa41293eb5113ac",
"size": "1047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slot/message.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "68413"
}
],
"symlink_target": ""
} |
from VisualisationPlugin import VisualisationPlugin
import pygame
import pygame.camera
import logging
from DDRPi import FloorCanvas
from lib.controllers import ControllerInput
class CameraVisualisationPlugin(VisualisationPlugin):
logger = logging.getLogger(__name__)
def __init__(self):
self.clock = pygame.time.Clock()
self.webcam = None
self.webcam_index = None
# Nothing specific to be done before this starts, although we could
# set self.clock here. Stash any config so we can use it later
def configure(self, config):
self.config = config
self.logger.info("Config: %s" % config)
# This will just keep running, nothing specific to do for start(), stop(),
# pause() or resume()
def start(self):
pygame.camera.init()
camera_list = pygame.camera.list_cameras() # list available cameras
self.logger.info("Cameras found: %d" % len(camera_list))
if len(camera_list) > 0:
for camera in camera_list:
self.logger.info("CAMERA: %s" % camera)
# # Choose the first webcam
self.webcam_index = 0
self.webcam = pygame.camera.Camera(camera_list[0], (800, 600))
#
self.webcam.start() # start camera
pass
def stop(self):
self.webcam.stop()
def pause(self):
self.stop()
def resume(self):
self.start()
def handle_event(self, event):
try:
if (pygame.event.event_name(event.type) == "JoyButtonDown"):
# iterate over the attached cameras if there are more than one
if (event.button == ControllerInput.BUMPER_RIGHT):
camera_list = pygame.camera.list_cameras()
if len(camera_list) > 0:
# Stop the existing one
self.logger.info("Stopping current camera: %s" % self.webcam)
self.webcam.stop()
if self.webcam_index is not None:
self.webcam_index += 1
else:
self.webcam_index = 0
if self.webcam_index >= len(camera_list):
self.webcam_index = 0
self.webcam = pygame.camera.Camera(camera_list[self.webcam_index], (800, 600))
self.logger.info("starting new camera: %s" % self.webcam)
self.webcam.start()
except Exception as e:
self.logger.warn(e)
def draw_frame(self, surface):
if self.webcam is not None:
self.logger.info("Getting image from webcam %s" % self.webcam)
image = self.webcam.get_image()
resized_image = pygame.transform.scale(image, (surface.get_width(), surface.get_height()))
pixel_array = pygame.surfarray.pixels3d(resized_image)
#self.logger.info(pixel_array)
for row, row_data in enumerate(pixel_array):
for column, pixel_data in enumerate(row_data):
surface.set_pixel(row, column, (pixel_data[0], pixel_data[1], pixel_data[2]))
# Limit the frame rate.
        # This sleeps so that at least 40ms has passed since tick() was
        # last called (capping the loop at 25 fps). It is a no-op if the
        # loop is running slow.
self.clock.tick(25)
# Draw whatever this plugin does.
# We need to return our decorated surface
return surface
def draw_splash(self, canvas):
return self.draw_surface(canvas, 0)
# We've split the method that does the drawing out, so that draw_splash()
# can call it with a fixed timer
def draw_surface(self, canvas, ticks):
canvas.set_colour(FloorCanvas.BLUE)
# Return the canvas
return canvas
| {
"content_hash": "ba1b84eaa73d11a3abfe5467352cfcea",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 102,
"avg_line_length": 36.8,
"alnum_prop": 0.5750517598343685,
"repo_name": "fraz3alpha/led-disco-dancefloor",
"id": "172dbf49dbc142729b6470480a3de1df6e90d2de",
"size": "3864",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "software/controller/visualisation_plugins/camera.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "16623"
},
{
"name": "Eagle",
"bytes": "898737"
},
{
"name": "Python",
"bytes": "302101"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from typing import List
from .alarm_types import AlarmType, AlarmCommand, AlarmSignal, AlarmCurl, AlarmLog, AlarmMule, AlarmXmpp
from ..base import OptionsGroup
from ..utils import listify
TypeAlarmExt = List[AlarmType]
class Alarms(OptionsGroup):
"""Alarms.
This subsystem allows the developer/sysadmin to "announce"
special conditions of an app via various channels.
* http://uwsgi-docs.readthedocs.io/en/latest/AlarmSubsystem.html
"""
class alarm_types:
"""Alarm types available for ``.register_alarm()``."""
command = AlarmCommand
curl = AlarmCurl
log = AlarmLog
mule = AlarmMule
signal = AlarmSignal
xmpp = AlarmXmpp
def __init__(self, *args, **kwargs):
self._alarms = []
super().__init__(*args, **kwargs)
def set_basic_params(self, *, msg_size: int = None, cheap: bool = None, anti_loop_timeout: int = None):
"""
:param msg_size: Set the max size of an alarm message in bytes. Default: 8192.
:param cheap: Use main alarm thread rather than create dedicated
threads for curl-based alarms
:param anti_loop_timeout: Tune the anti-loop alarm system. Default: 3 seconds.
"""
self._set('alarm-msg-size', msg_size)
self._set('alarm-cheap', cheap, cast=bool)
self._set('alarm-freq', anti_loop_timeout)
return self._section
def print_alarms(self):
"""Print out enabled alarms."""
self._set('alarm-list', True, cast=bool)
return self._section
def register_alarm(self, alarm: TypeAlarmExt):
"""Register (create) an alarm.
:param alarm: Alarm.
"""
for alarm in listify(alarm):
if alarm not in self._alarms:
self._set('alarm', alarm, multi=True)
self._alarms.append(alarm)
return self._section
def alarm_on_log(self, alarm: TypeAlarmExt, matcher: str, *, skip: bool = False):
"""Raise (or skip) the specified alarm when a log line matches the specified regexp.
:param alarm: Alarm.
:param matcher: Regular expression to match log line.
:param skip:
"""
self.register_alarm(alarm)
value = f"{','.join(map(attrgetter('alias'), listify(alarm)))} {matcher}"
self._set('not-alarm-log' if skip else 'alarm-log', value)
return self._section
def alarm_on_fd_ready(self, alarm: TypeAlarmExt, *, fd: str, message: str, byte_count: int = None):
"""Triggers the alarm when the specified file descriptor is ready for read.
This is really useful for integration with the Linux eventfd() facility.
Pretty low-level and the basis of most of the alarm plugins.
* http://uwsgi-docs.readthedocs.io/en/latest/Changelog-1.9.7.html#alarm-fd
:param alarm: Alarm.
:param fd: File descriptor.
:param message: Message to send.
        :param byte_count: Bytes to read. Default: 1 byte.
.. note:: For ``eventfd`` set 8.
"""
self.register_alarm(alarm)
value = fd
if byte_count:
value += f':{byte_count}'
value += f' {message}'
for alarm_ in listify(alarm):
self._set('alarm-fd', f'{alarm_.alias} {value}', multi=True)
return self._section
def alarm_on_queue_full(self, alarm: TypeAlarmExt):
"""Raise the specified alarm when the socket backlog queue is full.
:param alarm: Alarm.
"""
self.register_alarm(alarm)
for alarm_ in listify(alarm):
self._set('alarm-backlog', alarm_.alias, multi=True)
return self._section
def alarm_on_segfault(self, alarm: TypeAlarmExt):
"""Raise the specified alarm when the segmentation fault handler is executed.
Sends a backtrace.
:param alarm: Alarm.
"""
self.register_alarm(alarm)
for alarm_ in listify(alarm):
self._set('alarm-segfault', alarm_.alias, multi=True)
return self._section
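# Usage sketch for the options group above. Only the method names are taken
# from this module; the Section wiring and the alarm-type constructor
# arguments are assumptions about the surrounding uwsgiconf API.
#
#     alarms = section.alarms                      # hypothetical: the Section
#                                                  # attribute exposing Alarms
#     my_alarm = alarms.alarm_types.log('mylog')   # alias argument assumed
#
#     alarms.register_alarm(my_alarm)
#     alarms.alarm_on_log(my_alarm, matcher='(?i)exception')
#     alarms.alarm_on_segfault(my_alarm)
#     alarms.print_alarms()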
| {
"content_hash": "f13694d939aecafa514c3f01063e7fdf",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 107,
"avg_line_length": 28.14965986394558,
"alnum_prop": 0.6072982116964717,
"repo_name": "idlesign/uwsgiconf",
"id": "e376636a1ff64e8915c3d21e45a7964423a251c3",
"size": "4138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uwsgiconf/options/alarms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "5422"
},
{
"name": "Python",
"bytes": "522381"
}
],
"symlink_target": ""
} |
"""Contains global paths."""
# pylama:ignore=E221 (whitespace error)
# Relative to channel instance
DATABASE_PATH = '{}data/monkalot.db'
IGNORED_USERS_PATH = '{}data/ignored_users.json'
NOTIFICATIONS_FILE = '{}data/notifications.json'
PRONOUNS_PATH = '{}data/pronouns.json'
QUOTES_FILE = '{}data/quotes.json'
REPLIES_FILE = '{}data/sreply_cmds.json'
SLAPHUG_FILE = '{}data/slaphug.json'
SMORC_FILE = '{}data/smorc.json'
STATISTIC_FILE = '{}data/emote_stats.json'
TRUSTED_MODS_PATH = '{}data/trusted_mods.json'
JSON_DATA_PATH = '{}data/api_json_data/{}'
CONFIG_PATH = '{}configs/bot_config.json'
CUSTOM_RESPONSES_PATH = '{}configs/responses.json'
# Absolute paths
COMMON_API_JSON_DATA_PATH = 'data/common_api_json_data/{}'
JSON_FILE_INDEX_PATH = 'data/common_api_json_data/json_index.json'
TEMPLATE_RESPONSES_PATH = 'channels/template/configs/responses.json'
# File names
CHANNEL_BTTV_EMOTE_JSON_FILE = 'channel_bttv.json'
# APIs
TWITCH_TMI = 'http://tmi.twitch.tv/'
USERLIST_API = TWITCH_TMI + 'group/user/{}/chatters'
TWITCH_API = 'https://api.twitch.tv/'
OIDC_API = TWITCH_API + '/api/oidc/keys'
TWITCH_KRAKEN_API = TWITCH_API + 'kraken/'
CHANNEL_API = TWITCH_KRAKEN_API + 'channels/{}'
STREAMS_API = TWITCH_KRAKEN_API + 'streams/{}'
USER_EMOTE_API = TWITCH_KRAKEN_API + 'users/{}/emotes'
USER_ID_API = TWITCH_KRAKEN_API + 'users/{}'
USER_NAME_API = TWITCH_KRAKEN_API + 'users?login={}'
TWITCH_EMOTE_API = TWITCH_KRAKEN_API + 'chat/emoticon_images?emotesets=0'
BTTV_API = 'https://api.betterttv.net/2/'
GLOBAL_BTTVEMOTES_API = BTTV_API + 'emotes'
CHANNEL_BTTVEMOTES_API = BTTV_API + 'channels/{}'
HEARTHSTONE_CARD_API = 'http://api.hearthstonejson.com/v1/latest/enUS/cards.collectible.json'
EMOJI_API = "https://raw.githubusercontent.com/github/gemoji/master/db/emoji.json"
| {
"content_hash": "e3b7434be83fb53ad206544fde35ab0a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 93,
"avg_line_length": 38.43396226415094,
"alnum_prop": 0.6298478154148257,
"repo_name": "ghostduck/monkalot",
"id": "867fe316eadc4f6c921338afd0285acb894fe95b",
"size": "2037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/paths/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "195166"
}
],
"symlink_target": ""
} |
from .main import app
| {
"content_hash": "113af8fa35f55016d02a94a987a41942",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.7727272727272727,
"repo_name": "jniedrauer/jniedrauer.com",
"id": "e4c7aa543f9b2957eca502687a621c81a2b25947",
"size": "22",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2757"
},
{
"name": "HCL",
"bytes": "13050"
},
{
"name": "HTML",
"bytes": "14702"
},
{
"name": "Makefile",
"bytes": "2469"
},
{
"name": "Python",
"bytes": "5728"
},
{
"name": "Shell",
"bytes": "87"
},
{
"name": "Smarty",
"bytes": "338"
}
],
"symlink_target": ""
} |
"""
Runs tests and writes the results to the report.json file.
This should be executed inside Blender, not from normal Python!
"""
import glob
import json
import os
from timeit import default_timer as timer
import sys
import bpy
print('bpy.app.version:', bpy.app.version)
print('python sys.version:', sys.version)
base_dir = os.path.dirname(os.path.abspath(__file__))
samples_path = os.path.join(base_dir, 'glTF-Sample-Models', '2.0')
site_local_path = os.path.join(base_dir, 'site_local')
report_path = os.path.join(base_dir, 'report.json')
tests = []
files = (
glob.glob(samples_path + '/**/*.gltf', recursive=True) +
glob.glob(samples_path + '/**/*.glb', recursive=True) +
    glob.glob(site_local_path + '/**/*.gltf', recursive=True) +
    glob.glob(site_local_path + '/**/*.glb', recursive=True)
)
# Skip Draco encoded files for now
files = [fn for fn in files if 'Draco' not in fn]
for filename in files:
short_name = os.path.relpath(filename, samples_path)
print('\nTrying ', short_name, '...')
bpy.ops.wm.read_factory_settings()
try:
start_time = timer()
bpy.ops.import_scene.gltf_ksons(filepath=filename)
end_time = timer()
print('[PASSED]\n')
test = {
'filename': short_name,
'result': 'PASSED',
'timeElapsed': end_time - start_time,
}
except Exception as e:
print('[FAILED]\n')
test = {
            'filename': short_name,
'result': 'FAILED',
'error': str(e),
}
tests.append(test)
report = {
'blenderVersion': list(bpy.app.version),
'tests': tests,
}
with open(report_path, 'w+') as f:
json.dump(report, f, indent=4)
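# Usage sketch: the script must run inside Blender so that bpy and the
# importer add-on are available, e.g. headless from a shell (the add-on is
# assumed to be installed and enabled already):
#
#     blender --background --python bl_generate_report.py
#
# report.json is then written next to this script.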
| {
"content_hash": "ae8da14fc8ff5305ef6104b7d2262d0d",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 66,
"avg_line_length": 25.235294117647058,
"alnum_prop": 0.6078088578088578,
"repo_name": "ksons/gltf-blender-importer",
"id": "1e492d778f644ad82733828cf521bcc9afd57ca2",
"size": "1716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/bl_generate_report.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "146587"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import functools
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from grafeas.grafeas_v1.services.grafeas import pagers
from grafeas.grafeas_v1.types import attestation
from grafeas.grafeas_v1.types import build
from grafeas.grafeas_v1.types import common
from grafeas.grafeas_v1.types import compliance
from grafeas.grafeas_v1.types import deployment
from grafeas.grafeas_v1.types import discovery
from grafeas.grafeas_v1.types import dsse_attestation
from grafeas.grafeas_v1.types import grafeas
from grafeas.grafeas_v1.types import image
from grafeas.grafeas_v1.types import package
from grafeas.grafeas_v1.types import upgrade
from grafeas.grafeas_v1.types import vulnerability
from .transports.base import GrafeasTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import GrafeasGrpcAsyncIOTransport
from .client import GrafeasClient
class GrafeasAsyncClient:
"""`Grafeas <https://grafeas.io>`__ API.
Retrieves analysis results of Cloud components such as Docker
container images.
Analysis results are stored as a series of occurrences. An
``Occurrence`` contains information about a specific analysis
instance on a resource. An occurrence refers to a ``Note``. A note
contains details describing the analysis and is generally stored in
a separate project, called a ``Provider``. Multiple occurrences can
refer to the same note.
For example, an SSL vulnerability could affect multiple images. In
this case, there would be one note for the vulnerability and an
occurrence for each image with the vulnerability referring to that
note.
"""
_client: GrafeasClient
note_path = staticmethod(GrafeasClient.note_path)
parse_note_path = staticmethod(GrafeasClient.parse_note_path)
occurrence_path = staticmethod(GrafeasClient.occurrence_path)
parse_occurrence_path = staticmethod(GrafeasClient.parse_occurrence_path)
project_path = staticmethod(GrafeasClient.project_path)
parse_project_path = staticmethod(GrafeasClient.parse_project_path)
common_billing_account_path = staticmethod(
GrafeasClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
GrafeasClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(GrafeasClient.common_folder_path)
parse_common_folder_path = staticmethod(GrafeasClient.parse_common_folder_path)
common_organization_path = staticmethod(GrafeasClient.common_organization_path)
parse_common_organization_path = staticmethod(
GrafeasClient.parse_common_organization_path
)
common_project_path = staticmethod(GrafeasClient.common_project_path)
parse_common_project_path = staticmethod(GrafeasClient.parse_common_project_path)
common_location_path = staticmethod(GrafeasClient.common_location_path)
parse_common_location_path = staticmethod(GrafeasClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
GrafeasAsyncClient: The constructed client.
"""
return GrafeasClient.from_service_account_info.__func__(GrafeasAsyncClient, info, *args, **kwargs) # type: ignore
@property
def transport(self) -> GrafeasTransport:
"""Returns the transport used by the client instance.
Returns:
GrafeasTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(GrafeasClient).get_transport_class, type(GrafeasClient)
)
def __init__(
self,
*,
transport: Union[str, GrafeasTransport] = "grpc_asyncio",
) -> None:
"""Instantiate the grafeas client.
Args:
transport (Union[str, ~.GrafeasTransport]): The
transport to use.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = GrafeasClient(
transport=transport,
)
async def get_occurrence(
self,
request: Union[grafeas.GetOccurrenceRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> grafeas.Occurrence:
r"""Gets the specified occurrence.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_get_occurrence():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.GetOccurrenceRequest(
name="name_value",
)
# Make the request
response = await client.get_occurrence(request=request)
# Handle the response
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.GetOccurrenceRequest, dict]):
The request object. Request to get an occurrence.
name (:class:`str`):
The name of the occurrence in the form of
``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.types.Occurrence:
An instance of an analysis type that
has been found on a resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.GetOccurrenceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_occurrence,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_occurrences(
self,
request: Union[grafeas.ListOccurrencesRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListOccurrencesAsyncPager:
r"""Lists occurrences for the specified project.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_list_occurrences():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.ListOccurrencesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_occurrences(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.ListOccurrencesRequest, dict]):
The request object. Request to list occurrences.
parent (:class:`str`):
The name of the project to list occurrences for in the
form of ``projects/[PROJECT_ID]``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (:class:`str`):
The filter expression.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.services.grafeas.pagers.ListOccurrencesAsyncPager:
Response for listing occurrences.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.ListOccurrencesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_occurrences,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListOccurrencesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_occurrence(
self,
request: Union[grafeas.DeleteOccurrenceRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified occurrence. For example, use
this method to delete an occurrence when the occurrence
is no longer applicable for the given resource.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_delete_occurrence():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.DeleteOccurrenceRequest(
name="name_value",
)
# Make the request
await client.delete_occurrence(request=request)
Args:
request (Union[grafeas.grafeas_v1.types.DeleteOccurrenceRequest, dict]):
The request object. Request to delete an occurrence.
name (:class:`str`):
The name of the occurrence in the form of
``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.DeleteOccurrenceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_occurrence,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def create_occurrence(
self,
request: Union[grafeas.CreateOccurrenceRequest, dict] = None,
*,
parent: str = None,
occurrence: grafeas.Occurrence = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> grafeas.Occurrence:
r"""Creates a new occurrence.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_create_occurrence():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.CreateOccurrenceRequest(
parent="parent_value",
)
# Make the request
response = await client.create_occurrence(request=request)
# Handle the response
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.CreateOccurrenceRequest, dict]):
The request object. Request to create a new occurrence.
parent (:class:`str`):
The name of the project in the form of
``projects/[PROJECT_ID]``, under which the occurrence is
to be created.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
occurrence (:class:`grafeas.grafeas_v1.types.Occurrence`):
The occurrence to create.
This corresponds to the ``occurrence`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.types.Occurrence:
An instance of an analysis type that
has been found on a resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, occurrence])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.CreateOccurrenceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if occurrence is not None:
request.occurrence = occurrence
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_occurrence,
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def batch_create_occurrences(
self,
request: Union[grafeas.BatchCreateOccurrencesRequest, dict] = None,
*,
parent: str = None,
occurrences: Sequence[grafeas.Occurrence] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> grafeas.BatchCreateOccurrencesResponse:
r"""Creates new occurrences in batch.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_batch_create_occurrences():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.BatchCreateOccurrencesRequest(
parent="parent_value",
)
# Make the request
response = await client.batch_create_occurrences(request=request)
# Handle the response
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.BatchCreateOccurrencesRequest, dict]):
The request object. Request to create occurrences in
batch.
parent (:class:`str`):
The name of the project in the form of
``projects/[PROJECT_ID]``, under which the occurrences
are to be created.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
occurrences (:class:`Sequence[grafeas.grafeas_v1.types.Occurrence]`):
The occurrences to create. Max
allowed length is 1000.
This corresponds to the ``occurrences`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.types.BatchCreateOccurrencesResponse:
Response for creating occurrences in
batch.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, occurrences])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.BatchCreateOccurrencesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if occurrences:
request.occurrences.extend(occurrences)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_create_occurrences,
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_occurrence(
self,
request: Union[grafeas.UpdateOccurrenceRequest, dict] = None,
*,
name: str = None,
occurrence: grafeas.Occurrence = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> grafeas.Occurrence:
r"""Updates the specified occurrence.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_update_occurrence():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.UpdateOccurrenceRequest(
name="name_value",
)
# Make the request
response = await client.update_occurrence(request=request)
# Handle the response
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.UpdateOccurrenceRequest, dict]):
The request object. Request to update an occurrence.
name (:class:`str`):
The name of the occurrence in the form of
``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
occurrence (:class:`grafeas.grafeas_v1.types.Occurrence`):
The updated occurrence.
This corresponds to the ``occurrence`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The fields to update.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.types.Occurrence:
An instance of an analysis type that
has been found on a resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, occurrence, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.UpdateOccurrenceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if occurrence is not None:
request.occurrence = occurrence
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_occurrence,
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def get_occurrence_note(
self,
request: Union[grafeas.GetOccurrenceNoteRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> grafeas.Note:
r"""Gets the note attached to the specified occurrence.
Consumer projects can use this method to get a note that
belongs to a provider project.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_get_occurrence_note():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.GetOccurrenceNoteRequest(
name="name_value",
)
# Make the request
response = await client.get_occurrence_note(request=request)
# Handle the response
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.GetOccurrenceNoteRequest, dict]):
The request object. Request to get the note to which the
specified occurrence is attached.
name (:class:`str`):
The name of the occurrence in the form of
``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.types.Note:
A type of analysis that can be done
for a resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.GetOccurrenceNoteRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_occurrence_note,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def get_note(
self,
request: Union[grafeas.GetNoteRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> grafeas.Note:
r"""Gets the specified note.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_get_note():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.GetNoteRequest(
name="name_value",
)
# Make the request
response = await client.get_note(request=request)
# Handle the response
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.GetNoteRequest, dict]):
The request object. Request to get a note.
name (:class:`str`):
The name of the note in the form of
``projects/[PROVIDER_ID]/notes/[NOTE_ID]``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.types.Note:
A type of analysis that can be done
for a resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.GetNoteRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_note,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_notes(
self,
request: Union[grafeas.ListNotesRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListNotesAsyncPager:
r"""Lists notes for the specified project.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_list_notes():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.ListNotesRequest(
parent="parent_value",
)
# Make the request
                page_result = await client.list_notes(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.ListNotesRequest, dict]):
The request object. Request to list notes.
parent (:class:`str`):
The name of the project to list notes for in the form of
``projects/[PROJECT_ID]``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (:class:`str`):
The filter expression.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.services.grafeas.pagers.ListNotesAsyncPager:
Response for listing notes.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.ListNotesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_notes,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListNotesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_note(
self,
request: Union[grafeas.DeleteNoteRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified note.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_delete_note():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.DeleteNoteRequest(
name="name_value",
)
# Make the request
await client.delete_note(request=request)
Args:
request (Union[grafeas.grafeas_v1.types.DeleteNoteRequest, dict]):
The request object. Request to delete a note.
name (:class:`str`):
The name of the note in the form of
``projects/[PROVIDER_ID]/notes/[NOTE_ID]``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.DeleteNoteRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_note,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def create_note(
self,
request: Union[grafeas.CreateNoteRequest, dict] = None,
*,
parent: str = None,
note_id: str = None,
note: grafeas.Note = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> grafeas.Note:
r"""Creates a new note.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_create_note():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.CreateNoteRequest(
parent="parent_value",
note_id="note_id_value",
)
# Make the request
response = await client.create_note(request=request)
# Handle the response
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.CreateNoteRequest, dict]):
The request object. Request to create a new note.
parent (:class:`str`):
The name of the project in the form of
``projects/[PROJECT_ID]``, under which the note is to be
created.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
note_id (:class:`str`):
The ID to use for this note.
This corresponds to the ``note_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
note (:class:`grafeas.grafeas_v1.types.Note`):
The note to create.
This corresponds to the ``note`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.types.Note:
A type of analysis that can be done
for a resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, note_id, note])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.CreateNoteRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if note_id is not None:
request.note_id = note_id
if note is not None:
request.note = note
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_note,
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def batch_create_notes(
self,
request: Union[grafeas.BatchCreateNotesRequest, dict] = None,
*,
parent: str = None,
notes: Mapping[str, grafeas.Note] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> grafeas.BatchCreateNotesResponse:
r"""Creates new notes in batch.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_batch_create_notes():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.BatchCreateNotesRequest(
parent="parent_value",
)
# Make the request
response = await client.batch_create_notes(request=request)
# Handle the response
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.BatchCreateNotesRequest, dict]):
The request object. Request to create notes in batch.
parent (:class:`str`):
The name of the project in the form of
``projects/[PROJECT_ID]``, under which the notes are to
be created.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
notes (:class:`Mapping[str, grafeas.grafeas_v1.types.Note]`):
The notes to create. Max allowed
length is 1000.
This corresponds to the ``notes`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.types.BatchCreateNotesResponse:
Response for creating notes in batch.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, notes])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.BatchCreateNotesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if notes:
request.notes.update(notes)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_create_notes,
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_note(
self,
request: Union[grafeas.UpdateNoteRequest, dict] = None,
*,
name: str = None,
note: grafeas.Note = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> grafeas.Note:
r"""Updates the specified note.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_update_note():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.UpdateNoteRequest(
name="name_value",
)
# Make the request
response = await client.update_note(request=request)
# Handle the response
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.UpdateNoteRequest, dict]):
The request object. Request to update a note.
name (:class:`str`):
The name of the note in the form of
``projects/[PROVIDER_ID]/notes/[NOTE_ID]``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
note (:class:`grafeas.grafeas_v1.types.Note`):
The updated note.
This corresponds to the ``note`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The fields to update.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.types.Note:
A type of analysis that can be done
for a resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, note, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.UpdateNoteRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if note is not None:
request.note = note
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_note,
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_note_occurrences(
self,
request: Union[grafeas.ListNoteOccurrencesRequest, dict] = None,
*,
name: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListNoteOccurrencesAsyncPager:
r"""Lists occurrences referencing the specified note.
Provider projects can use this method to get all
occurrences across consumer projects referencing the
specified note.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from grafeas import grafeas_v1
async def sample_list_note_occurrences():
# Create a client
client = grafeas_v1.GrafeasAsyncClient()
# Initialize request argument(s)
request = grafeas_v1.ListNoteOccurrencesRequest(
name="name_value",
)
# Make the request
                page_result = await client.list_note_occurrences(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Union[grafeas.grafeas_v1.types.ListNoteOccurrencesRequest, dict]):
The request object. Request to list occurrences for a
note.
name (:class:`str`):
The name of the note to list occurrences for in the form
of ``projects/[PROVIDER_ID]/notes/[NOTE_ID]``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (:class:`str`):
The filter expression.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
grafeas.grafeas_v1.services.grafeas.pagers.ListNoteOccurrencesAsyncPager:
Response for listing occurrences for
a note.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = grafeas.ListNoteOccurrencesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_note_occurrences,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListNoteOccurrencesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
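    # The __aenter__/__aexit__ pair above lets the client be used as an async
    # context manager, closing the underlying transport on exit. A minimal,
    # illustrative sketch (it assumes default credentials are available and that
    # the note "projects/my-project/notes/my-note" exists):
    #
    #     async with GrafeasAsyncClient() as client:
    #         note = await client.get_note(name="projects/my-project/notes/my-note")
    #         print(note)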
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"grafeas",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("GrafeasAsyncClient",)
| {
"content_hash": "4b96658422c5fa372a081b5a9fba2ea9",
"timestamp": "",
"source": "github",
"line_count": 1769,
"max_line_length": 122,
"avg_line_length": 38.661390616167324,
"alnum_prop": 0.577479822201427,
"repo_name": "googleapis/python-grafeas",
"id": "392521062f22230b94aca620c85c4b6e1792f4b4",
"size": "68992",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "grafeas/grafeas_v1/services/grafeas/async_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "588952"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
urlpatterns = patterns("spirit.views.admin.category",
url(r'^$', 'category_list', name='admin-category'),
url(r'^list/$', 'category_list', name='admin-category-list'),
url(r'^create/$', 'category_create', name='admin-category-create'),
url(r'^update/(?P<category_id>\d+)/$', 'category_update', name='admin-category-update'),
) | {
"content_hash": "d2bd440cf67ad1aa6a45cedb9e331a04",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 92,
"avg_line_length": 43,
"alnum_prop": 0.6666666666666666,
"repo_name": "Si-elegans/Web-based_GUI_Tools",
"id": "7ce067e7836e47744d87613b295d2d688d1b163f",
"size": "411",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spirit/urls/admin/category.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "183432"
},
{
"name": "HTML",
"bytes": "821815"
},
{
"name": "JavaScript",
"bytes": "5240621"
},
{
"name": "Python",
"bytes": "2130547"
}
],
"symlink_target": ""
} |
import unittest
from visitor import *
'''
Unit test suite for the classes defined in visitor.py.
'''
class VisitorTest(unittest.TestCase):
def test_dot(self):
dot = Dot('dot-1', 5, 6)
self.assertIsInstance(dot, Shape)
self.assertIsInstance(dot, Dot)
self.assertEqual('dot-1', dot.id)
self.assertEqual(5, dot.x)
self.assertEqual(6, dot.y)
dot.move(10, 10)
self.assertEqual(10, dot.x)
self.assertEqual(10, dot.y)
self.assertEqual("Dot 'dot-1' drew at coordinates -> (10, 10)", dot.draw())
def test_circle(self):
circle = Circle('circle-1', 2, 2, 5)
self.assertIsInstance(circle, Shape)
self.assertIsInstance(circle, Circle)
self.assertEqual('circle-1', circle.id)
self.assertEqual(2, circle.x)
self.assertEqual(2, circle.y)
self.assertEqual(5, circle.radius)
circle.move(8, 4)
self.assertEqual(8, circle.x)
self.assertEqual(4, circle.y)
self.assertEqual("Circle 'circle-1' drew at coordinates -> (8, 4)", circle.draw())
def test_rectangle(self):
rectangle = Rectangle('rectangle-1', -5, -8, 4, 6)
self.assertIsInstance(rectangle, Shape)
self.assertIsInstance(rectangle, Rectangle)
self.assertEqual('rectangle-1', rectangle.id)
self.assertEqual(-5, rectangle.x)
self.assertEqual(-8, rectangle.y)
self.assertEqual(4, rectangle.width)
self.assertEqual(6, rectangle.height)
rectangle.move(-5, 8)
self.assertEqual(-5, rectangle.x)
self.assertEqual(8, rectangle.y)
self.assertEqual("Rectangle 'rectangle-1' drew at coordinates -> (-5, 8)", rectangle.draw())
def test_xml_visitor(self):
dot = Dot('dot-xml', 10, 6)
circle = Circle('circle-xml', 6, 2, 3)
rectangle = Rectangle('rectangle-xml', -5, 10, 5, 8)
xml_visitor = XMLExportVisitor()
self.assertEqual("XML export done for: <dot-xml coordinates=(10, 6)>", xml_visitor.visit_dot(dot))
self.assertEqual("XML export done for: <circle-xml radius=3 coordinates=(6, 2)>", xml_visitor.visit_circle(circle))
self.assertEqual("XML export done for: <rectangle-xml width=5 height=8 coordinates=(-5, 10)>", xml_visitor.visit_rectangle(rectangle))
def test_json_visitor(self):
dot = Dot('dot-json', 10, 6)
circle = Circle('circle-json', 6, 2, 3)
rectangle = Rectangle('rectangle-json', -5, 10, 5, 8)
json_visitor = JSONExportVisitor()
self.assertEqual("JSON export done for: <dot-json coordinates=(10, 6)>", json_visitor.visit_dot(dot))
self.assertEqual("JSON export done for: <circle-json radius=3 coordinates=(6, 2)>", json_visitor.visit_circle(circle))
self.assertEqual("JSON export done for: <rectangle-json width=5 height=8 coordinates=(-5, 10)>", json_visitor.visit_rectangle(rectangle))
def test_yaml_visitor(self):
dot = Dot('dot-yaml', 10, 6)
circle = Circle('circle-yaml', 6, 2, 3)
rectangle = Rectangle('rectangle-yaml', -5, 10, 5, 8)
yaml_visitor = YAMLExportVisitor()
self.assertEqual("YAML export done for: <dot-yaml coordinates=(10, 6)>", yaml_visitor.visit_dot(dot))
self.assertEqual("YAML export done for: <circle-yaml radius=3 coordinates=(6, 2)>", yaml_visitor.visit_circle(circle))
self.assertEqual("YAML export done for: <rectangle-yaml width=5 height=8 coordinates=(-5, 10)>", yaml_visitor.visit_rectangle(rectangle))
if __name__ == "__main__":
unittest.main() | {
"content_hash": "81272387a66bf263dcac4051599b1f51",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 145,
"avg_line_length": 45.4125,
"alnum_prop": 0.6289567850261492,
"repo_name": "AnhellO/DAS_Sistemas",
"id": "2d0078340568fca7b5876706057219709c1ac55b",
"size": "3633",
"binary": false,
"copies": "1",
"ref": "refs/heads/ene-jun-2022",
"path": "Ene-Jun-2022/juan-alejandro-calzoncit-rodriguez/Primer Parcial/Ejercicio4/visitor_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "8515"
},
{
"name": "Go",
"bytes": "25845"
},
{
"name": "HTML",
"bytes": "36671"
},
{
"name": "Python",
"bytes": "716604"
}
],
"symlink_target": ""
} |
import argparse
import cPickle
import traceback
import logging
import time
import sys
import numpy
from experiments.nmt import\
RNNEncoderDecoder,\
parse_input,\
get_batch_iterator,\
prototype_state
logger = logging.getLogger(__name__)
class BatchTxtIterator(object):
def __init__(self, state, txt, indx, batch_size, raise_unk, unk_sym=-1, null_sym=-1):
self.__dict__.update(locals())
self.__dict__.pop('self')
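        # The two lines above copy every constructor argument (state, txt, indx,
        # batch_size, ...) onto the instance as attributes and then drop the
        # spurious 'self' entry: a compact equivalent of writing
        # self.state = state, self.txt = txt, and so on.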
def start(self):
self.txt_file = open(self.txt)
def _pack(self, seqs):
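        # Pack a list of variable-length token-id sequences into one zero-padded
        # int64 matrix of shape (num_seqs, max_len), together with a float32 mask
        # that is 1.0 over real tokens and 0.0 over the padding. Both are returned
        # transposed, i.e. in time-major (max_len, num_seqs) layout.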
num = len(seqs)
max_len = max(map(len, seqs))
x = numpy.zeros((num, max_len), dtype="int64")
x_mask = numpy.zeros((num, max_len), dtype="float32")
for i, seq in enumerate(seqs):
x[i, :len(seq)] = seq
x_mask[i, :len(seq)] = 1.0
return x.T, x_mask.T
def __iter__(self):
return self
def next(self):
seqs = []
try:
while len(seqs) < self.batch_size:
line = next(self.txt_file).strip()
seq, _ = parse_input(self.state, self.indx, line, raise_unk=self.raise_unk,
unk_sym=self.unk_sym, null_sym=self.null_sym)
seqs.append(seq)
return self._pack(seqs)
except StopIteration:
if not seqs:
raise StopIteration()
return self._pack(seqs)
class BatchBiTxtIterator(object):
def __init__(self, state, src, indx_src, trg, indx_trg, batch_size, raise_unk):
self.__dict__.update(locals())
self.__dict__.pop('self')
self.src_iter = BatchTxtIterator(state, src, indx_src, batch_size, raise_unk,
unk_sym=state['unk_sym_source'], null_sym=state['null_sym_source'])
self.trg_iter = BatchTxtIterator(state, trg, indx_trg, batch_size, raise_unk,
unk_sym=state['unk_sym_target'], null_sym=state['null_sym_target'])
def start(self):
self.src_iter.start()
self.trg_iter.start()
def __iter__(self):
return self
def next(self):
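        # Pull one packed batch from each side; the assert below guarantees that
        # the source and target files produced the same number of sentences for
        # this batch, i.e. that the two corpora are line-aligned.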
x, x_mask = next(self.src_iter)
y, y_mask = next(self.trg_iter)
assert x.shape[1] == y.shape[1]
return dict(x=x, x_mask=x_mask, y=y, y_mask=y_mask)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--state", required=True, help="State to use")
# Paths
parser.add_argument("--src", help="Source phrases")
parser.add_argument("--trg", help="Target phrases")
parser.add_argument("--scores", default=None, help="Save scores to")
parser.add_argument("model_path", help="Path to the model")
# Options
parser.add_argument("--print-probs", default=False, action="store_true",
help="Print probs instead of log probs")
parser.add_argument("--allow-unk", default=False, action="store_true",
help="Allow unknown words in the input")
parser.add_argument("--mode", default="interact",
help="Processing mode, one of 'batch', 'txt', 'interact'")
parser.add_argument("--n-batches", default=-1, type=int,
help="Score only first n batches")
parser.add_argument("--verbose", default=False, action="store_true",
help="Print more stuff")
parser.add_argument("--y-noise", type=float,
help="Probability for a word to be replaced by a random word")
# Additional arguments
parser.add_argument("changes", nargs="?", help="Changes to state", default="")
return parser.parse_args()
def main():
args = parse_args()
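    # Three processing modes are handled below: "batch" scores whole mini-batches
    # (either from parallel text files or from the configured training data),
    # "interact" prompts for a source/target pair on stdin and prints its
    # probability, and "txt" walks two parallel text files line by line,
    # optionally printing the soft alignment for each sentence pair.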
state = prototype_state()
with open(args.state) as src:
state.update(cPickle.load(src))
state.update(eval("dict({})".format(args.changes)))
state['sort_k_batches'] = 1
state['shuffle'] = False
state['use_infinite_loop'] = False
state['force_enc_repr_cpu'] = False
logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
rng = numpy.random.RandomState(state['seed'])
enc_dec = RNNEncoderDecoder(state, rng, skip_init=True, compute_alignment=True)
enc_dec.build()
lm_model = enc_dec.create_lm_model()
lm_model.load(args.model_path)
indx_word_src = cPickle.load(open(state['word_indx'],'rb'))
indx_word_trgt = cPickle.load(open(state['word_indx_trgt'], 'rb'))
if args.mode == "batch":
data_given = args.src or args.trg
txt = data_given and not (args.src.endswith(".h5") and args.trg.endswith(".h5"))
if data_given and not txt:
state['source'] = [args.src]
state['target'] = [args.trg]
if not data_given and not txt:
logger.info("Using the training data")
if txt:
data_iter = BatchBiTxtIterator(state,
args.src, indx_word_src, args.trg, indx_word_trgt,
state['bs'], raise_unk=not args.allow_unk)
data_iter.start()
else:
data_iter = get_batch_iterator(state)
data_iter.start(0)
score_file = open(args.scores, "w") if args.scores else sys.stdout
scorer = enc_dec.create_scorer(batch=True)
count = 0
n_samples = 0
logger.info('Scoring phrases')
for i, batch in enumerate(data_iter):
            if batch is None:
continue
if args.n_batches >= 0 and i == args.n_batches:
break
if args.y_noise:
y = batch['y']
random_words = numpy.random.randint(0, 100, y.shape).astype("int64")
change_mask = numpy.random.binomial(1, args.y_noise, y.shape).astype("int64")
y = change_mask * random_words + (1 - change_mask) * y
batch['y'] = y
st = time.time()
[scores] = scorer(batch['x'], batch['y'],
batch['x_mask'], batch['y_mask'])
if args.print_probs:
scores = numpy.exp(scores)
up_time = time.time() - st
for s in scores:
print >>score_file, "{:.5e}".format(float(s))
n_samples += batch['x'].shape[1]
count += 1
if count % 100 == 0:
score_file.flush()
logger.debug("Scores flushed")
logger.debug("{} batches, {} samples, {} per sample; example scores: {}".format(
count, n_samples, up_time/scores.shape[0], scores[:5]))
logger.info("Done")
score_file.flush()
elif args.mode == "interact":
scorer = enc_dec.create_scorer()
while True:
try:
compute_probs = enc_dec.create_probs_computer()
src_line = raw_input('Source sequence: ')
trgt_line = raw_input('Target sequence: ')
src_seq = parse_input(state, indx_word_src, src_line, raise_unk=not args.allow_unk,
unk_sym=state['unk_sym_source'], null_sym=state['null_sym_source'])
trgt_seq = parse_input(state, indx_word_trgt, trgt_line, raise_unk=not args.allow_unk,
unk_sym=state['unk_sym_target'], null_sym=state['null_sym_target'])
print "Binarized source: ", src_seq
print "Binarized target: ", trgt_seq
probs = compute_probs(src_seq, trgt_seq)
print "Probs: {}, cost: {}".format(probs, -numpy.sum(numpy.log(probs)))
except Exception:
traceback.print_exc()
elif args.mode == "txt":
assert args.src and args.trg
scorer = enc_dec.create_scorer()
src_file = open(args.src, "r")
trg_file = open(args.trg, "r")
compute_probs = enc_dec.create_probs_computer(return_alignment=True)
try:
numpy.set_printoptions(precision=3, linewidth=150, suppress=True)
i = 0
while True:
src_line = next(src_file).strip()
trgt_line = next(trg_file).strip()
src_seq, src_words = parse_input(state,
indx_word_src, src_line, raise_unk=not args.allow_unk,
unk_sym=state['unk_sym_source'], null_sym=state['null_sym_source'])
trgt_seq, trgt_words = parse_input(state,
indx_word_trgt, trgt_line, raise_unk=not args.allow_unk,
unk_sym=state['unk_sym_target'], null_sym=state['null_sym_target'])
probs, alignment = compute_probs(src_seq, trgt_seq)
if args.verbose:
print "Probs: ", probs.flatten()
if alignment.ndim == 3:
print "Alignment:".ljust(20), src_line, "<eos>"
for i, word in enumerate(trgt_words):
print "{}{}".format(word.ljust(20), alignment[i, :, 0])
print "Generated by:"
for i, word in enumerate(trgt_words):
j = numpy.argmax(alignment[i, :, 0])
print "{} <--- {}".format(word,
src_words[j] if j < len(src_words) else "<eos>")
i += 1
if i % 100 == 0:
sys.stdout.flush()
logger.debug(i)
print -numpy.sum(numpy.log(probs))
except StopIteration:
pass
else:
raise Exception("Unknown mode {}".format(args.mode))
if __name__ == "__main__":
main()
| {
"content_hash": "edfe41f9f015d179e5760abe5e612143",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 123,
"avg_line_length": 39.57959183673469,
"alnum_prop": 0.5366608229349283,
"repo_name": "zerkh/GroundHog",
"id": "d62dfdb8864c914ee22272e984af7e0ffb8eca85",
"size": "9720",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "experiments/nmt/score.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8023"
},
{
"name": "PHP",
"bytes": "372"
},
{
"name": "Perl",
"bytes": "28016"
},
{
"name": "Python",
"bytes": "462113"
},
{
"name": "Shell",
"bytes": "2189"
}
],
"symlink_target": ""
} |
"""Twitter-style pagination integration tests."""
from __future__ import unicode_literals
from el_pagination.tests.integration import SeleniumTestCase
class TwitterPaginationTest(SeleniumTestCase):
view_name = 'twitter'
def test_new_elements_loaded(self):
# Ensure a new page is loaded on click.
self.get()
with self.assertNewElements('object', range(1, 11)):
self.click_link(self.MORE)
def test_url_not_changed(self):
# Ensure the request is done using Ajax (the page does not refresh).
self.get()
with self.assertSameURL():
self.click_link(self.MORE)
def test_direct_link(self):
# Ensure direct links work.
self.get(page=4)
self.assertElements('object', range(16, 21))
self.assertIn('page=4', self.selenium.current_url)
def test_subsequent_page(self):
# Ensure next page is correctly loaded in a subsequent page.
self.get(page=2)
with self.assertNewElements('object', range(6, 16)):
self.click_link(self.MORE)
def test_multiple_show_more(self):
# Ensure new pages are loaded again and again.
self.get()
for page in range(2, 5):
expected_range = range(1, 5 * page + 1)
with self.assertSameURL():
with self.assertNewElements('object', expected_range):
self.click_link(self.MORE)
def test_no_more_link_in_last_page(self):
# Ensure there is no more link on the last page.
self.get(page=10)
self.asserLinksEqual(0, self.MORE)
| {
"content_hash": "b43f31acf79db8d600e4ef06475b0de7",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 33.541666666666664,
"alnum_prop": 0.6242236024844721,
"repo_name": "dehu4ka/lna",
"id": "f48c42322fd878d0a610207a31113db7c585bc6e",
"size": "1610",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "el_pagination/tests/integration/test_twitter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7385"
},
{
"name": "HTML",
"bytes": "75367"
},
{
"name": "JavaScript",
"bytes": "106914"
},
{
"name": "Python",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
} |
import xml.etree.ElementTree as ET
import pprint
import re
def get_user(element):
return
def process_map(filename):
"""
    Return the set of unique user ids ('uid' attributes) found in the file.
"""
users = set()
for _, element in ET.iterparse(filename):
try:
users.add(element.attrib['uid'])
except KeyError:
continue
return users
OSM_FILE = 'sample.osm'  # placeholder path to the OSM extract being audited
def test():
users = process_map(OSM_FILE)
pprint.pprint(users)
# assert len(users) == 6
if __name__ == "__main__":
test() | {
"content_hash": "74feb7b03a12446b7a560f83c8fdcfee",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 45,
"avg_line_length": 15.84375,
"alnum_prop": 0.5798816568047337,
"repo_name": "napjon/ds-nd",
"id": "9c8e52cec2aec27dcd3054eb9d854979c923fe47",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p3-wrangling/project.osm/02-codes/users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7031223"
},
{
"name": "Jupyter Notebook",
"bytes": "796962"
},
{
"name": "Python",
"bytes": "39373"
}
],
"symlink_target": ""
} |
"""SCons.Tool.Packaging.zip
The zip SRC packager.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/zip.py 2014/09/27 12:51:43 garyo"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Zip']
bld.set_suffix('.zip')
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
return bld(env, target, source)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "def85fe5871bef9366c280a75f48d258",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 82,
"avg_line_length": 39.13636363636363,
"alnum_prop": 0.7531939605110337,
"repo_name": "j-faria/OPEN",
"id": "09a320d51b10e0700db6c672489822aec3895087",
"size": "1722",
"binary": false,
"copies": "9",
"ref": "refs/heads/develop",
"path": "scons/scons-local-2.3.4/SCons/Tool/packaging/zip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "23647"
},
{
"name": "Fortran",
"bytes": "395666"
},
{
"name": "Makefile",
"bytes": "1443"
},
{
"name": "Python",
"bytes": "2424612"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.common import topics
from neutron import context
from neutron.extensions import firewall as fw_ext
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.firewall.agents import firewall_agent_api as api
LOG = logging.getLogger(__name__)
class FWaaSL3PluginApi(api.FWaaSPluginApiMixin):
"""Agent side of the FWaaS agent to FWaaS Plugin RPC API."""
def get_firewalls_for_tenant(self, context, **kwargs):
"""Get the Firewalls with rules from the Plugin to send to driver."""
LOG.debug(_("Retrieve Firewall with rules from Plugin"))
return self.client.call(context, 'get_firewalls_for_tenant',
host=self.host)
def get_tenants_with_firewalls(self, context, **kwargs):
"""Get all Tenants that have Firewalls configured from plugin."""
LOG.debug(_("Retrieve Tenants with Firewalls configured from Plugin"))
return self.client.call(context, 'get_tenants_with_firewalls',
host=self.host)
class FWaaSL3AgentRpcCallback(api.FWaaSAgentRpcCallbackMixin):
"""FWaaS Agent support to be used by Neutron L3 agent."""
def __init__(self, conf):
LOG.debug(_("Initializing firewall agent"))
self.conf = conf
fwaas_driver_class_path = cfg.CONF.fwaas.driver
self.fwaas_enabled = cfg.CONF.fwaas.enabled
if self.fwaas_enabled:
try:
self.fwaas_driver = importutils.import_object(
fwaas_driver_class_path)
LOG.debug(_("FWaaS Driver Loaded: '%s'"),
fwaas_driver_class_path)
except ImportError:
msg = _('Error importing FWaaS device driver: %s')
raise ImportError(msg % fwaas_driver_class_path)
self.services_sync = False
self.root_helper = config.get_root_helper(conf)
# setup RPC to msg fwaas plugin
self.fwplugin_rpc = FWaaSL3PluginApi(topics.FIREWALL_PLUGIN,
conf.host)
super(FWaaSL3AgentRpcCallback, self).__init__(host=conf.host)
def _get_router_info_list_for_tenant(self, routers, tenant_id):
"""Returns the list of router info objects on which to apply the fw."""
root_ip = ip_lib.IPWrapper(self.root_helper)
# Get the routers for the tenant
router_ids = [
router['id']
for router in routers
if router['tenant_id'] == tenant_id]
local_ns_list = root_ip.get_namespaces(
self.root_helper) if self.conf.use_namespaces else []
router_info_list = []
# Pick up namespaces for Tenant Routers
for rid in router_ids:
if self.router_info[rid].use_namespaces:
router_ns = self.router_info[rid].ns_name()
if router_ns in local_ns_list:
router_info_list.append(self.router_info[rid])
else:
router_info_list.append(self.router_info[rid])
return router_info_list
def _invoke_driver_for_plugin_api(self, context, fw, func_name):
"""Invoke driver method for plugin API and provide status back."""
LOG.debug(_("%(func_name)s from agent for fw: %(fwid)s"),
{'func_name': func_name, 'fwid': fw['id']})
try:
routers = self.plugin_rpc.get_routers(context)
router_info_list = self._get_router_info_list_for_tenant(
routers,
fw['tenant_id'])
if not router_info_list:
LOG.debug(_('No Routers on tenant: %s'), fw['tenant_id'])
# fw was created before any routers were added, and if a
# delete is sent then we need to ack so that plugin can
# cleanup.
if func_name == 'delete_firewall':
self.fwplugin_rpc.firewall_deleted(context, fw['id'])
return
LOG.debug(_("Apply fw on Router List: '%s'"),
[ri.router['id'] for ri in router_info_list])
# call into the driver
try:
self.fwaas_driver.__getattribute__(func_name)(
router_info_list,
fw)
if fw['admin_state_up']:
status = constants.ACTIVE
else:
status = constants.DOWN
except fw_ext.FirewallInternalDriverError:
LOG.error(_("Firewall Driver Error for %(func_name)s "
"for fw: %(fwid)s"),
{'func_name': func_name, 'fwid': fw['id']})
status = constants.ERROR
# delete needs different handling
if func_name == 'delete_firewall':
if status in [constants.ACTIVE, constants.DOWN]:
self.fwplugin_rpc.firewall_deleted(context, fw['id'])
else:
self.fwplugin_rpc.set_firewall_status(
context,
fw['id'],
status)
except Exception:
LOG.exception(
_("FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"),
{'func_name': func_name, 'fwid': fw['id']})
self.services_sync = True
return
def _invoke_driver_for_sync_from_plugin(self, ctx, router_info_list, fw):
"""Invoke the delete driver method for status of PENDING_DELETE and
update method for all other status to (re)apply on driver which is
Idempotent.
"""
if fw['status'] == constants.PENDING_DELETE:
try:
self.fwaas_driver.delete_firewall(router_info_list, fw)
self.fwplugin_rpc.firewall_deleted(
ctx,
fw['id'])
except fw_ext.FirewallInternalDriverError:
LOG.error(_("Firewall Driver Error on fw state %(fwmsg)s "
"for fw: %(fwid)s"),
{'fwmsg': fw['status'], 'fwid': fw['id']})
self.fwplugin_rpc.set_firewall_status(
ctx,
fw['id'],
constants.ERROR)
else:
# PENDING_UPDATE, PENDING_CREATE, ...
try:
self.fwaas_driver.update_firewall(router_info_list, fw)
if fw['admin_state_up']:
status = constants.ACTIVE
else:
status = constants.DOWN
except fw_ext.FirewallInternalDriverError:
LOG.error(_("Firewall Driver Error on fw state %(fwmsg)s "
"for fw: %(fwid)s"),
{'fwmsg': fw['status'], 'fwid': fw['id']})
status = constants.ERROR
self.fwplugin_rpc.set_firewall_status(
ctx,
fw['id'],
status)
def _process_router_add(self, ri):
"""On router add, get fw with rules from plugin and update driver."""
LOG.debug(_("Process router add, router_id: '%s'"), ri.router['id'])
routers = []
routers.append(ri.router)
router_info_list = self._get_router_info_list_for_tenant(
routers,
ri.router['tenant_id'])
if router_info_list:
# Get the firewall with rules
# for the tenant the router is on.
ctx = context.Context('', ri.router['tenant_id'])
fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx)
LOG.debug(_("Process router add, fw_list: '%s'"),
[fw['id'] for fw in fw_list])
for fw in fw_list:
self._invoke_driver_for_sync_from_plugin(
ctx,
router_info_list,
fw)
def process_router_add(self, ri):
"""On router add, get fw with rules from plugin and update driver."""
# avoid msg to plugin when fwaas is not configured
if not self.fwaas_enabled:
return
try:
self._process_router_add(ri)
except Exception:
LOG.exception(
_("FWaaS RPC info call failed for '%s'."),
ri.router['id'])
self.services_sync = True
def process_services_sync(self, ctx):
"""On RPC issues sync with plugin and apply the sync data."""
# avoid msg to plugin when fwaas is not configured
if not self.fwaas_enabled:
return
try:
# get all routers
routers = self.plugin_rpc.get_routers(ctx)
# get the list of tenants with firewalls configured
# from the plugin
tenant_ids = self.fwplugin_rpc.get_tenants_with_firewalls(ctx)
LOG.debug(_("Tenants with Firewalls: '%s'"), tenant_ids)
for tenant_id in tenant_ids:
ctx = context.Context('', tenant_id)
fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx)
if fw_list:
# if fw present on tenant
router_info_list = self._get_router_info_list_for_tenant(
routers,
tenant_id)
if router_info_list:
LOG.debug(_("Router List: '%s'"),
[ri.router['id'] for ri in router_info_list])
LOG.debug(_("fw_list: '%s'"),
[fw['id'] for fw in fw_list])
# apply sync data on fw for this tenant
for fw in fw_list:
# fw, routers present on this host for tenant
# install
LOG.debug(_("Apply fw on Router List: '%s'"),
[ri.router['id']
for ri in router_info_list])
# no need to apply sync data for ACTIVE fw
if fw['status'] != constants.ACTIVE:
self._invoke_driver_for_sync_from_plugin(
ctx,
router_info_list,
fw)
self.services_sync = False
except Exception:
LOG.exception(_("Failed fwaas process services sync"))
self.services_sync = True
def create_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to create a firewall."""
return self._invoke_driver_for_plugin_api(
context,
firewall,
'create_firewall')
def update_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to update a firewall."""
return self._invoke_driver_for_plugin_api(
context,
firewall,
'update_firewall')
def delete_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to delete a firewall."""
return self._invoke_driver_for_plugin_api(
context,
firewall,
'delete_firewall')
| {
"content_hash": "014cb8812fff52e92f614705341144f4",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 79,
"avg_line_length": 43.59695817490494,
"alnum_prop": 0.5244200244200244,
"repo_name": "beagles/neutron_hacking",
"id": "62f9d9ef3257b4b616dc4cd694d154bef46c393b",
"size": "12367",
"binary": false,
"copies": "1",
"ref": "refs/heads/neutron_oslo_messaging",
"path": "neutron/services/firewall/agents/l3reference/firewall_l3_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8125263"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""
Test removing groups from users in the fake profile server.
"""
from __future__ import absolute_import
# pylint:disable=no-name-in-module
from nose.tools import assert_not_in
from . import FakeProfileServerTestCase
class RemoveGroupsTestCase(FakeProfileServerTestCase):
"""
Test removing groups from users.
"""
def test_remove_groups(self):
"""
Test the user is removed from all the groups in the list
"""
calculon = {
'email': '[email protected]',
}
fry = {
'email': '[email protected]',
}
# Create the groups by adding users to groups
self.mock_ps.add_groups(calculon, ['group1', 'group2'])
self.mock_ps.add_groups(fry, ['group1'])
        # Remove the user from both groups, including one he doesn't belong to
self.mock_ps.remove_groups(fry, ['group1', 'group2'])
users = self.mock_ps.get_group('group1')
assert_not_in('[email protected]',
[user['email'] for user in users])
users = self.mock_ps.get_group('group2')
assert_not_in('[email protected]',
[user['email'] for user in users])
| {
"content_hash": "2a686456cd910a75de3062698d06e6a6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 64,
"avg_line_length": 28.441860465116278,
"alnum_prop": 0.5968928863450531,
"repo_name": "infoxchange/ixprofile-client",
"id": "9333265cf750110fbfda34b5245fae1600701e06",
"size": "1223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ixprofile_client/tests/fake_profile_server/test_remove_groups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "4603"
},
{
"name": "HTML",
"bytes": "611"
},
{
"name": "Python",
"bytes": "92951"
}
],
"symlink_target": ""
} |
'''
Given a singly linked list L: L0->L1->...->Ln-1->Ln,
reorder it to: L0->Ln->L1->Ln-1->L2->Ln-2->...
You must do this in-place without altering the nodes' values.
For example,
Given {1,2,3,4}, reorder it to {1,4,2,3}.
'''
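# Approach (a sketch of the in-place strategy implemented below): advance a fast
# and a slow pointer to find the midpoint, cut the list there, reverse the
# second half, then interleave ("zip") the two halves node by node.  For
# {1,2,3,4} the halves are {1,2} and {4,3}, which zip to {1,4,2,3}.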
from node_struct import ListNode
class Solution(object):
def reorderList(self, head):
"""
:type head: ListNode
:rtype: void Do not return anything, modify head in-place instead.
"""
if (not head):
return None
quick_cursor = head
slow_cursor = head
while (quick_cursor and quick_cursor.next):
quick_cursor = quick_cursor.next.next
slow_cursor = slow_cursor.next
sec_head = slow_cursor.next
slow_cursor.next = None
new_head = self.reverse(sec_head)
self.zip(head, new_head)
def zip(self, head1, head2):
pivot = ListNode(0)
pivot.next = head1
cursor = pivot
while (head1 or head2):
if head1:
cursor.next=head1
cursor = cursor.next
head1 = head1.next
if head2:
cursor.next=head2
cursor = cursor.next
head2 = head2.next
return pivot.next
def reverse(self, head):
if (not head or not head.next):
return head
prev = None
curr = head
next = head.next
while curr.next:
next = curr.next
curr.next = prev
prev = curr
curr = next
curr.next = prev
return curr
if __name__ == '__main__':
list_array = list()
head = ListNode(0)
cursor_node = head
for index in range(1,5):
cursor_node.next = ListNode(index)
cursor_node = cursor_node.next
solution = Solution()
    solution.reorderList(head)  # reorders in place; the method returns None
    while head:
        print head.x
        head = head.next
| {
"content_hash": "f25f41d221baf7faf4b948e26d375314",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 74,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.5381074168797954,
"repo_name": "shub0/algorithm-data-structure",
"id": "9c8aa0f3079d8f485fa6deeca22514b43fac4fee",
"size": "1975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/reorder_list.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "166293"
},
{
"name": "Python",
"bytes": "487573"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str,
super, zip)
# local imports
from .UpdateResourcesParams import UpdateResourcesParams
class AddResourcesParams(UpdateResourcesParams):
"""Holds parameter values for the "addResources" operation on a user content item."""
@property
def archive(self):
self._get_nullable("archive")
@archive.setter
def archive(self, value):
self._set_nullable_bool("archive", value) | {
"content_hash": "b30816750c2e4d55e9da0f82ab4d9af4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 117,
"avg_line_length": 36.88235294117647,
"alnum_prop": 0.6953748006379585,
"repo_name": "DavidWhittingham/agsadmin",
"id": "7a791e7ec9ac48dbc4c7f9944d7c5c84036ee1c6",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "agsadmin/sharing_admin/content/users/AddResourcesParams.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "442"
},
{
"name": "Python",
"bytes": "173794"
}
],
"symlink_target": ""
} |
import logging
import random
import time
log = logging.getLogger(__name__)
class LoadTest(object):
def __init__(self, gun):
self.gun = gun
def case1(self, task):
with self.gun.measure(task):
log.info("Shoot case 1: %s", task.data)
time.sleep(random.random())
def case2(self, task):
with self.gun.measure(task) as m:
m.action = "PREPARE"
log.info("Prepare case 2: %s", task.data)
time.sleep(random.random())
with self.gun.measure(task) as m:
m.action = "SHOOT"
log.info("Shoot case 2: %s", task.data)
time.sleep(random.random())
raise RuntimeError()
def default(self, task):
with self.gun.measure(task):
log.info("Shoot default case: %s", task.data)
time.sleep(random.random())
def setup(self, param):
log.info("Setting up LoadTest: %s", param)
def teardown(self):
log.info("Tearing down LoadTest")
| {
"content_hash": "a5ce872c07c47c676cba2305204fa9c2",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 57,
"avg_line_length": 27.513513513513512,
"alnum_prop": 0.5618860510805501,
"repo_name": "direvius/bfg",
"id": "b25713e8b086533945bdeaa6b06ac258d583dd94",
"size": "1018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/examples/gun.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50193"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cfsite.settings.prod")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "86326af8c3e84afb75b5ba8d98e27bc2",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.7112068965517241,
"repo_name": "susanctu/Crazyfish-Public",
"id": "de279032a495dd17a276ffbf020a9add27be1164",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54584"
},
{
"name": "JavaScript",
"bytes": "71795"
},
{
"name": "Python",
"bytes": "116379"
}
],
"symlink_target": ""
} |
import os
import zlib
import pytest
from ..algorithms import crc32
crc32_implementations = [crc32.crc32_slice_by_8]
if crc32.have_clmul:
crc32_implementations.append(crc32.crc32_clmul)
@pytest.mark.parametrize('implementation', crc32_implementations)
def test_crc32(implementation):
# This includes many critical values, like zero length, 3/4/5, 6/7/8 and so on which are near and on
# alignment boundaries. This is of course just a sanity check ie. "did it compile all right?".
data = os.urandom(256)
initial_crc = 0x12345678
for i in range(0, 256):
d = data[:i]
assert zlib.crc32(d, initial_crc) == implementation(d, initial_crc)
| {
"content_hash": "fd09eda56f82b4ea775b526e90866c12",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 32.23809523809524,
"alnum_prop": 0.7149187592319055,
"repo_name": "edgimar/borg",
"id": "4eb59fa880ce4d7b5042d0b8b3f549a4253f5fa6",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/borg/testsuite/crc32.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "98810"
},
{
"name": "HTML",
"bytes": "66027"
},
{
"name": "Python",
"bytes": "1124319"
},
{
"name": "Shell",
"bytes": "2267"
}
],
"symlink_target": ""
} |
"""The influence of windowing of log. sweep signals when using a
Kaiser Window by fixing beta (=7) and fade_out (=0).
fstart = 1 Hz
fstop = 22050 Hz
FIR-Filter: Bandstop
Deconvolution: Unwindowed Excitation
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import generation
import ir_imitation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter
import numpy as np
# Parameters of the measuring system
fs = 44100
fstart = 1
fstop = 22050
duration = 1
pad = 4
# Generate excitation signal
excitation = generation.log_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
awgn = -30
noise_system = measurement_chain.additive_noise(awgn)
# FIR-Filter-System
f_low = 5000
f_high = 6000
order = 2
bandstop_system = measurement_chain.bandstop(f_low, f_high, fs, order)
# Combinate system elements
system = measurement_chain.chained(bandstop_system, noise_system)
# Lists
beta = 7
fade_in_list = np.arange(0, 1001, 1)
fade_out = 0
t_noise = 0.004
# Spectrum of bandstop for reference
bandstop_f = calculation.butter_bandstop(f_low, f_high, fs, N * 2 + 1, order)
def get_results(fade_in):
excitation_windowed = excitation * windows.window_kaiser(N,
fade_in,
fade_out,
fs, beta)
excitation_windowed_zeropadded = generation.zero_padding(
excitation_windowed, pad, fs)
excitation_zeropadded = generation.zero_padding(excitation, pad, fs)
system_response = system(excitation_windowed_zeropadded)
ir = calculation.deconv_process(excitation_zeropadded,
system_response,
fs)
return ir
with open("log_sweep_kaiser_window_script3_1.txt", "w") as f:
for fade_in in fade_in_list:
ir = get_results(fade_in)
        pnr = calculation.pnr_db(ir[0], ir[int(t_noise * fs):pad * fs])
spectrum_distance = calculation.vector_distance(
bandstop_f, np.fft.rfft(ir[:pad * fs]))
f.write(
str(fade_in) + " " + str(pnr) +
" " + str(spectrum_distance) + " \n")
| {
"content_hash": "603f06d8371c9bd46fd1f57d7116cc09",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 77,
"avg_line_length": 24.904255319148938,
"alnum_prop": 0.6202477573686459,
"repo_name": "spatialaudio/sweep",
"id": "773169f2bd18ec305e4faa5564ad58df92b4cab0",
"size": "2365",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "log_sweep_kaiser_window_script3/log_sweep_kaiser_window_script3_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101013"
}
],
"symlink_target": ""
} |
from apiclient.discovery import build
from apiclient.errors import HttpError, UnknownApiNameOrVersion
import os
from httplib2 import ServerNotFoundError
YOUTUBE_DEVELOPER_KEY = os.environ.get('YT_AUTH')
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
TERMS = ['remix', 'music', 'song', 'parody', 'jam', 'dance']
def youtube_search(keyword, max_results=20):
"""Query YouTube API for search results based off keyword search."""
try:
youtube = build(
YOUTUBE_API_SERVICE_NAME,
YOUTUBE_API_VERSION,
developerKey=YOUTUBE_DEVELOPER_KEY
)
        search_response = youtube.search().list(
            q=keyword + ' music',
type='video',
part='id,snippet',
maxResults=max_results).execute()
return search_response
except HttpError as err:
print('An HTTP error has occurred. Please check YT Developer Key.')
return err
except UnknownApiNameOrVersion:
print('Please check your API name or version.')
raise UnknownApiNameOrVersion
except ServerNotFoundError:
print('Server not found. Please connect and try again.')
raise ServerNotFoundError
except TypeError:
print('Keyword must be a string.')
raise TypeError
def youtube_parse(search_result):
"""Parse the YouTube search result to output video ID tag."""
video_id_uris = []
try:
search_items = search_result.get('items', [])
for result in search_items:
video_id = result['id']['videoId']
video_channel = result['snippet']['channelTitle']
video_title = result['snippet']['title']
video_id_uris.append((video_id, video_channel, video_title))
return video_id_uris
except AttributeError:
return video_id_uris
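# Example shape of the parsed output (values are purely illustrative):
#   [('a1b2c3d4e5f', 'SomeArtistVEVO', 'Some Artist - Some Song (Audio)'), ...]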
def term_checker(title):
"""Check to see if keywords are in the title of the video."""
for term in TERMS:
if term in title.lower():
return True
return False
def url_gen(video_id):
"""Create and return the url for the given video."""
yt_path = 'https://www.youtube.com/'
yt_uri = 'watch?v=' + video_id
yt_url = yt_path + yt_uri
return yt_url
def generate_youtube_link(parsed_list):
"""Generate a youtube video link from parsed list of YouTube video IDs."""
try:
for video in parsed_list:
if 'VEVO' in video[1]:
return (url_gen(video[0]), True)
for video in parsed_list:
if term_checker(video[2]):
return (url_gen(video[0]), True)
return (url_gen(parsed_list[0][0]), False)
except IndexError:
return (url_gen('b_ILDFp5DGA'), False)
def get_link(trend):
"""Get the single link from the entered trend."""
return generate_youtube_link(youtube_parse(youtube_search(trend)))
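# Usage sketch (hypothetical trend string; assumes the YT_AUTH environment
# variable holds a valid YouTube Data API key):
#   url, keyword_match = get_link('some trending topic')
# where url is a YouTube watch URL and keyword_match is True when a VEVO
# channel or a music-related keyword was found among the search results.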
| {
"content_hash": "21ebd9f8303a2585cc1e40ffe7dcdfa2",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 32.741573033707866,
"alnum_prop": 0.6190803019903912,
"repo_name": "icarrera/twitter-tunes",
"id": "0286657ef612f145642a78d6c5aabf8be41481a7",
"size": "2929",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging",
"path": "twitter_tunes/scripts/youtube_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17144"
},
{
"name": "JavaScript",
"bytes": "1510"
},
{
"name": "Python",
"bytes": "55059"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
} |
from djangotoolbox.db.base import NonrelDatabaseIntrospection
class DatabaseIntrospection(NonrelDatabaseIntrospection):
# def __init__(self, manager):
# self.manager = manager
def get_cass_keyspace_list(self):
return self.manager.list_keyspaces()
def get_cass_keyspace_properties(self, keyspace_name):
return self.manager.get_keyspace_properties(keyspace_name)
def get_cass_column_families(self):
return self.connection.get().get_keyspace_description().keys()
def get_cass_keyspace_column_families(self, keyspace_name):
return self.manager.get_keyspace_column_families(keyspace_name) | {
"content_hash": "2fee939b244dbb95e4b06794853ccfe1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 65,
"avg_line_length": 27.636363636363637,
"alnum_prop": 0.7796052631578947,
"repo_name": "jabadabadu/cassango",
"id": "2a6ce59447ab9ff1b3ae01a1a41d3be71a1aff28",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cassango/introspection.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "25698"
}
],
"symlink_target": ""
} |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("GaussianNB" , "iris" , "postgresql")
| {
"content_hash": "0d7c241f3cd16f7c5cd388d725f80bfb",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 33.5,
"alnum_prop": 0.7761194029850746,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "78333c9367995c94ec30ae5f28312d16d84f9556",
"size": "134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/iris/ws_iris_GaussianNB_postgresql_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
import os.path
import unittest
import angr
import ailment
from angr.analyses.decompiler.optimization_passes.base_ptr_save_simplifier import (
BasePointerSaveSimplifier,
)
test_location = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "..", "..", "binaries", "tests"
)
def _get_block(clinic, addr):
for block in clinic.graph.nodes():
if block.addr == addr:
return block
return None
def check_bp_save_fauxware(arch):
p = angr.Project(
os.path.join(test_location, arch, "fauxware"), auto_load_libs=False
)
cfg = p.analyses.CFGFast(normalize=True)
main = p.kb.functions["main"]
optimization_passes = [BasePointerSaveSimplifier]
dra = p.analyses.Decompiler(main, cfg=cfg, optimization_passes=optimization_passes)
first_block_stmts = dra.codegen._sequence.nodes[0].nodes[0].statements
for stmt in first_block_stmts:
if isinstance(stmt, ailment.Stmt.Store):
assert not (
isinstance(stmt.data, ailment.Expr.Register)
and stmt.data.reg_offset == p.arch.bp_offset
) or (
isinstance(stmt.data, ailment.Expr.StackBaseOffset)
and stmt.data.offset == 0
)
# pylint: disable=missing-class-docstring
# pylint: disable=no-self-use
class TestBaseptrSaveSimplifier(unittest.TestCase):
def test_baseptr_save_simplifier_amd64(self):
# decompile all:main and make sure the first and the last blocks do not save or restore to rbp
bin_path = os.path.join(test_location, "x86_64", "all")
proj = angr.Project(bin_path, auto_load_libs=False, load_debug_info=True)
cfg = proj.analyses.CFG(data_references=True, normalize=True)
optimization_passes = [BasePointerSaveSimplifier]
main_func = cfg.functions["main"]
dec = proj.analyses.Decompiler(
main_func, cfg=cfg, optimization_passes=optimization_passes
)
entry_block = _get_block(dec.clinic, main_func.addr)
endpoint_block = _get_block(dec.clinic, next(iter(main_func.endpoints)).addr)
assert entry_block is not None
assert endpoint_block is not None
for stmt in entry_block.statements:
if isinstance(stmt, ailment.Stmt.Store) and isinstance(
stmt.data, ailment.Expr.StackBaseOffset
):
assert (
False
), "Found a base-pointer saving statement in the first block."
for stmt in endpoint_block.statements:
if (
isinstance(stmt, ailment.Stmt.Assignment)
and isinstance(stmt.dst, ailment.Expr.Register)
and stmt.dst.reg_offset == proj.arch.bp_offset
):
assert (
False
), "Found a base-pointer restoring statement in the last block."
def test_bp_save_amd64_fauxware(self):
check_bp_save_fauxware("x86_64")
def test_bp_save_armel_fauxware(self):
check_bp_save_fauxware("armel")
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "37d45c75e21c61484ce81a61862d241f",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 102,
"avg_line_length": 34.30769230769231,
"alnum_prop": 0.6236386931454196,
"repo_name": "angr/angr",
"id": "cd762a942cbd17c2c904250dd3164a22a4964307",
"size": "3122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_baseptr_save_simplifier.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6694"
},
{
"name": "C++",
"bytes": "146292"
},
{
"name": "Makefile",
"bytes": "946"
},
{
"name": "Python",
"bytes": "27717304"
}
],
"symlink_target": ""
} |
"""AUACM top package"""
import auacm.common, auacm.main, auacm.user, auacm.problems, auacm.submit, auacm.competition, auacm.exceptions
from auacm.common import DEBUG, BASE_URL, logo, session
import auacm.utils
# Package metadata
__description__ = 'A command line interface to the Auburn ACM website'
__license__ = 'MIT'
__uri__ = 'https://github.com/BrandonLMorris/auacm-cli'
__version__ = '0.4'
__author__ = 'Brandon Morris'
__email__ = '[email protected]'
| {
"content_hash": "c7958cc779eae7c9b1367a3c5cabdb3a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 110,
"avg_line_length": 33.57142857142857,
"alnum_prop": 0.7191489361702128,
"repo_name": "BrandonLMorris/auacm-cli",
"id": "2847bc08fadd409b0c9fa9e994b7fc61fd6f0b87",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/auacm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37369"
}
],
"symlink_target": ""
} |
from webob import exc
from nova.api.openstack import wsgi
class ConsolesController(wsgi.Controller):
"""(Removed) The Consoles controller for the OpenStack API.
This was removed during the Ussuri release along with the nova-console
service.
"""
@wsgi.expected_errors(410)
def index(self, req, server_id):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def create(self, req, server_id, body):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def show(self, req, server_id, id):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def delete(self, req, server_id, id):
raise exc.HTTPGone()
| {
"content_hash": "814b4c3428a501aee3cefdb06fdc4db7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 24.88888888888889,
"alnum_prop": 0.6636904761904762,
"repo_name": "mahak/nova",
"id": "16243d56ff06fc5f103501c6a77bbadbe0efefab",
"size": "1308",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/consoles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
} |
"""Commands that can be used to operate on activity summaries."""
from constants import constants
from core.domain import activity_services
from core.domain import collection_services
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import stats_jobs_continuous
from core.domain import user_services
import utils
_LIBRARY_INDEX_GROUPS = [{
'header_i18n_id': 'I18N_LIBRARY_GROUPS_MATHEMATICS_&_STATISTICS',
'search_categories': [
'Mathematics', 'Algebra', 'Arithmetic', 'Calculus', 'Combinatorics',
'Geometry', 'Graph Theory', 'Logic', 'Probability', 'Statistics',
'Trigonometry',
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_COMPUTING',
'search_categories': ['Algorithms', 'Computing', 'Programming'],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_SCIENCE',
'search_categories': [
'Astronomy', 'Biology', 'Chemistry', 'Engineering', 'Environment',
'Medicine', 'Physics',
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_HUMANITIES',
'search_categories': [
'Architecture', 'Art', 'Music', 'Philosophy', 'Poetry'
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_LANGUAGES',
'search_categories': [
'Languages', 'Reading', 'English', 'Latin', 'Spanish', 'Gaulish'
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_SOCIAL_SCIENCE',
'search_categories': [
'Business', 'Economics', 'Geography', 'Government', 'History', 'Law'
],
}]
def get_human_readable_contributors_summary(contributors_summary):
"""Gets contributors summary in human readable form.
Args:
contributors_summary: dict. The keys are user ids and
the values are the number of commits made by that user.
Returns:
dict. Dicts of contributors in human readable form; the keys are
usernames and the values are a dict. Example:
{
'albert': {
'num_commits': 10,
},
}
"""
contributor_ids = contributors_summary.keys()
contributor_usernames = user_services.get_human_readable_user_ids(
contributor_ids)
return {
contributor_usernames[ind]: {
'num_commits': contributors_summary[contributor_ids[ind]],
}
for ind in xrange(len(contributor_ids))
}
def get_learner_collection_dict_by_id(
collection_id, user_id, strict=True, allow_invalid_explorations=False,
version=None):
"""Gets a dictionary representation of a collection given by the provided
collection ID. This dict includes user-specific playthrough information.
Args:
collection_id: str. The id of the collection.
user_id: str. The user_id of the learner.
strict: bool. Whether to fail noisily if no collection with the given
id exists in the datastore.
allow_invalid_explorations: bool. Whether to also return explorations
that are invalid, such as deleted/private explorations.
version: str or None. The version number of the collection to be
retrieved. If it is None, the latest version will be retrieved.
Returns:
        dict. The dict returned by collection_domain.Collection.to_dict(),
        augmented with extra information useful for the collection learner
        view: the learner's progress in the collection, summaries of the
        explorations referenced within the collection, and a slightly nicer
        data structure for frontend work.
Raises:
ValidationError: If the collection retrieved using the given
ID references non-existent explorations.
"""
collection = collection_services.get_collection_by_id(
collection_id, strict=strict, version=version)
exp_ids = collection.exploration_ids
exp_summary_dicts = get_displayable_exp_summary_dicts_matching_ids(
exp_ids, editor_user_id=user_id)
exp_summaries_dict_map = {
exp_summary_dict['id']: exp_summary_dict
for exp_summary_dict in exp_summary_dicts
}
# TODO(bhenning): Users should not be recommended explorations they have
# completed outside the context of a collection (see #1461).
next_exploration_ids = None
completed_exp_ids = None
if user_id:
completed_exp_ids = (
collection_services.get_valid_completed_exploration_ids(
user_id, collection))
next_exploration_ids = collection.get_next_exploration_ids(
completed_exp_ids)
else:
# If the user is not logged in or they have not completed any of
# the explorations yet within the context of this collection,
# recommend the initial explorations.
next_exploration_ids = collection.init_exploration_ids
completed_exp_ids = []
collection_dict = collection.to_dict()
collection_dict['playthrough_dict'] = {
'next_exploration_ids': next_exploration_ids,
'completed_exploration_ids': completed_exp_ids
}
collection_dict['version'] = collection.version
collection_is_public = rights_manager.is_collection_public(collection_id)
# Insert an 'exploration' dict into each collection node, where the
# dict includes meta information about the exploration (ID and title).
for collection_node in collection_dict['nodes']:
exploration_id = collection_node['exploration_id']
summary_dict = exp_summaries_dict_map.get(exploration_id)
if not allow_invalid_explorations:
if not summary_dict:
raise utils.ValidationError(
'Expected collection to only reference valid '
'explorations, but found an exploration with ID: %s (was '
'the exploration deleted or is it a private exploration '
'that you do not have edit access to?)'
% exploration_id)
if collection_is_public and rights_manager.is_exploration_private(
exploration_id):
raise utils.ValidationError(
'Cannot reference a private exploration within a public '
'collection, exploration ID: %s' % exploration_id)
if summary_dict:
collection_node['exploration_summary'] = summary_dict
else:
collection_node['exploration_summary'] = None
return collection_dict
def get_displayable_collection_summary_dicts_matching_ids(collection_ids):
"""Returns a list of collection summary dicts corresponding to the given
collection ids.
Args:
collection_ids: list(str). A list of collection ids.
    Returns:
list(dict). Each element in this list is a collection summary dict.
These elements are returned in the same order as that given
in collection_ids.
"""
collection_summaries = (
collection_services.get_collection_summaries_matching_ids(
collection_ids))
return _get_displayable_collection_summary_dicts(collection_summaries)
def get_exp_metadata_dicts_matching_query(query_string, search_cursor, user_id):
"""Given a query string and a search cursor, returns a list of exploration
metadata dicts that satisfy the search query.
Args:
query_string: str. The search query for which the search is to be
performed.
search_cursor: str or None. The cursor location to start the search
from. If None, the returned values are from the beginning
of the results list.
user_id: str or None. The id of the user performing the query.
If not None, private explorations that are editable by this user
are also returned.
Returns:
exploration_list: list(dict). A list of metadata dicts for explorations
matching the query.
new_search_cursor: str. New search cursor location.
"""
exp_ids, new_search_cursor = (
exp_services.get_exploration_ids_matching_query(
query_string, cursor=search_cursor))
exploration_list = get_exploration_metadata_dicts(exp_ids, user_id)
return exploration_list, new_search_cursor
def get_exploration_metadata_dicts(exploration_ids, editor_user_id=None):
"""Given a list of exploration ids, optionally filters the list for
explorations that are currently non-private and not deleted, and returns a
list of dicts of the corresponding exploration summaries for collection
node search.
Args:
exploration_ids: list(str). A list of exploration ids for which
exploration metadata dicts are to be returned.
editor_user_id: str or None. The id of the user performing the query.
If not None, private explorations that are editable by this user
are also returned.
Returns:
list(dict). A list of metadata dicts corresponding to the given
exploration ids. Each dict has three keys:
'id': the exploration id;
'title': the exploration title;
'objective': the exploration objective.
"""
exploration_summaries = (
exp_services.get_exploration_summaries_matching_ids(exploration_ids))
filtered_exploration_summaries = []
for exploration_summary in exploration_summaries:
if exploration_summary is None:
continue
if exploration_summary.status == (
rights_manager.ACTIVITY_STATUS_PRIVATE):
if editor_user_id is None:
continue
if not rights_manager.Actor(editor_user_id).can_edit(
constants.ACTIVITY_TYPE_EXPLORATION,
exploration_summary.id):
continue
filtered_exploration_summaries.append(exploration_summary)
return [
summary.to_metadata_dict()
for summary in filtered_exploration_summaries]
def get_displayable_exp_summary_dicts_matching_ids(
exploration_ids, editor_user_id=None):
"""Gets a summary of explorations in human readable form from
exploration ids.
Given a list of exploration ids, optionally filters the list for
explorations that are currently non-private and not deleted, and returns a
list of dicts of the corresponding exploration summaries. This function can
also filter based on a user ID who has edit access to the corresponding
exploration, where the editor ID is for private explorations. Please use
this function when needing summary information to display on exploration
summary tiles in the frontend.
Args:
exploration_ids: list(str). List of exploration ids.
editor_user_id: str or None. If provided, the returned value is
filtered based on a user ID who has edit access to the
corresponding explorations. Otherwise, the returned list is not
filtered.
    Returns:
list(dict). A list of exploration summary dicts in human readable form.
Example:
[ {
'category': u'A category',
'community_owned': False,
'id': 'eid2',
'language_code': 'en',
'num_views': 0,
'objective': u'An objective',
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}, ]
"""
exploration_summaries = (
exp_services.get_exploration_summaries_matching_ids(exploration_ids))
filtered_exploration_summaries = []
for exploration_summary in exploration_summaries:
if exploration_summary is None:
continue
if exploration_summary.status == (
rights_manager.ACTIVITY_STATUS_PRIVATE):
if editor_user_id is None:
continue
if not rights_manager.Actor(editor_user_id).can_edit(
constants.ACTIVITY_TYPE_EXPLORATION,
exploration_summary.id):
continue
filtered_exploration_summaries.append(exploration_summary)
return get_displayable_exp_summary_dicts(filtered_exploration_summaries)
def get_displayable_exp_summary_dicts(exploration_summaries):
"""Gets a summary of explorations in human readable form.
Given a list of exploration summary domain objects, returns a list,
with the same number of elements, of the corresponding human-readable
exploration summary dicts.
This assumes that all the exploration summary domain objects passed in are
valid (i.e., none of them are None).
Args:
exploration_summaries: list(ExplorationSummary). List of exploration
summary objects.
    Returns:
list(dict). A list of exploration summary dicts in human readable form.
Example:
[ {
'category': u'A category',
'community_owned': False,
'id': 'eid2',
'language_code': 'en',
'num_views': 0,
'objective': u'An objective',
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}, ]
"""
exploration_ids = [
exploration_summary.id
for exploration_summary in exploration_summaries]
view_counts = (
stats_jobs_continuous.StatisticsAggregator.get_views_multi(
exploration_ids))
displayable_exp_summaries = []
for ind, exploration_summary in enumerate(exploration_summaries):
if not exploration_summary:
continue
summary_dict = {
'id': exploration_summary.id,
'title': exploration_summary.title,
'activity_type': constants.ACTIVITY_TYPE_EXPLORATION,
'category': exploration_summary.category,
'created_on_msec': utils.get_time_in_millisecs(
exploration_summary.exploration_model_created_on),
'objective': exploration_summary.objective,
'language_code': exploration_summary.language_code,
'last_updated_msec': utils.get_time_in_millisecs(
exploration_summary.exploration_model_last_updated
),
'human_readable_contributors_summary': (
get_human_readable_contributors_summary(
exploration_summary.contributors_summary)
),
'status': exploration_summary.status,
'ratings': exploration_summary.ratings,
'community_owned': exploration_summary.community_owned,
'tags': exploration_summary.tags,
'thumbnail_icon_url': utils.get_thumbnail_icon_url_for_category(
exploration_summary.category),
'thumbnail_bg_color': utils.get_hex_color_for_category(
exploration_summary.category),
'num_views': view_counts[ind],
}
displayable_exp_summaries.append(summary_dict)
return displayable_exp_summaries
def _get_displayable_collection_summary_dicts(collection_summaries):
"""Gets a summary of collections in human readable form.
Args:
collection_summaries: list(CollectionSummary). List of collection
summary domain object.
    Returns:
list(dict). A list of exploration summary dicts in human readable form.
Example:
[ {
'category': u'A category',
'community_owned': False,
'id': 'eid2',
'language_code': 'en',
'num_views': 0,
'objective': u'An objective',
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}, ]
"""
displayable_collection_summaries = []
for collection_summary in collection_summaries:
if collection_summary and collection_summary.status != (
rights_manager.ACTIVITY_STATUS_PRIVATE):
displayable_collection_summaries.append({
'id': collection_summary.id,
'title': collection_summary.title,
'category': collection_summary.category,
'activity_type': constants.ACTIVITY_TYPE_COLLECTION,
'objective': collection_summary.objective,
'language_code': collection_summary.language_code,
'tags': collection_summary.tags,
'node_count': collection_summary.node_count,
'last_updated_msec': utils.get_time_in_millisecs(
collection_summary.collection_model_last_updated),
'thumbnail_icon_url': (
utils.get_thumbnail_icon_url_for_category(
collection_summary.category)),
'thumbnail_bg_color': utils.get_hex_color_for_category(
collection_summary.category)})
return displayable_collection_summaries
def get_library_groups(language_codes):
"""Returns a list of groups for the library index page. Each group has a
header and a list of dicts representing activity summaries.
Args:
language_codes: list(str). A list of language codes. Only explorations
with these languages will be returned.
    Returns:
list(dict). A list of groups for the library index page. Each group is
represented by a dict with the following keys and values:
- activity_summary_dicts: list(dict). A list of dicts representing
activity summaries.
- categories: list(str). The list of group categories.
- header_i18n_id: str. The i18n id for the header of the category.
- has_full_results_page: bool. Whether the group header links to
a "full results" page. This is always True for the
"exploration category" groups.
- full_results_url: str. The URL to the corresponding "full results"
page.
"""
language_codes_suffix = ''
if language_codes:
language_codes_suffix = ' language_code=("%s")' % (
'" OR "'.join(language_codes))
def _generate_query(categories):
# This assumes that 'categories' is non-empty.
return 'category=("%s")%s' % (
'" OR "'.join(categories), language_codes_suffix)
# Collect all collection ids so that the summary details can be retrieved
# with a single get_multi() call.
all_collection_ids = []
header_id_to_collection_ids = {}
for group in _LIBRARY_INDEX_GROUPS:
collection_ids = collection_services.search_collections(
_generate_query(group['search_categories']), 8)[0]
header_id_to_collection_ids[group['header_i18n_id']] = collection_ids
all_collection_ids += collection_ids
collection_summaries = [
summary for summary in
collection_services.get_collection_summaries_matching_ids(
all_collection_ids)
if summary is not None]
collection_summary_dicts = {
summary_dict['id']: summary_dict
for summary_dict in _get_displayable_collection_summary_dicts(
collection_summaries)
}
# Collect all exp ids so that the summary details can be retrieved with a
# single get_multi() call.
all_exp_ids = []
header_to_exp_ids = {}
for group in _LIBRARY_INDEX_GROUPS:
exp_ids = exp_services.search_explorations(
_generate_query(group['search_categories']), 8)[0]
header_to_exp_ids[group['header_i18n_id']] = exp_ids
all_exp_ids += exp_ids
exp_summaries = [
summary for summary in
exp_services.get_exploration_summaries_matching_ids(all_exp_ids)
if summary is not None]
exp_summary_dicts = {
summary_dict['id']: summary_dict
for summary_dict in get_displayable_exp_summary_dicts(exp_summaries)
}
results = []
for group in _LIBRARY_INDEX_GROUPS:
summary_dicts = []
collection_ids_to_display = (
header_id_to_collection_ids[group['header_i18n_id']])
summary_dicts = [
collection_summary_dicts[collection_id]
for collection_id in collection_ids_to_display
if collection_id in collection_summary_dicts]
exp_ids_to_display = header_to_exp_ids[group['header_i18n_id']]
summary_dicts += [
exp_summary_dicts[exp_id] for exp_id in exp_ids_to_display
if exp_id in exp_summary_dicts]
if not summary_dicts:
continue
results.append({
'header_i18n_id': group['header_i18n_id'],
'categories': group['search_categories'],
'activity_summary_dicts': summary_dicts,
'has_full_results_page': True,
'full_results_url': None,
})
return results
def require_activities_to_be_public(activity_references):
"""Raises an exception if any activity reference in the list does not
exist, or is not public.
Args:
activity_references: list(ActivityReference). A list of
ActivityReference domain objects.
Raises:
Exception: Any activity reference in the list does not
exist, or is not public.
"""
exploration_ids, collection_ids = activity_services.split_by_type(
activity_references)
activity_summaries_by_type = [{
'type': constants.ACTIVITY_TYPE_EXPLORATION,
'ids': exploration_ids,
'summaries': exp_services.get_exploration_summaries_matching_ids(
exploration_ids),
}, {
'type': constants.ACTIVITY_TYPE_COLLECTION,
'ids': collection_ids,
'summaries': collection_services.get_collection_summaries_matching_ids(
collection_ids),
}]
for activities_info in activity_summaries_by_type:
for index, summary in enumerate(activities_info['summaries']):
if summary is None:
raise Exception(
'Cannot feature non-existent %s with id %s' %
(activities_info['type'], activities_info['ids'][index]))
if summary.status == rights_manager.ACTIVITY_STATUS_PRIVATE:
raise Exception(
'Cannot feature private %s with id %s' %
(activities_info['type'], activities_info['ids'][index]))
def get_featured_activity_summary_dicts(language_codes):
"""Returns a list of featured activities with the given language codes.
The return value is sorted according to the list stored in the datastore.
Args:
language_codes: list(str). A list of language codes. Only explorations
with these languages will be returned.
    Returns:
list(dict). Each dict in this list represents a featured activity.
For example:
[ {
'status': 'public',
'thumbnail_bg_color': '#a33f40',
'community_owned': False,
'tags': [],
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'id': 'eid2',
'category': 'A category',
'ratings': feconf.get_empty_ratings(),
'title': 'A title',
'num_views': 0,
'objective': 'An objective',
}, ]
"""
activity_references = activity_services.get_featured_activity_references()
exploration_ids, collection_ids = activity_services.split_by_type(
activity_references)
exp_summary_dicts = get_displayable_exp_summary_dicts_matching_ids(
exploration_ids)
col_summary_dicts = get_displayable_collection_summary_dicts_matching_ids(
collection_ids)
summary_dicts_by_id = {
constants.ACTIVITY_TYPE_EXPLORATION: {
summary_dict['id']: summary_dict
for summary_dict in exp_summary_dicts
},
constants.ACTIVITY_TYPE_COLLECTION: {
summary_dict['id']: summary_dict
for summary_dict in col_summary_dicts
},
}
featured_summary_dicts = []
for reference in activity_references:
if reference.id in summary_dicts_by_id[reference.type]:
summary_dict = summary_dicts_by_id[reference.type][reference.id]
if summary_dict and summary_dict['language_code'] in language_codes:
featured_summary_dicts.append(summary_dict)
return featured_summary_dicts
def get_top_rated_exploration_summary_dicts(language_codes, limit):
"""Returns a list of top rated explorations with the given language codes.
The return value is sorted in decreasing order of average rating.
Args:
language_codes: list(str). A list of language codes. Only explorations
with these languages will be returned.
limit: int. The maximum number of explorations to return.
    Returns:
        list(dict). Each dict in this list represents an exploration summary in
human readable form. The list is sorted in decreasing order of average
rating. For example:
[ {
'category': u'A category',
'community_owned': False,
'id': 'eid2',
'language_code': 'en',
'num_views': 0,
'objective': u'An objective',
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}, ]
"""
filtered_exp_summaries = [
exp_summary for exp_summary in
exp_services.get_top_rated_exploration_summaries(limit).values()
if exp_summary.language_code in language_codes and
sum(exp_summary.ratings.values()) > 0]
sorted_exp_summaries = sorted(
filtered_exp_summaries,
key=lambda exp_summary: exp_summary.scaled_average_rating,
reverse=True)
return get_displayable_exp_summary_dicts(sorted_exp_summaries)
def get_recently_published_exp_summary_dicts(limit):
"""Returns a list of recently published explorations.
Args:
limit: int. The maximum number of explorations to return.
    Returns:
        list(dict). Each dict in this list represents a recently published
        exploration summary in human readable form. For example:
[ {
'category': u'A category',
'community_owned': False,
'id': 'eid2',
'language_code': 'en',
'num_views': 0,
'objective': u'An objective',
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}, ]
"""
recently_published_exploration_summaries = [
exp_summary for exp_summary in
exp_services.get_recently_published_exp_summaries(limit).values()]
# Sort the recently published exploration summaries in reverse
# chronological order of when they were first published.
summaries = sorted(
recently_published_exploration_summaries,
key=lambda exp_summary: exp_summary.first_published_msec,
reverse=True)
return get_displayable_exp_summary_dicts(summaries)
| {
"content_hash": "a84e6c057e0c24087322ffa7d05285e9",
"timestamp": "",
"source": "github",
"line_count": 715,
"max_line_length": 80,
"avg_line_length": 39.033566433566435,
"alnum_prop": 0.6265720735246695,
"repo_name": "shaz13/oppia",
"id": "56b714bf52e109e8a41cbe483861681ff85c2e1e",
"size": "28532",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/summary_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "102650"
},
{
"name": "HTML",
"bytes": "944588"
},
{
"name": "JavaScript",
"bytes": "2788895"
},
{
"name": "Python",
"bytes": "3656185"
},
{
"name": "Shell",
"bytes": "46842"
}
],
"symlink_target": ""
} |
from version import VERSION
from setuptools import setup
install_requires = ['lxml==3.1.0',
'requests==1.1.0',
'tox==1.4.3']
setup(name='ispdb',
version=VERSION,
description='Interface to Mozilla ISP database',
author='sprt',
author_email='[email protected]',
classifiers=['Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Communications :: Email :: Email Clients (MUA)',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
# The following two have not been tested yet:
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
],
py_modules=['ispdb', 'version'],
license='MIT License',
install_requires=install_requires,
test_suite='test')
| {
"content_hash": "bca8cd8227d6a545725ac2927992e789",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 40.793103448275865,
"alnum_prop": 0.5224006762468301,
"repo_name": "dveeden/ispdb",
"id": "344d1c8182caacfeef0c411d75b5769a54b4c648",
"size": "1205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8213"
}
],
"symlink_target": ""
} |
__all__ = [
'init',
'make_ops_dirs',
]
import dataclasses
import logging
import tempfile
from pathlib import Path
import g1.files
from g1 import scripts
from g1.bases.assertions import ASSERT
from g1.containers import models as ctr_models
from g1.containers import scripts as ctr_scripts
from g1.texts import jsons
from . import bases
from . import envs as ops_envs
from . import models
from . import repos
from . import systemds
from . import tokens
LOG = logging.getLogger(__name__)
class PodBundleDir(repos.AbstractBundleDir):
deploy_instruction_type = models.PodDeployInstruction
def post_init(self):
ASSERT.predicate(self.path, Path.is_dir)
ASSERT.predicate(self.deploy_instruction_path, Path.is_file)
ASSERT.all((path for _, path in self.iter_images()), Path.is_file)
ASSERT.all((path for _, path in self.iter_volumes()), Path.is_file)
def iter_images(self):
for image in self.deploy_instruction.images:
yield image, (
self.path /
models.POD_BUNDLE_IMAGES_DIR_NAME /
image.name /
models.POD_BUNDLE_IMAGE_FILENAME
)
def iter_volumes(self):
for volume in self.deploy_instruction.volumes:
yield volume, (
self.path /
models.POD_BUNDLE_VOLUMES_DIR_NAME /
volume.name /
models.POD_BUNDLE_VOLUME_FILENAME
)
class PodOpsDir(repos.AbstractOpsDir):
metadata_type = models.PodMetadata
# XXX: This annotation works around pylint no-member false positives.
metadata: object
@staticmethod
def _get_pod_id_set(metadata):
return {config.pod_id for config in metadata.systemd_unit_configs}
def check_invariants(self, active_ops_dirs):
# We check uniqueness of UUIDs here, but to be honest, UUID is
# quite unlikely to conflict.
for ops_dir in active_ops_dirs:
ASSERT.isdisjoint(
self._get_pod_id_set(ops_dir.metadata),
self._get_pod_id_set(self.metadata),
)
def install(self, bundle_dir, target_ops_dir_path):
ASSERT.isinstance(bundle_dir, PodBundleDir)
log_args = (bundle_dir.label, bundle_dir.version)
# Make metadata first so that uninstall may roll back properly.
LOG.debug('pods install: metadata: %s %s', *log_args)
metadata, groups = self._make_metadata(bundle_dir.deploy_instruction)
jsons.dump_dataobject(metadata, self.metadata_path)
bases.set_file_attrs(self.metadata_path)
# Sanity check of the just-written metadata file.
ASSERT.equal(self.label, bundle_dir.label)
ASSERT.equal(self.version, bundle_dir.version)
ASSERT.equal(self.metadata, metadata)
LOG.debug(
'pods install: pod ids: %s %s: %s', *log_args, ', '.join(groups)
)
LOG.debug('pods install: volumes: %s %s', *log_args)
bases.make_dir(self.volumes_dir_path)
for volume, volume_path in bundle_dir.iter_volumes():
volume_dir_path = self.volumes_dir_path / volume.name
LOG.debug('pods: extract: %s -> %s', volume_path, volume_dir_path)
bases.make_dir(ASSERT.not_predicate(volume_dir_path, Path.exists))
scripts.tar_extract(
volume_path,
directory=volume_dir_path,
extra_args=(
'--same-owner',
'--same-permissions',
),
)
LOG.debug('pods install: images: %s %s', *log_args)
for _, image_path in bundle_dir.iter_images():
ctr_scripts.ctr_import_image(image_path)
LOG.debug('pods install: tokens: %s %s', *log_args)
assignments = {}
with tokens.make_tokens_database().writing() as active_tokens:
for pod_id in groups:
assignments[pod_id] = {
alias: active_tokens.assign(token_name, pod_id, alias)
for alias, token_name in
bundle_dir.deploy_instruction.token_names.items()
}
envs = ops_envs.load()
LOG.debug('pods install: prepare pods: %s %s', *log_args)
bases.make_dir(self.refs_dir_path)
for pod_id, group in groups.items():
pod_config = self._make_pod_config(
bundle_dir.deploy_instruction,
target_ops_dir_path,
systemds.make_envs(
pod_id,
self.metadata,
group.envs,
envs,
assignments[pod_id],
),
)
with tempfile.NamedTemporaryFile() as config_tempfile:
config_path = Path(config_tempfile.name)
jsons.dump_dataobject(pod_config, config_path)
ctr_scripts.ctr_prepare_pod(pod_id, config_path)
ctr_scripts.ctr_add_ref_to_pod(pod_id, self.refs_dir_path / pod_id)
LOG.debug('pods install: systemd units: %s %s', *log_args)
units = {(pod_id, unit.name): unit
for pod_id, group in groups.items() for unit in group.units}
for config in self.metadata.systemd_unit_configs:
systemds.install(
config,
self.metadata,
groups[config.pod_id],
units[config.pod_id, config.name],
envs,
assignments[config.pod_id],
)
systemds.daemon_reload()
return True
@staticmethod
def _make_metadata(deploy_instruction):
groups = {}
systemd_unit_configs = []
for group in deploy_instruction.systemd_unit_groups:
pod_id = ctr_models.generate_pod_id()
groups[pod_id] = group
systemd_unit_configs.extend(
models.PodMetadata.SystemdUnitConfig(
pod_id=pod_id,
name=unit.name,
auto_start=unit.auto_start,
auto_stop=unit.auto_stop,
) for unit in group.units
)
metadata = models.PodMetadata(
label=deploy_instruction.label,
version=deploy_instruction.version,
images=deploy_instruction.images,
systemd_unit_configs=systemd_unit_configs,
)
return metadata, groups
@staticmethod
def _make_pod_config(deploy_instruction, target_ops_dir_path, envs):
def volume_to_mount(volume):
return ctr_models.PodConfig.Mount(
source=str(
target_ops_dir_path /
models.OPS_DIR_VOLUMES_DIR_NAME /
volume.name
),
target=volume.target,
read_only=volume.read_only,
)
return dataclasses.replace(
deploy_instruction.pod_config_template,
apps=[
dataclasses.replace(
app,
exec=[arg.format_map(envs) for arg in app.exec],
service_section=(
app.service_section if app.service_section is None else
app.service_section.format_map(envs)
),
) for app in deploy_instruction.pod_config_template.apps
],
mounts=[
*deploy_instruction.pod_config_template.mounts,
*map(volume_to_mount, deploy_instruction.volumes),
],
)
def start(self, *, unit_names=None, all_units=False):
"""Enable and start the requested units."""
ASSERT.not_all((unit_names is not None, all_units))
LOG.info('pods start: %s %s', self.label, self.version)
if unit_names is not None:
predicate = lambda config: config.name in unit_names
elif all_units:
predicate = None
else:
predicate = lambda config: config.auto_start
for config in self._filter_pod_ids_and_units(predicate):
systemds.activate(config)
def restart(self, *, unit_names=None, all_units=False):
"""Restart the requested units.
NOTE: `restart` is not equivalent to `stop` followed by `start`
for two reasons:
* `start` and `stop` enables and disables the requested units,
but `restart` does not.
* The default behavior (when both `unit_names` and `all_units`
are not set) of `restart` only restarts units of which both
`auto_start` and `auto_stop` are true, which is usually what
you want. But the default behavior of `stop` followed by
`start` stops or starts only units of which `auto_start` or
`auto_stop` is true. So units of which `auto_start` is false
will be stopped but not restarted, and units of which
`auto_stop` is false will not be stopped and thus not
restarted; this is generally not what you want.
"""
ASSERT.not_all((unit_names is not None, all_units))
LOG.info('pods restart: %s %s', self.label, self.version)
if unit_names is not None:
predicate = lambda config: config.name in unit_names
elif all_units:
predicate = None
else:
predicate = lambda config: config.auto_start and config.auto_stop
for config in self._filter_pod_ids_and_units(predicate):
systemds.restart(config)
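# Illustrative summary of the NOTE above (added commentary, not part of the
# original module). With the default predicates, for a unit config with:
#   auto_start=True,  auto_stop=True  -> restart() restarts it; stop() then
#                                        start() also stops and starts it.
#   auto_start=False, auto_stop=True  -> restart() skips it; stop() then
#                                        start() stops it but never starts
#                                        it again.
#   auto_start=True,  auto_stop=False -> restart() skips it; stop() then
#                                        start() never stops it, so it keeps
#                                        running with its old state.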
def stop(self, *, unit_names=None, all_units=False):
"""Disable and stop the requested units."""
LOG.info('pods stop: %s %s', self.label, self.version)
if unit_names is not None:
predicate = lambda config: config.name in unit_names
elif all_units:
predicate = None
else:
predicate = lambda config: config.auto_stop
for config in self._filter_pod_ids_and_units(predicate):
systemds.deactivate(config)
def stop_all(self):
self.stop(all_units=True)
def _filter_pod_ids_and_units(self, predicate):
return filter(predicate, self.metadata.systemd_unit_configs)
def uninstall(self):
if not self.metadata_path.exists():
LOG.info('skip: pods uninstall: metadata was removed')
ASSERT.predicate(self.path, g1.files.is_empty_dir)
return False
log_args = (self.label, self.version)
LOG.debug('pods uninstall: systemd units: %s %s', *log_args)
for config in self.metadata.systemd_unit_configs:
systemds.uninstall(config)
systemds.daemon_reload()
LOG.debug('pods uninstall: pods: %s %s', *log_args)
g1.files.remove(self.refs_dir_path)
for pod_id in self._get_pod_id_set(self.metadata):
ctr_scripts.ctr_remove_pod(pod_id)
LOG.debug('pods uninstall: tokens: %s %s', *log_args)
with tokens.make_tokens_database().writing() as active_tokens:
for config in self.metadata.systemd_unit_configs:
active_tokens.unassign_all(config.pod_id)
LOG.debug('pods uninstall: images: %s %s', *log_args)
for image in self.metadata.images:
ctr_scripts.ctr_remove_image(image, skip_active=True)
LOG.debug('pods uninstall: volumes: %s %s', *log_args)
g1.files.remove(self.volumes_dir_path)
LOG.debug('pods uninstall: metadata: %s %s', *log_args)
g1.files.remove(self.metadata_path) # Remove metadata last.
ASSERT.predicate(self.path, g1.files.is_empty_dir)
return True
def init():
repos.OpsDirs.init(_get_ops_dirs_path())
def make_ops_dirs():
return repos.OpsDirs(
models.REPO_PODS_DIR_NAME,
_get_ops_dirs_path(),
bundle_dir_type=PodBundleDir,
ops_dir_type=PodOpsDir,
)
def _get_ops_dirs_path():
return bases.get_repo_path() / models.REPO_PODS_DIR_NAME
| {
"content_hash": "19fd98920d8a81b3d7dfae73432576be",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 79,
"avg_line_length": 37.711598746081506,
"alnum_prop": 0.5804655029093931,
"repo_name": "clchiou/garage",
"id": "4c198779801e733218bf1be8dc9e91e97c437e68",
"size": "12030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/g1/operations/cores/g1/operations/cores/pod_ops_dirs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cap'n Proto",
"bytes": "6917"
},
{
"name": "HTML",
"bytes": "113"
},
{
"name": "Java",
"bytes": "61027"
},
{
"name": "Python",
"bytes": "1653733"
},
{
"name": "Shell",
"bytes": "6209"
}
],
"symlink_target": ""
} |
from ..fetching import load_data_file
# List the vispy fonts made available online
_vispy_fonts = ('OpenSans', 'Cabin')
def _get_vispy_font_filename(face, bold, italic):
"""Fetch a remote vispy font"""
name = face + '-'
name += 'Regular' if not bold and not italic else ''
name += 'Bold' if bold else ''
name += 'Italic' if italic else ''
name += '.ttf'
return load_data_file('fonts/%s' % name)
| {
"content_hash": "f1f5a34e4efd30057220450bc17f5231",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 56,
"avg_line_length": 30.428571428571427,
"alnum_prop": 0.6244131455399061,
"repo_name": "jdreaver/vispy",
"id": "842a3e72a12343d35168ea0f7268b7ba5934e79b",
"size": "752",
"binary": false,
"copies": "21",
"ref": "refs/heads/master",
"path": "vispy/util/fonts/_vispy_fonts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "195460"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1638"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2465363"
}
],
"symlink_target": ""
} |
"""Unit tests for reviewboard.search.signal_processor.SignalProcessor."""
import haystack
import kgb
from djblets.siteconfig.models import SiteConfiguration
from haystack.signals import BaseSignalProcessor
from reviewboard.search.signal_processor import SignalProcessor, logger
from reviewboard.testing.testcase import TestCase
class SignalProcessorTests(kgb.SpyAgency, TestCase):
"""Unit tests for reviewboard.search.signal_processor.SignalProcessor."""
def test_can_process_signals_with_siteconfig(self):
"""Testing SignalProcessor.can_process_signals with stored
SiteConfiguration
"""
self.assertIsNotNone(SiteConfiguration.objects.get_current())
signal_processor = self._create_signal_processor()
self.assertTrue(signal_processor.can_process_signals)
def test_can_process_signals_without_siteconfig(self):
"""Testing SignalProcessor.can_process_signals without stored
SiteConfiguration
"""
self.spy_on(SiteConfiguration.objects.get_current,
op=kgb.SpyOpRaise(SiteConfiguration.DoesNotExist))
signal_processor = self._create_signal_processor()
self.assertFalse(signal_processor.can_process_signals)
# Make sure it works once one has been created.
SiteConfiguration.objects.get_current.unspy()
self.assertTrue(signal_processor.can_process_signals)
def test_handle_delete_with_error(self):
"""Testing SignalProcessor.handle_delete with error"""
exception = Exception('kaboom!')
self.spy_on(BaseSignalProcessor.handle_delete,
owner=BaseSignalProcessor,
op=kgb.SpyOpRaise(exception))
self.spy_on(logger.error)
signal_processor = self._create_signal_processor()
# This should not raise an exception.
#
# We'll use some garbage values.
signal_processor.handle_delete(sender=None,
instance=None)
self.assertSpyCalled(BaseSignalProcessor.handle_delete)
self.assertSpyCalledWith(
logger.error,
('Error updating the search index. Check to make sure the '
'search backend is running and configured correctly, and then '
'rebuild the search index. Error: %s'),
exception)
def test_handle_save_with_error(self):
"""Testing SignalProcessor.handle_save with error"""
exception = Exception('kaboom!')
self.spy_on(BaseSignalProcessor.handle_save,
owner=BaseSignalProcessor,
op=kgb.SpyOpRaise(exception))
self.spy_on(logger.error)
signal_processor = self._create_signal_processor()
# This should not raise an exception.
#
# We'll use some garbage values.
signal_processor.handle_save(sender=None,
instance=None)
self.assertSpyCalled(BaseSignalProcessor.handle_save)
self.assertSpyCalledWith(
logger.error,
('Error updating the search index. Check to make sure the '
'search backend is running and configured correctly, and then '
'rebuild the search index. Error: %s'),
exception)
def _create_signal_processor(self):
"""Return a new instance of our Haystack signal processor.
Returns:
reviewboard.search.signal_processor.SignalProcessor:
The new signal processor.
"""
return SignalProcessor(haystack.connections,
haystack.connection_router)
| {
"content_hash": "042f8959ec6a21d9f75a9d91139377f0",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 77,
"avg_line_length": 38.0625,
"alnum_prop": 0.6502463054187192,
"repo_name": "reviewboard/reviewboard",
"id": "2f21d2f4f3eea2ab6765218e2ed209e035b9d0ef",
"size": "3654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/search/tests/test_signal_processor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
"""Implementation of an interactive wizard widget."""
from grr.gui import renderers
from grr.gui.plugins import forms
from grr.lib import aff4
class WizardRenderer(renderers.TemplateRenderer):
"""This renderer creates a wizard."""
render_as_modal = True
current_page = 0
# WizardPage objects that define this wizard's behaviour.
title = ""
pages = []
# This will be used for identifying the wizard when publishing the events.
wizard_name = "wizard"
layout_template = renderers.Template("""
<div id="Wizard_{{unique|escape}}"
class="Wizard{% if this.render_as_modal %} modal-dialog{% endif %} FormData"
data-current='{{this.current_page|escape}}'
data-max_page='{{this.max_pages|escape}}'
>
{% if this.render_as_modal %}<div class="modal-content">{% endif %}
{% for i, page, page_cls, page_renderer in this.raw_pages %}
<div id="Page_{{i|escape}}" class="WizardPage"
data-renderer="{{page_renderer|escape}}"
style="display: none">
<div class="WizardBar modal-header">
<button type="button" class="close" data-dismiss="modal"
aria-hidden="true">x</button>
<h3>{{this.title|escape}} -
<span class="Description">
{{page_cls.description|escape}}
</span>
</h3>
</div>
<div class="modal-body">
{{page|safe}}
</div>
</div>
{% endfor %}
<div class="modal-footer navbar-inner">
<ul class="nav pull-left">
<div id="Message{{unique}}"/>
<div class="navbar-text" id="footer_message_{{unique}}"></div>
</ul>
<ul class="nav nav pull-right">
<button class="btn btn-default Back" style='display: none'>Back</button>
<button class="btn btn-primary Next">Next</button>
<button class="btn btn-primary Finish" style='display: none'
data-dismiss="modal"
>
Finish
</button>
</ul>
</div>
{% if this.render_as_modal %}</div>{% endif %}
</div>
""")
def Layout(self, request, response):
"""Render the content of the tab or the container tabset."""
self.raw_pages = []
for i, page_cls in enumerate(self.pages):
# Make the page renderers dump all their data to the wizard DOM node.
page_renderer = page_cls(id="Page_%d" % i)
self.raw_pages.append((i, page_renderer.RawHTML(request),
page_cls, page_cls.__name__))
self.max_pages = len(self.pages) - 1
super(WizardRenderer, self).Layout(request, response)
return self.CallJavascript(response, "WizardRenderer.Layout")
class AFF4AttributeFormRenderer(forms.TypeDescriptorFormRenderer):
"""A renderer for AFF4 attribute forms."""
type = aff4.AFF4Attribute
layout_template = """<div class="form-group">
""" + forms.TypeDescriptorFormRenderer.default_description_view + """
<div class="controls">
<select id="{{this.prefix}}" class="unset"
onchange="grr.forms.inputOnChange(this)"
>
{% for name in this.attributes %}
{% if name %}
<option {% ifequal name this.value %}selected{% endifequal %}
value="{{name|escape}}">
{{name|escape}}
{% ifequal name this.value %} (default){% endifequal %}
</option>
{% endif %}
{% endfor %}
</select>
</div>
</div>
"""
def __init__(self, **kwargs):
super(AFF4AttributeFormRenderer, self).__init__(**kwargs)
self.attributes = ["Unset"]
self.attributes.extend(sorted(aff4.Attribute.NAMES.keys()))
| {
"content_hash": "06e06e60858e2974414b7bc3bbd99a61",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 30.089285714285715,
"alnum_prop": 0.6391691394658754,
"repo_name": "statik/grr",
"id": "9a1c18ba24bd31d07357045e47d47f870f58a2e8",
"size": "3392",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "gui/plugins/wizards.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "14896"
},
{
"name": "C",
"bytes": "10598"
},
{
"name": "C++",
"bytes": "303841"
},
{
"name": "CMake",
"bytes": "3228"
},
{
"name": "CSS",
"bytes": "12812"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "83451"
},
{
"name": "JavaScript",
"bytes": "229046"
},
{
"name": "Makefile",
"bytes": "6232"
},
{
"name": "Protocol Buffer",
"bytes": "202829"
},
{
"name": "Python",
"bytes": "5266989"
},
{
"name": "Ruby",
"bytes": "5103"
},
{
"name": "Shell",
"bytes": "43667"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
} |
"""Functional tests for reduction ops."""
import itertools
import numbers
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# The maximum input rank to test.
_MAX_RANK = 5
def _powerset(iterable):
"""Helper for generating all possible reduction_axes arguments.
Example:
powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2)
Args:
iterable: An iterable of items to generate the powerset of.
Returns:
The powerset of all items in iterable.
"""
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
class ReducedShapeTest(test.TestCase):
def _check(self, shape, axes, result):
output = math_ops.reduced_shape(shape, axes=axes)
self.assertAllEqual(output, result)
@test_util.run_deprecated_v1
def testSimple(self):
with self.cached_session():
self._check([3], [], [3])
self._check([3], [0], [1])
self._check([5, 3], [], [5, 3])
self._check([5, 3], [0], [1, 3])
self._check([5, 3], [1], [5, 1])
self._check([5, 3], [0, 1], [1, 1])
@test_util.run_deprecated_v1
def testZeros(self):
"""Check that reduced_shape does the right thing with zero dimensions."""
with self.cached_session():
self._check([0], [], [0])
self._check([0], [0], [1])
self._check([0, 3], [], [0, 3])
self._check([0, 3], [0], [1, 3])
self._check([0, 3], [1], [0, 1])
self._check([0, 3], [0, 1], [1, 1])
self._check([3, 0], [], [3, 0])
self._check([3, 0], [0], [1, 0])
self._check([3, 0], [1], [3, 1])
self._check([3, 0], [0, 1], [1, 1])
@test_util.run_deprecated_v1
def testNegAxes(self):
with self.cached_session():
self._check([10, 10, 10], [-1], [10, 10, 1])
self._check([10, 10, 10], [-1, 2], [10, 10, 1])
self._check([10, 10, 10], [-1, -1], [10, 10, 1])
self._check([10, 10, 10], [-1, 0], [1, 10, 1])
self._check([10, 10, 10], [-3], [1, 10, 10])
class ReductionUnknownShape(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
with self.cached_session():
for dtype, reductions in [(dtypes.float32,
(math_ops.reduce_sum, math_ops.reduce_mean,
math_ops.reduce_prod, math_ops.reduce_max,
math_ops.reduce_min,
math_ops.reduce_euclidean_norm)),
(dtypes.bool, (math_ops.reduce_all,
math_ops.reduce_any))]:
for reduction in reductions:
x = array_ops.placeholder(
dtype=dtype, shape=None) # Some tensor w/ unknown shape.
y = reduction(x)
self.assertEqual(y.shape, ())
class ReductionInvalidKeepdims(test.TestCase):
def testBasic(self):
# Test case for GitHub issue 46700.
for dtype, reductions in [
(dtypes.float32, (math_ops.reduce_sum, math_ops.reduce_mean,
math_ops.reduce_prod, math_ops.reduce_max,
math_ops.reduce_min, math_ops.reduce_euclidean_norm)),
(dtypes.bool, (math_ops.reduce_all, math_ops.reduce_any))
]:
for reduction in reductions:
with self.assertRaisesRegex(ValueError, "The truth value"):
x = True if dtype == dtypes.bool else 1
y = reduction(
input_tensor=x, keepdims=np.array([63600, 1], dtype=np.float16))
self.evaluate(y)
class BaseReductionTest(test.TestCase):
def _tf_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _np_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _makeIncremental(self, shape, dtype):
data = np.arange(np.prod(shape)).reshape(shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 2j * data
return data
def _makeRandom(self, shape, dtype):
data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 2j * data
return data
def _compare(self,
x,
reduction_axes,
keepdims,
feed_dict=None,
rtol=1e-6,
atol=1e-6):
np_ans = self._np_reduce(x, reduction_axes, keepdims)
with self.cached_session() as sess:
tf_ans = self._tf_reduce(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out, rtol=rtol, atol=atol)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self,
x,
reduction_axes,
feed_dict=None,
rtol=1e-6,
atol=1e-6):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0], rtol=rtol, atol=atol)
self._compare(
x,
reduction_axes,
keepdims=False,
feed_dict=feed_dict,
rtol=rtol,
atol=atol)
self._compare(
x,
reduction_axes,
keepdims=True,
feed_dict=feed_dict,
rtol=rtol,
atol=atol)
def _compareAllAxes(self, x, feed_dict=None, rtol=1e-6, atol=1e-6):
self._compareAll(x, None, rtol=rtol, atol=atol)
for axes in _powerset(range(x.ndim)):
self._compareAll(x, axes, feed_dict, rtol=rtol, atol=atol)
def _compareGradient(self, x, reduction_axes, rtol=1e-8, atol=1e-8):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(x, reduction_axes[0], rtol=rtol, atol=atol)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = self._tf_reduce(t, reduction_axes, False)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, su.get_shape().as_list(), x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=rtol, atol=atol)
def _compareGradientAxes(self, x, rtol=1e-8, atol=1e-8):
self._compareGradient(x, None, rtol=rtol, atol=atol)
self._compareGradient(x, [], rtol=rtol, atol=atol)
self._compareGradient(x, 0, rtol=rtol, atol=atol)
self._compareGradient(x, [1], rtol=rtol, atol=atol)
self._compareGradient(x, [2], rtol=rtol, atol=atol)
self._compareGradient(x, [1, 2], rtol=rtol, atol=atol)
self._compareGradient(x, [0, 1, 2, 3], rtol=rtol, atol=atol)
class SumReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_sum(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.sum(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session():
v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat16(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float16)
self._compareAllAxes(np_arr)
# Test that the mean doesn't overflow. This only runs on GPU, since the
# GPU kernel has the more accurate implementation.
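# Added context (not from the original test): float16's largest finite
# value is 65504, so naively summing 68000 ones in float16 before dividing
# would overflow to inf; the more accurate implementation is expected to
# still return a mean of 1.0 here.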
if not test.is_gpu_available():
return
arr = np.ones([68000], dtype=np.float16)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_arr = variables.Variable(arr)
self.evaluate(variables.global_variables_initializer())
tf_mean = math_ops.reduce_mean(tf_arr, 0, False)
tf_out_mean = self.evaluate(tf_mean)
self.assertAllClose(tf_out_mean, 1.)
@test_util.run_deprecated_v1
def testBfloat16(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.bfloat16)
self._compareAllAxes(np_arr, rtol=1e-3, atol=5.)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
for _ in range(10):
size_x = int(2**np.random.uniform(0, 15))
size_y = int(2**np.random.uniform(0, 15))
if size_x * size_y > 1e7:
size_y = int(1e7 / size_x)
arr = np.ones([size_x, size_y], dtype=np.float32)
col_sum = np.sum(arr, axis=0)
row_sum = np.sum(arr, axis=1)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_row_sum = self._tf_reduce(arr, 1, False)
tf_col_sum = self._tf_reduce(arr, 0, False)
tf_out_row, tf_out_col = self.evaluate([tf_row_sum, tf_col_sum])
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
for size_x in [1, 3, 16, 33]:
for size_y in [1, 3, 16, 33]:
for size_z in [1, 3, 16, 33]:
arr = np.ones([size_x, size_y, size_z], dtype=np.float32)
sum_y = np.sum(arr, axis=1)
sum_xz = np.sum(arr, axis=(0, 2))
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_sum_xz = self._tf_reduce(arr, [0, 2], False)
tf_sum_y = self._tf_reduce(arr, 1, False)
tf_out_sum_xz, tf_out_sum_y = self.evaluate([tf_sum_xz, tf_sum_y])
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [-3])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [0, 2])
@test_util.run_deprecated_v1
def testPartialShapes(self):
np.random.seed(1618)
# Input shape is unknown.
reduction_axes = [1, 2]
c_unknown = array_ops.placeholder(dtypes.float32)
s_unknown = math_ops.reduce_sum(c_unknown, reduction_axes)
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_unknown: np_input})
# Input shape only has known rank.
c_known_rank = array_ops.placeholder(dtypes.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(rank=3))
s_known_rank = math_ops.reduce_sum(
c_known_rank, reduction_axes, keepdims=True)
self.assertEqual(3, s_known_rank.get_shape().rank)
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})
# Reduction indices are unknown.
unknown_indices = array_ops.placeholder(dtypes.int32)
c_unknown_indices = constant_op.constant([[10.0], [20.0]])
s_unknown_indices = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().rank)
@test_util.run_deprecated_v1
def testWrongShapeForReductionIndices(self):
reduction_axes = [[1], [2]]
c_unknown = array_ops.placeholder(dtypes.float32)
with self.assertRaisesWithPredicateMatch(ValueError,
".*must be at most rank 1.*"):
math_ops.reduce_sum(c_unknown, reduction_axes)
def testInvalidRepeatedReductionIndices(self):
reduction_axes = constant_op.constant([0, 0])
c = constant_op.constant([1.0, 2.0])
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
".*Axes contains duplicate dimension: 0.*"):
self.evaluate(math_ops.reduce_sum(c, reduction_axes))
# TODO: Consider adding an int64 test analogous to testInt32 above.
@test_util.run_deprecated_v1
def testGradient(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]:
x = self._makeIncremental([2, 3, 4, 2], dtype)
self._compareGradientAxes(x)
@test_util.run_deprecated_v1
def testHighRank(self):
# Do a bunch of random high dimensional reductions
np.random.seed(42)
for _ in range(20):
rank = np.random.randint(4, 10 + 1)
axes, = np.nonzero(np.random.randint(2, size=rank))
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
self._compareAll(data, axes)
# Check some particular axis patterns
for rank in 4, 7, 10:
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
for axes in ([], np.arange(rank), np.arange(0, rank, 2),
np.arange(1, rank, 2)):
self._compareAll(data, axes)
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_sum(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session():
for dtype in (dtypes.bfloat16, dtypes.float16, dtypes.float32,
dtypes.float64, dtypes.complex64, dtypes.complex128):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_sum(x, [0])
self.assertAllEqual(y, np.zeros(9938))
class MeanReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_mean(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
elif isinstance(reduction_axes, numbers.Integral):
reduction_axes = (reduction_axes,)
if reduction_axes is None:
count = np.prod(x.shape)
else:
count = np.prod([x.shape[ax] for ax in reduction_axes])
# np.mean automatically converts integer inputs to float, while TensorFlow's
# reduce_mean does not. For integer inputs, we emulate TensorFlow's behavior
# using np.sum and truncating division.
np_sum = np.sum(x, axis=reduction_axes, keepdims=keepdims)
if np.issubdtype(x.dtype, np.integer):
return np_sum // count
return np_sum / count
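# Added illustration of the truncating-division emulation above: for an
# int32 input [2, 3], np.mean would give 2.5, while reduce_mean keeps the
# integer dtype and this reference computes np.sum([2, 3]) // 2 == 2.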
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session():
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testUint8(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeRandom((2,) * rank, dtypes.uint8)
self._compareAllAxes(np_arr)
# This tests the issue reported in b/145030710.
@test_util.run_deprecated_v1
def testSizeOverflowUint8(self):
np_arr = self._makeRandom((2**8,), dtypes.uint8)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testSizeOverflowInt8(self):
np_arr = self._makeRandom((2**7,), dtypes.int8)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testSizeOverflowUint16(self):
np_arr = self._makeRandom((2**16,), dtypes.uint16)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testSizeOverflowInt16(self):
np_arr = self._makeRandom((2**15,), dtypes.int16)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testBfloat16(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.bfloat16)
self._compareAllAxes(np_arr, rtol=1e-3, atol=1.)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
for dtype in [dtypes.float32, dtypes.float64]:
x = self._makeIncremental(s, dtype)
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_mean(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session():
for dtype in (dtypes.bfloat16, dtypes.float16, dtypes.float32,
dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_mean(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertTrue(np.all(np.isnan(y)))
class EuclideanNormReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_euclidean_norm(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
np_fro = np.sqrt(
np.sum(x * np.conj(x), axis=reduction_axes, keepdims=keepdims))
if np.issubdtype(x.dtype, np.integer):
np_fro = np.floor(np_fro)
return np_fro
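# Added illustration: for an integer input such as [1, 2], the reference
# norm sqrt(1 + 4) ~= 2.236 is floored to 2 above, mirroring the integer
# behaviour this test expects from reduce_euclidean_norm.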
@test_util.run_deprecated_v1
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session():
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testSingleton(self):
for dtype in [np.float32, np.float64]:
np_arr = np.array([-1.]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
with self.session():
for dtype in (dtypes.bfloat16, dtypes.float16, dtypes.float32,
dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_euclidean_norm(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertAllEqual(y, np.zeros(9938))
@test_util.run_deprecated_v1
def testGradient(self):
shape = [2, 3, 4, 2]
for dtype in [dtypes.float32, dtypes.float64]:
# A zero-valued entry would produce a NaN gradient if no reduction happens,
# e.g. `tf.math.reduce_sum([0, 1], axis=[])`, so add one to avoid it.
x = self._makeIncremental(shape, dtype) + 1.0
self._compareGradientAxes(x, rtol=1e-2, atol=1e-2)
class ProdReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_prod(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.prod(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session():
v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
# Numpy automatically upgrades the type of np.prod from int32 to int64, so
# Numpy does not overflow an int32 np.prod while TensorFlow does. To avoid
# overflow, limit array values.
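# Added illustration: np.prod on an int32 array may accumulate in int64
# (e.g. the product of 1..13 is 6227020800, which exceeds the int32 range),
# which is why the `% 5 + 1` clamp below keeps the values small enough for
# an int32 product.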
for rank in range(1, _MAX_RANK):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32) % 5 + 1
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testInt64(self):
for rank in range(1, _MAX_RANK):
# Avoid overflow by limiting array values.
np_arr = self._makeIncremental((2,) * rank, dtypes.int64) % 11 + 1
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testBfloat16(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.bfloat16) * \
np.array([0.01]).astype(dtypes.bfloat16.as_numpy_dtype)
self._compareAllAxes(np_arr, rtol=1e-2, atol=1e-2)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testGradientWithZeros(self):
s = [2, 3, 4, 2]
x = self._makeIncremental(s, dtypes.float32) / 20.
# No zeros in input
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
# Zero at beginning
x1 = x.copy()
x1[:, :, 0, :] = 0
self._compareGradientAxes(x1, rtol=1e-3, atol=1e-3)
# Zero at end
x2 = x.copy()
x2[:, :, -1, :] = 0
self._compareGradientAxes(x2, rtol=1e-3, atol=1e-3)
# Zero in middle
x3 = x.copy()
x3[:, :, 2, :] = 0
self._compareGradientAxes(x3, rtol=1e-3, atol=1e-3)
# All zeros
x4 = x.copy()
x4[:, :, :, :] = 0
self._compareGradientAxes(x4, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_prod(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session():
for dtype in (dtypes.bfloat16, dtypes.float16, dtypes.float32,
dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_prod(x, [0])
self.assertAllEqual(y, np.ones(9938))
class MinReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_min(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=True)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session():
v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.disable_xla("b/168718272") # XLA handling of NaN is inconsistent
def testSpecialValues(self):
for dtype in [np.float32, np.float64]:
for size in range(1, 4):
for arr in itertools.product([-np.inf, 1., np.nan, np.inf],
repeat=size):
self._compareAll(np.array(arr, dtype=dtype), None)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.cached_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_min(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MaxReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_max(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=True)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session():
v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.disable_xla("b/168718272") # XLA handling of NaN is inconsistent
def testSpecialValues(self):
for dtype in [np.float32, np.float64]:
for size in range(1, 4):
for arr in itertools.product([-np.inf, 1., np.nan, np.inf],
repeat=size):
self._compareAll(np.array(arr, dtype=dtype), None)
def testInt64Reduce3D(self):
# Create a 3D array of int64s and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.int64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testBfloat16Reduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape(
    [2, 3, 5]).astype(dtypes.bfloat16.as_numpy_dtype)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.cached_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_max(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class AllReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_all(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session():
v = math_ops.reduce_all([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class AnyReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.any(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.any(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_any(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session():
v = math_ops.reduce_any([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class CountNonzeroReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False, zero=0,
feed_dict=None):
np_ans = (x != zero).astype(np.int32)
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keepdims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
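      # NumPy reference: count_nonzero is a sum over the boolean "!= zero"
      # mask; reduce the highest axes first so the axis indices stay valid.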
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu) as sess:
tf_ans = math_ops.count_nonzero(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, False, use_gpu=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=False, feed_dict=feed_dict)
@test_util.run_deprecated_v1
def testBoolReduce1D(self):
    # Create a 1D array of booleans
np_arr = np.asarray([False, False, True, False, False, True])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.asarray([0.0, 1.0, -1.0, 0.0, 0.0, 3.0]).astype(np.float32)
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
    np_arr = np.floor(np.arange(0.0, 210.0) / 100.0).reshape(
        [2, 3, 5, 7]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testDegenerate(self):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
for dtype in (dtypes.bool,):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.count_nonzero(x, [0])
self.assertAllEqual(y, np.zeros(9938))
def testStringReduce(self):
# Test case for GitHub issue 18712
with self.cached_session() as sess:
v = math_ops.count_nonzero(constant_op.constant(["test"]))
self.assertAllClose(self.evaluate(v), 1)
@test_util.run_deprecated_v1
def testStringReduce1D(self):
# Create a 1D array of strings
x = np.asarray(["", "", "a", "", "", "b"])
self._compare(x, None, keepdims=False, zero=np.str_(""))
self._compare(x, [], keepdims=False, zero=np.str_(""))
self._compare(x, [0], keepdims=False, zero=np.str_(""))
self._compare(x, None, keepdims=True, zero=np.str_(""))
self._compare(x, [], keepdims=True, zero=np.str_(""))
self._compare(x, [0], keepdims=True, zero=np.str_(""))
@test_util.run_deprecated_v1
def testStringReduce2D(self):
# Create a 2D array of strings
x = np.asarray([["", "", "a", "", "", "b"],
["", "c", "", "d", "", ""],
["e", "", "f", "", "", ""]])
self._compare(x, None, keepdims=False, zero=np.str_(""))
self._compare(x, [], keepdims=False, zero=np.str_(""))
self._compare(x, [0], keepdims=False, zero=np.str_(""))
self._compare(x, [1], keepdims=False, zero=np.str_(""))
self._compare(x, [0, 1], keepdims=False, zero=np.str_(""))
self._compare(x, None, keepdims=True, zero=np.str_(""))
self._compare(x, [], keepdims=True, zero=np.str_(""))
self._compare(x, [0], keepdims=True, zero=np.str_(""))
self._compare(x, [0, 1], keepdims=True, zero=np.str_(""))
if __name__ == "__main__":
test.main()
| {
"content_hash": "ffb443208c98af4cf414db554c1dbb6a",
"timestamp": "",
"source": "github",
"line_count": 1237,
"max_line_length": 80,
"avg_line_length": 36.936944219886826,
"alnum_prop": 0.6216322689369898,
"repo_name": "tensorflow/tensorflow",
"id": "ffa7601b17365bb6c24b62f2e32f6c9399308b7d",
"size": "46380",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/math_ops/reduction_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1400913"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "126099822"
},
{
"name": "CMake",
"bytes": "182430"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11447433"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300213"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42782002"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621854"
},
{
"name": "Smarty",
"bytes": "89538"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7738020"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from flask import render_template, current_app, abort
from flask_wtf import FlaskForm as Form
from wtforms import StringField, PasswordField, TextAreaField, FileField, validators
from notifications_utils.template import Template
from app.main import main
@main.route('/_styleguide')
def styleguide():
if not current_app.config['SHOW_STYLEGUIDE']:
abort(404)
class FormExamples(Form):
username = StringField(u'Username')
password = PasswordField(u'Password', [validators.required()])
code = StringField('Enter code')
message = TextAreaField(u'Message')
file_upload = FileField('Upload a CSV file to add your recipients’ details')
sms = "Your vehicle tax for ((registration number)) is due on ((date)). Renew online at www.gov.uk/vehicle-tax"
form = FormExamples()
form.message.data = sms
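    # Calling validate() is expected to flag the empty required password
    # field, so the styleguide page can also demonstrate error rendering.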
form.validate()
template = Template({'content': sms})
return render_template(
'views/styleguide.html',
form=form,
template=template
)
| {
"content_hash": "25438817f0283b83462dc7276bd29573",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 115,
"avg_line_length": 31.424242424242426,
"alnum_prop": 0.6875602700096431,
"repo_name": "gov-cjwaszczuk/notifications-admin",
"id": "52cc05de1a3777f0b1989650caf34ed47c307f67",
"size": "1039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/views/styleguide.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "62775"
},
{
"name": "HTML",
"bytes": "300104"
},
{
"name": "JavaScript",
"bytes": "33859"
},
{
"name": "Makefile",
"bytes": "9209"
},
{
"name": "Python",
"bytes": "1013002"
},
{
"name": "Shell",
"bytes": "5460"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
import os
import copy
import tempfile
import numpy
import tables
from tables import (
StringCol, BoolCol, IntCol, FloatCol, Int8Col, Int32Col, Int64Col,
UInt16Col, Float32Col,
)
from tables.index import Index, default_auto_index, default_index_filters
from tables.idxutils import calc_chunksize
from tables.exceptions import OldIndexWarning
from tables.tests import common
from tables.tests.common import verbose, allequal, heavy, TempFileMixin
from tables.tests.common import unittest, test_filename
from tables.tests.common import PyTablesTestCase as TestCase
import six
from six.moves import range
# Sensible parameters for indexing with small blocksizes
minRowIndex = 10
small_blocksizes = (96, 24, 6, 3)
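# The four values are assumed to map to (superblocksize, blocksize,
# slicesize, chunksize) of the index, kept tiny so many blocks are exercised.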
class TDescr(tables.IsDescription):
var1 = StringCol(itemsize=4, dflt=b"", pos=1)
var2 = BoolCol(dflt=0, pos=2)
var3 = IntCol(dflt=0, pos=3)
var4 = FloatCol(dflt=0, pos=4)
class BasicTestCase(common.TempFileMixin, TestCase):
compress = 0
complib = "zlib"
shuffle = 0
fletcher32 = 0
nrows = minRowIndex
ss = small_blocksizes[2]
def setUp(self):
super(BasicTestCase, self).setUp()
self.rootgroup = self.h5file.root
self.populateFile()
# Close the file
self.h5file.close()
def populateFile(self):
group = self.rootgroup
# Create a table
title = "This is the IndexArray title"
self.filters = tables.Filters(complevel=self.compress,
complib=self.complib,
shuffle=self.shuffle,
fletcher32=self.fletcher32)
table = self.h5file.create_table(group, 'table', TDescr, title,
self.filters, self.nrows)
for i in range(self.nrows):
table.row['var1'] = str(i).encode('ascii')
# table.row['var2'] = i > 2
table.row['var2'] = i % 2
table.row['var3'] = i
table.row['var4'] = float(self.nrows - i - 1)
table.row.append()
table.flush()
# Index all entries:
for col in six.itervalues(table.colinstances):
indexrows = col.create_index(_blocksizes=small_blocksizes)
if verbose:
print("Number of written rows:", self.nrows)
print("Number of indexed rows:", indexrows)
return
def test00_flushLastRow(self):
"""Checking flushing an Index incrementing only the last row."""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test00_flushLastRow..." %
self.__class__.__name__)
# Open the HDF5 file in append mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
# Add just 3 rows more
for i in range(3):
table.row['var1'] = str(i).encode('ascii')
table.row.append()
table.flush() # redo the indexes
idxcol = table.cols.var1.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
print("Elements in last row:", idxcol.indicesLR[-1])
# Do a selection
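        # b"1" now exists twice: once from the original fill and once from the
        # freshly appended rows, so the query must find both matches.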
results = [p["var1"] for p in table.where('var1 == b"1"')]
self.assertEqual(len(results), 2)
self.assertEqual(results, [b'1']*2)
def test00_update(self):
"""Checking automatic re-indexing after an update operation."""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test00_update..." % self.__class__.__name__)
# Open the HDF5 file in append mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
# Modify a couple of columns
for i, row in enumerate(table.where("(var3>1) & (var3<5)")):
row['var1'] = str(i)
row['var3'] = i
row.update()
table.flush() # redo the indexes
idxcol1 = table.cols.var1.index
idxcol3 = table.cols.var3.index
if verbose:
print("Dirtyness of var1 col:", idxcol1.dirty)
print("Dirtyness of var3 col:", idxcol3.dirty)
self.assertEqual(idxcol1.dirty, False)
self.assertEqual(idxcol3.dirty, False)
# Do a couple of selections
results = [p["var1"] for p in table.where('var1 == b"1"')]
self.assertEqual(len(results), 2)
self.assertEqual(results, [b'1']*2)
results = [p["var3"] for p in table.where('var3 == 0')]
self.assertEqual(len(results), 2)
self.assertEqual(results, [0]*2)
def test01_readIndex(self):
"""Checking reading an Index (string flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test01_readIndex..." % self.__class__.__name__)
# Open the HDF5 file in read-only mode
self.h5file = tables.open_file(self.h5fname, mode="r")
table = self.h5file.root.table
idxcol = table.cols.var1.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
results = [p["var1"] for p in table.where('var1 == b"1"')]
self.assertEqual(len(results), 1)
self.assertEqual(results, [b'1'])
def test02_readIndex(self):
"""Checking reading an Index (bool flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test02_readIndex..." % self.__class__.__name__)
# Open the HDF5 file in read-only mode
self.h5file = tables.open_file(self.h5fname, mode="r")
table = self.h5file.root.table
idxcol = table.cols.var2.index
if verbose:
print("Rows in table:", table.nrows)
print("Max rows in buf:", table.nrowsinbuf)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
results = [p["var2"] for p in table.where('var2 == True')]
if verbose:
print("Selected values:", results)
self.assertEqual(len(results), self.nrows // 2)
self.assertEqual(results, [True]*(self.nrows // 2))
def test03_readIndex(self):
"""Checking reading an Index (int flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test03_readIndex..." % self.__class__.__name__)
# Open the HDF5 file in read-only mode
self.h5file = tables.open_file(self.h5fname, mode="r")
table = self.h5file.root.table
idxcol = table.cols.var3.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
results = [p["var3"] for p in table.where('(1<var3)&(var3<10)')]
if verbose:
print("Selected values:", results)
self.assertEqual(len(results), min(10, table.nrows) - 2)
self.assertEqual(results, list(range(2, min(10, table.nrows))))
def test04_readIndex(self):
"""Checking reading an Index (float flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test04_readIndex..." % self.__class__.__name__)
# Open the HDF5 file in read-only mode
self.h5file = tables.open_file(self.h5fname, mode="r")
table = self.h5file.root.table
idxcol = table.cols.var4.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Number of rows in table:", table.nrows)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
results = [p["var4"] for p in table.where('var4 < 10')]
# results = [p["var4"] for p in table.where('(1<var4)&(var4<10)')]
if verbose:
print("Selected values:", results)
self.assertEqual(len(results), min(10, table.nrows))
self.assertEqual(results, [float(i) for i in
reversed(list(range(min(10, table.nrows))))])
def test05_getWhereList(self):
"""Checking reading an Index with get_where_list (string flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test05_getWhereList..." %
self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var4.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
table.flavor = "python"
rowList1 = table.get_where_list('var1 < b"10"')
rowList2 = [p.nrow for p in table if p['var1'] < b"10"]
if verbose:
print("Selected values:", rowList1)
print("Should look like:", rowList2)
self.assertEqual(len(rowList1), len(rowList2))
self.assertEqual(rowList1, rowList2)
def test06_getWhereList(self):
"""Checking reading an Index with get_where_list (bool flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test06_getWhereList..." %
self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var2.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Rows in tables:", table.nrows)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
table.flavor = "numpy"
rowList1 = table.get_where_list('var2 == False', sort=True)
        rowList2 = [p.nrow for p in table if not p['var2']]
# Convert to a NumPy object
rowList2 = numpy.array(rowList2, numpy.int64)
if verbose:
print("Selected values:", rowList1)
print("Should look like:", rowList2)
self.assertEqual(len(rowList1), len(rowList2))
self.assertTrue(allequal(rowList1, rowList2))
def test07_getWhereList(self):
"""Checking reading an Index with get_where_list (int flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test07_getWhereList..." %
self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var4.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
table.flavor = "python"
rowList1 = table.get_where_list('var3 < 15', sort=True)
rowList2 = [p.nrow for p in table if p["var3"] < 15]
if verbose:
print("Selected values:", rowList1)
print("Should look like:", rowList2)
self.assertEqual(len(rowList1), len(rowList2))
self.assertEqual(rowList1, rowList2)
def test08_getWhereList(self):
"""Checking reading an Index with get_where_list (float flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test08_getWhereList..." %
self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var4.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
table.flavor = "python"
rowList1 = table.get_where_list('var4 < 10', sort=True)
rowList2 = [p.nrow for p in table if p['var4'] < 10]
if verbose:
print("Selected values:", rowList1)
print("Should look like:", rowList2)
self.assertEqual(len(rowList1), len(rowList2))
self.assertEqual(rowList1, rowList2)
def test09a_removeIndex(self):
"""Checking removing an index."""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test09a_removeIndex..." %
self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var1.index
if verbose:
print("Before deletion")
print("var1 column:", table.cols.var1)
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
# delete the index
table.cols.var1.remove_index()
if verbose:
print("After deletion")
print("var1 column:", table.cols.var1)
self.assertTrue(table.cols.var1.index is None)
self.assertEqual(table.colindexed["var1"], 0)
# re-create the index again
indexrows = table.cols.var1.create_index(_blocksizes=small_blocksizes)
self.assertTrue(indexrows is not None)
idxcol = table.cols.var1.index
if verbose:
print("After re-creation")
print("var1 column:", table.cols.var1)
self.assertTrue(idxcol is not None)
self.assertEqual(table.colindexed["var1"], 1)
def test09b_removeIndex(self):
"""Checking removing an index (persistent version)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test09b_removeIndex..." %
self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var1.index
if verbose:
print("Before deletion")
print("var1 index column:", table.cols.var1)
self.assertTrue(idxcol is not None)
self.assertEqual(table.colindexed["var1"], 1)
# delete the index
table.cols.var1.remove_index()
# close and reopen the file
self._reopen(mode="a")
table = self.h5file.root.table
idxcol = table.cols.var1.index
if verbose:
print("After deletion")
print("var1 column:", table.cols.var1)
self.assertTrue(table.cols.var1.index is None)
self.assertEqual(table.colindexed["var1"], 0)
# re-create the index again
indexrows = table.cols.var1.create_index(_blocksizes=small_blocksizes)
self.assertTrue(indexrows is not None)
idxcol = table.cols.var1.index
if verbose:
print("After re-creation")
print("var1 column:", table.cols.var1)
self.assertTrue(idxcol is not None)
self.assertEqual(table.colindexed["var1"], 1)
def test10a_moveIndex(self):
"""Checking moving a table with an index."""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test10a_moveIndex..." % self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var1.index
if verbose:
print("Before move")
print("var1 column:", idxcol)
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
# Create a new group called "agroup"
agroup = self.h5file.create_group("/", "agroup")
# move the table to "agroup"
table.move(agroup, "table2")
if verbose:
print("After move")
print("var1 column:", idxcol)
self.assertTrue(table.cols.var1.index is not None)
self.assertEqual(table.colindexed["var1"], 1)
# Some sanity checks
table.flavor = "python"
rowList1 = table.get_where_list('var1 < b"10"')
rowList2 = [p.nrow for p in table if p['var1'] < b"10"]
if verbose:
print("Selected values:", rowList1)
print("Should look like:", rowList2)
self.assertEqual(len(rowList1), len(rowList2))
self.assertEqual(rowList1, rowList2)
def test10b_moveIndex(self):
"""Checking moving a table with an index (persistent version)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test10b_moveIndex..." % self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var1.index
if verbose:
print("Before move")
print("var1 index column:", idxcol)
self.assertTrue(idxcol is not None)
self.assertEqual(table.colindexed["var1"], 1)
# Create a new group called "agroup"
agroup = self.h5file.create_group("/", "agroup")
# move the table to "agroup"
table.move(agroup, "table2")
# close and reopen the file
self._reopen(mode="a")
table = self.h5file.root.agroup.table2
idxcol = table.cols.var1.index
if verbose:
print("After move")
print("var1 column:", idxcol)
self.assertTrue(table.cols.var1.index is not None)
self.assertEqual(table.colindexed["var1"], 1)
# Some sanity checks
table.flavor = "python"
rowList1 = table.get_where_list('var1 < b"10"')
rowList2 = [p.nrow for p in table if p['var1'] < b"10"]
if verbose:
print("Selected values:", rowList1, type(rowList1))
print("Should look like:", rowList2, type(rowList2))
self.assertEqual(len(rowList1), len(rowList2))
self.assertEqual(rowList1, rowList2)
def test10c_moveIndex(self):
"""Checking moving a table with an index (small node cache)."""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test10c_moveIndex..." % self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a",
node_cache_slots=10)
table = self.h5file.root.table
idxcol = table.cols.var1.index
if verbose:
print("Before move")
print("var1 column:", idxcol)
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
# Create a new group called "agroup"
agroup = self.h5file.create_group("/", "agroup")
# move the table to "agroup"
table.move(agroup, "table2")
if verbose:
print("After move")
print("var1 column:", idxcol)
self.assertTrue(table.cols.var1.index is not None)
self.assertEqual(table.colindexed["var1"], 1)
# Some sanity checks
table.flavor = "python"
rowList1 = table.get_where_list('var1 < b"10"')
rowList2 = [p.nrow for p in table if p['var1'] < b"10"]
if verbose:
print("Selected values:", rowList1)
print("Should look like:", rowList2)
self.assertEqual(len(rowList1), len(rowList2))
self.assertEqual(rowList1, rowList2)
def test10d_moveIndex(self):
"""Checking moving a table with an index (no node cache)."""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test10d_moveIndex..." % self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a",
node_cache_slots=0)
table = self.h5file.root.table
idxcol = table.cols.var1.index
if verbose:
print("Before move")
print("var1 column:", idxcol)
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
# Create a new group called "agroup"
agroup = self.h5file.create_group("/", "agroup")
# move the table to "agroup"
table.move(agroup, "table2")
if verbose:
print("After move")
print("var1 column:", idxcol)
self.assertTrue(table.cols.var1.index is not None)
self.assertEqual(table.colindexed["var1"], 1)
# Some sanity checks
table.flavor = "python"
rowList1 = table.get_where_list('var1 < b"10"')
rowList2 = [p.nrow for p in table if p['var1'] < b"10"]
if verbose:
print("Selected values:", rowList1)
print("Should look like:", rowList2)
self.assertEqual(len(rowList1), len(rowList2))
self.assertEqual(rowList1, rowList2)
def test11a_removeTableWithIndex(self):
"""Checking removing a table with indexes."""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test11a_removeTableWithIndex..." %
self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var1.index
if verbose:
print("Before deletion")
print("var1 column:", table.cols.var1)
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
# delete the table
self.h5file.remove_node("/table")
if verbose:
print("After deletion")
self.assertTrue("table" not in self.h5file.root)
# re-create the table and the index again
table = self.h5file.create_table("/", 'table', TDescr, "New table",
self.filters, self.nrows)
for i in range(self.nrows):
table.row['var1'] = str(i)
table.row['var2'] = i % 2
table.row['var3'] = i
table.row['var4'] = float(self.nrows - i - 1)
table.row.append()
table.flush()
# Index all entries:
for col in six.itervalues(table.colinstances):
indexrows = col.create_index(_blocksizes=small_blocksizes)
self.assertTrue(indexrows is not None)
idxcol = table.cols.var1.index
if verbose:
print("After re-creation")
print("var1 column:", table.cols.var1)
self.assertTrue(idxcol is not None)
self.assertEqual(table.colindexed["var1"], 1)
def test11b_removeTableWithIndex(self):
"""Checking removing a table with indexes (persistent version 2)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test11b_removeTableWithIndex..." %
self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var1.index
if verbose:
print("Before deletion")
print("var1 column:", table.cols.var1)
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
# delete the table
self.h5file.remove_node("/table")
if verbose:
print("After deletion")
self.assertTrue("table" not in self.h5file.root)
# close and reopen the file
self._reopen(mode="r+")
# re-create the table and the index again
table = self.h5file.create_table("/", 'table', TDescr, "New table",
self.filters, self.nrows)
for i in range(self.nrows):
table.row['var1'] = str(i)
table.row['var2'] = i % 2
table.row['var3'] = i
table.row['var4'] = float(self.nrows - i - 1)
table.row.append()
table.flush()
# Index all entries:
for col in six.itervalues(table.colinstances):
indexrows = col.create_index(_blocksizes=small_blocksizes)
self.assertTrue(indexrows is not None)
idxcol = table.cols.var1.index
if verbose:
print("After re-creation")
print("var1 column:", table.cols.var1)
self.assertTrue(idxcol is not None)
self.assertEqual(table.colindexed["var1"], 1)
# Test provided by Andrew Straw
def test11c_removeTableWithIndex(self):
"""Checking removing a table with indexes (persistent version 3)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test11c_removeTableWithIndex..." %
self.__class__.__name__)
class Distance(tables.IsDescription):
frame = Int32Col(pos=0)
distance = FloatCol(pos=1)
        # Delete the old temporary file
os.remove(self.h5fname)
self.h5fname = tempfile.mktemp(".h5")
self.h5file = tables.open_file(self.h5fname, mode='w')
table = self.h5file.create_table(
self.h5file.root, 'distance_table', Distance)
table.cols.frame.create_index(_blocksizes=small_blocksizes)
r = table.row
for i in range(10):
r['frame'] = i
r['distance'] = float(i**2)
r.append()
table.flush()
self._reopen(mode='r+')
self.h5file.remove_node(self.h5file.root.distance_table)
def test12_doubleIterate(self):
self.h5file = tables.open_file(self.h5fname, mode="r")
table = self.h5file.root.table
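        # Run two independent iterators over the same indexed condition:
        # exhaust the second completely, then check that the first still
        # yields every matching row and the exhausted one yields nothing more.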
tests = [1, 4, self.nrows]
if self.nrows > 500:
tests.append(self.nrows - 500)
for limit in tests:
handle_a = [0, table.where('(var3 < e)', dict(e=limit))]
handle_b = [0, table.where('(var3 < e)', dict(e=limit))]
try:
while True:
next(handle_b[1])
handle_b[0] += 1
except StopIteration:
for _ in handle_a[1]:
handle_a[0] += 1
for _ in handle_b[1]:
handle_b[0] += 1
self.assertEqual(handle_a[0], limit)
self.assertEqual(handle_b[0], limit)
self.assertEqual(
len(list(table.where('(var3 < e)', dict(e=limit)))), limit)
small_ss = small_blocksizes[2]
class BasicReadTestCase(BasicTestCase):
compress = 0
complib = "zlib"
shuffle = 0
fletcher32 = 0
nrows = small_ss
class ZlibReadTestCase(BasicTestCase):
compress = 1
complib = "zlib"
shuffle = 0
fletcher32 = 0
nrows = small_ss
@unittest.skipIf(not common.blosc_avail,
'BLOSC compression library not available')
class BloscReadTestCase(BasicTestCase):
compress = 1
complib = "blosc"
shuffle = 0
fletcher32 = 0
nrows = small_ss
@unittest.skipIf(not common.lzo_avail, 'LZO compression library not available')
class LZOReadTestCase(BasicTestCase):
compress = 1
complib = "lzo"
shuffle = 0
fletcher32 = 0
nrows = small_ss
@unittest.skipIf(not common.bzip2_avail,
'BZIP2 compression library not available')
class Bzip2ReadTestCase(BasicTestCase):
compress = 1
complib = "bzip2"
shuffle = 0
fletcher32 = 0
nrows = small_ss
class ShuffleReadTestCase(BasicTestCase):
compress = 1
complib = "zlib"
shuffle = 1
fletcher32 = 0
nrows = small_ss
class Fletcher32ReadTestCase(BasicTestCase):
compress = 1
complib = "zlib"
shuffle = 0
fletcher32 = 1
nrows = small_ss
class ShuffleFletcher32ReadTestCase(BasicTestCase):
compress = 1
complib = "zlib"
shuffle = 1
fletcher32 = 1
nrows = small_ss
class OneHalfTestCase(BasicTestCase):
nrows = small_ss + small_ss//2
class UpperBoundTestCase(BasicTestCase):
nrows = small_ss + 1
class LowerBoundTestCase(BasicTestCase):
nrows = small_ss * 2-1
class DeepTableIndexTestCase(common.TempFileMixin, TestCase):
nrows = minRowIndex
def test01(self):
"""Checking the indexing of a table in a 2nd level hierarchy"""
# Create an instance of an HDF5 Table
group = self.h5file.create_group(self.h5file.root, "agroup")
# Create a table
title = "This is the IndexArray title"
table = self.h5file.create_table(group, 'table', TDescr, title,
None, self.nrows)
for i in range(self.nrows):
# Fill rows with defaults
table.row.append()
table.flush()
# Index some column
indexrows = table.cols.var1.create_index()
self.assertTrue(indexrows is not None)
idxcol = table.cols.var1.index
# Some sanity checks
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
self.assertEqual(idxcol.nelements, self.nrows)
def test01b(self):
"""Checking the indexing of a table in 2nd level
(persistent version)"""
# Create an instance of an HDF5 Table
group = self.h5file.create_group(self.h5file.root, "agroup")
# Create a table
title = "This is the IndexArray title"
table = self.h5file.create_table(group, 'table', TDescr, title,
None, self.nrows)
for i in range(self.nrows):
# Fill rows with defaults
table.row.append()
table.flush()
# Index some column
indexrows = table.cols.var1.create_index()
self.assertTrue(indexrows is not None)
idxcol = table.cols.var1.index
# Close and re-open this file
self._reopen(mode='a')
table = self.h5file.root.agroup.table
idxcol = table.cols.var1.index
# Some sanity checks
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
self.assertEqual(idxcol.nelements, self.nrows)
def test02(self):
"""Checking the indexing of a table in a 4th level hierarchy"""
# Create an instance of an HDF5 Table
group = self.h5file.create_group(self.h5file.root, "agroup")
group = self.h5file.create_group(group, "agroup")
group = self.h5file.create_group(group, "agroup")
# Create a table
title = "This is the IndexArray title"
table = self.h5file.create_table(group, 'table', TDescr, title,
None, self.nrows)
for i in range(self.nrows):
# Fill rows with defaults
table.row.append()
table.flush()
# Index some column
indexrows = table.cols.var1.create_index()
self.assertTrue(indexrows is not None)
idxcol = table.cols.var1.index
# Some sanity checks
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
self.assertEqual(idxcol.nelements, self.nrows)
def test02b(self):
"""Checking the indexing of a table in a 4th level
(persistent version)"""
# Create an instance of an HDF5 Table
group = self.h5file.create_group(self.h5file.root, "agroup")
group = self.h5file.create_group(group, "agroup")
group = self.h5file.create_group(group, "agroup")
# Create a table
title = "This is the IndexArray title"
table = self.h5file.create_table(group, 'table', TDescr, title,
None, self.nrows)
for i in range(self.nrows):
# Fill rows with defaults
table.row.append()
table.flush()
# Index some column
indexrows = table.cols.var1.create_index()
self.assertTrue(indexrows is not None)
idxcol = table.cols.var1.index
# Close and re-open this file
self._reopen(mode='a')
table = self.h5file.root.agroup.agroup.agroup.table
idxcol = table.cols.var1.index
# Some sanity checks
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
self.assertEqual(idxcol.nelements, self.nrows)
def test03(self):
"""Checking the indexing of a table in a 100th level hierarchy"""
# Create an instance of an HDF5 Table
group = self.h5file.root
for i in range(100):
group = self.h5file.create_group(group, "agroup")
# Create a table
title = "This is the IndexArray title"
table = self.h5file.create_table(group, 'table', TDescr, title,
None, self.nrows)
for i in range(self.nrows):
# Fill rows with defaults
table.row.append()
table.flush()
# Index some column
indexrows = table.cols.var1.create_index()
self.assertTrue(indexrows is not None)
idxcol = table.cols.var1.index
# Some sanity checks
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(idxcol is not None)
self.assertEqual(idxcol.nelements, self.nrows)
class IndexProps(object):
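    """Simple holder for an index policy: autoindex flag plus index filters."""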
def __init__(self, auto=default_auto_index, filters=default_index_filters):
self.auto = auto
self.filters = filters
DefaultProps = IndexProps()
NoAutoProps = IndexProps(auto=False)
ChangeFiltersProps = IndexProps(
filters=tables.Filters(complevel=6, complib="zlib",
shuffle=False, fletcher32=False))
class AutomaticIndexingTestCase(common.TempFileMixin, TestCase):
reopen = 1
iprops = NoAutoProps
colsToIndex = ['var1', 'var2', 'var3']
small_blocksizes = (16, 8, 4, 2)
def setUp(self):
super(AutomaticIndexingTestCase, self).setUp()
# Create an instance of an HDF5 Table
title = "This is the IndexArray title"
root = self.h5file.root
        # Make the chunkshape smaller than or equal to small_blocksizes[-1]
chunkshape = (2,)
self.table = self.h5file.create_table(root, 'table', TDescr, title,
None, self.nrows,
chunkshape=chunkshape)
self.table.autoindex = self.iprops.auto
for colname in self.colsToIndex:
self.table.colinstances[colname].create_index(
_blocksizes=self.small_blocksizes)
for i in range(self.nrows):
# Fill rows with defaults
self.table.row.append()
self.table.flush()
if self.reopen:
self._reopen(mode='a')
self.table = self.h5file.root.table
def test01_attrs(self):
"""Checking indexing attributes (part1)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test01_attrs..." % self.__class__.__name__)
table = self.table
if self.iprops is DefaultProps:
self.assertEqual(table.indexed, 0)
else:
self.assertEqual(table.indexed, 1)
if self.iprops is DefaultProps:
self.assertEqual(table.colindexed["var1"], 0)
self.assertTrue(table.cols.var1.index is None)
self.assertEqual(table.colindexed["var2"], 0)
self.assertTrue(table.cols.var2.index is None)
self.assertEqual(table.colindexed["var3"], 0)
self.assertTrue(table.cols.var3.index is None)
self.assertEqual(table.colindexed["var4"], 0)
self.assertTrue(table.cols.var4.index is None)
else:
            # Check that var1, var2 and var3 (and only these)
            # have been indexed
self.assertEqual(table.colindexed["var1"], 1)
self.assertTrue(table.cols.var1.index is not None)
self.assertEqual(table.colindexed["var2"], 1)
self.assertTrue(table.cols.var2.index is not None)
self.assertEqual(table.colindexed["var3"], 1)
self.assertTrue(table.cols.var3.index is not None)
self.assertEqual(table.colindexed["var4"], 0)
self.assertTrue(table.cols.var4.index is None)
def test02_attrs(self):
"""Checking indexing attributes (part2)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test02_attrs..." % self.__class__.__name__)
table = self.table
# Check the policy parameters
if verbose:
if table.indexed:
print("index props:", table.autoindex)
else:
print("Table is not indexed")
# Check non-default values for index saving policy
if self.iprops is NoAutoProps:
self.assertFalse(table.autoindex)
elif self.iprops is ChangeFiltersProps:
self.assertTrue(table.autoindex)
# Check Index() objects exists and are properly placed
if self.iprops is DefaultProps:
self.assertEqual(table.cols.var1.index, None)
self.assertEqual(table.cols.var2.index, None)
self.assertEqual(table.cols.var3.index, None)
self.assertEqual(table.cols.var4.index, None)
else:
self.assertTrue(isinstance(table.cols.var1.index, Index))
self.assertTrue(isinstance(table.cols.var2.index, Index))
self.assertTrue(isinstance(table.cols.var3.index, Index))
self.assertEqual(table.cols.var4.index, None)
def test03_counters(self):
"""Checking indexing counters"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test03_counters..." % self.__class__.__name__)
table = self.table
# Check the counters for indexes
if verbose:
if table.indexed:
print("indexedrows:", table._indexedrows)
print("unsavedindexedrows:", table._unsaved_indexedrows)
index = table.cols.var1.index
print("table rows:", table.nrows)
print("computed indexed rows:", index.nrows * index.slicesize)
else:
print("Table is not indexed")
if self.iprops is not DefaultProps:
index = table.cols.var1.index
indexedrows = index.nelements
self.assertEqual(table._indexedrows, indexedrows)
indexedrows = index.nelements
self.assertEqual(table._unsaved_indexedrows,
self.nrows - indexedrows)
def test04_noauto(self):
"""Checking indexing counters (non-automatic mode)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test04_noauto..." % self.__class__.__name__)
table = self.table
# Force a sync in indexes
table.flush_rows_to_index()
# Check the counters for indexes
if verbose:
if table.indexed:
print("indexedrows:", table._indexedrows)
print("unsavedindexedrows:", table._unsaved_indexedrows)
index = table.cols.var1.index
print("computed indexed rows:", index.nelements)
else:
print("Table is not indexed")
        # No unindexed rows should remain
index = table.cols.var1.index
if self.iprops is DefaultProps:
self.assertTrue(index is None)
else:
indexedrows = index.nelements
self.assertEqual(table._indexedrows, index.nelements)
self.assertEqual(table._unsaved_indexedrows,
self.nrows - indexedrows)
# Check non-default values for index saving policy
if self.iprops is NoAutoProps:
self.assertFalse(table.autoindex)
elif self.iprops is ChangeFiltersProps:
self.assertTrue(table.autoindex)
def test05_icounters(self):
"""Checking indexing counters (remove_rows)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test05_icounters..." % self.__class__.__name__)
table = self.table
# Force a sync in indexes
table.flush_rows_to_index()
        # Non-indexed rows should remain here
if self.iprops is not DefaultProps:
indexedrows = table._indexedrows
unsavedindexedrows = table._unsaved_indexedrows
# Now, remove some rows:
table.remove_rows(2, 4)
if self.reopen:
self._reopen(mode='a')
table = self.h5file.root.table
# Check the counters for indexes
if verbose:
if table.indexed:
print("indexedrows:", table._indexedrows)
print("original indexedrows:", indexedrows)
print("unsavedindexedrows:", table._unsaved_indexedrows)
print("original unsavedindexedrows:", unsavedindexedrows)
# index = table.cols.var1.index
print("index dirty:", table.cols.var1.index.dirty)
else:
print("Table is not indexed")
# Check the counters
self.assertEqual(table.nrows, self.nrows - 2)
if self.iprops is NoAutoProps:
self.assertTrue(table.cols.var1.index.dirty)
# Check non-default values for index saving policy
if self.iprops is NoAutoProps:
self.assertFalse(table.autoindex)
elif self.iprops is ChangeFiltersProps:
self.assertTrue(table.autoindex)
def test06_dirty(self):
"""Checking dirty flags (remove_rows action)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test06_dirty..." % self.__class__.__name__)
table = self.table
# Force a sync in indexes
table.flush_rows_to_index()
# Now, remove some rows:
table.remove_rows(3, 5)
if self.reopen:
self._reopen(mode='a')
table = self.h5file.root.table
# Check the dirty flag for indexes
if verbose:
print("auto flag:", table.autoindex)
for colname in table.colnames:
if table.cols._f_col(colname).index:
print("dirty flag col %s: %s" %
(colname, table.cols._f_col(colname).index.dirty))
# Check the flags
for colname in table.colnames:
if table.cols._f_col(colname).index:
if not table.autoindex:
self.assertEqual(table.cols._f_col(colname).index.dirty,
True)
else:
self.assertEqual(table.cols._f_col(colname).index.dirty,
False)
def test07_noauto(self):
"""Checking indexing counters (modify_rows, no-auto mode)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test07_noauto..." % self.__class__.__name__)
table = self.table
# Force a sync in indexes
table.flush_rows_to_index()
        # No unindexed rows should remain here
if self.iprops is not DefaultProps:
indexedrows = table._indexedrows
unsavedindexedrows = table._unsaved_indexedrows
# Now, modify just one row:
table.modify_rows(3, None, 1, [["asa", 0, 3, 3.1]])
if self.reopen:
self._reopen(mode='a')
table = self.h5file.root.table
# Check the counters for indexes
if verbose:
if table.indexed:
print("indexedrows:", table._indexedrows)
print("original indexedrows:", indexedrows)
print("unsavedindexedrows:", table._unsaved_indexedrows)
print("original unsavedindexedrows:", unsavedindexedrows)
index = table.cols.var1.index
print("computed indexed rows:", index.nelements)
else:
print("Table is not indexed")
# Check the counters
self.assertEqual(table.nrows, self.nrows)
if self.iprops is NoAutoProps:
self.assertTrue(table.cols.var1.index.dirty)
# Check the dirty flag for indexes
if verbose:
for colname in table.colnames:
if table.cols._f_col(colname).index:
print("dirty flag col %s: %s" %
(colname, table.cols._f_col(colname).index.dirty))
for colname in table.colnames:
if table.cols._f_col(colname).index:
if not table.autoindex:
self.assertEqual(table.cols._f_col(colname).index.dirty,
True)
else:
self.assertEqual(table.cols._f_col(colname).index.dirty,
False)
def test07b_noauto(self):
"""Checking indexing queries (modify in iterator, no-auto mode)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test07b_noauto..." % self.__class__.__name__)
table = self.table
# Force a sync in indexes
table.flush_rows_to_index()
# Do a query that uses indexes
res = [row.nrow for row in table.where('(var2 == True) & (var3 > 0)')]
# Now, modify just one row:
for row in table:
if row.nrow == 3:
row['var1'] = "asa"
row['var2'] = True
row['var3'] = 3
row['var4'] = 3.1
row.update()
table.flush()
if self.reopen:
self._reopen(mode='a')
table = self.h5file.root.table
# Do a query that uses indexes
resq = [row.nrow for row in table.where('(var2 == True) & (var3 > 0)')]
res_ = res + [3]
if verbose:
print("AutoIndex?:", table.autoindex)
print("Query results (original):", res)
print("Query results (after modifying table):", resq)
print("Should look like:", res_)
self.assertEqual(res_, resq)
def test07c_noauto(self):
"""Checking indexing queries (append, no-auto mode)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test07c_noauto..." % self.__class__.__name__)
table = self.table
# Force a sync in indexes
table.flush_rows_to_index()
# Do a query that uses indexes
res = [row.nrow for row in table.where('(var2 == True) & (var3 > 0)')]
# Now, append three rows
table.append([("asa", True, 1, 3.1)])
table.append([("asb", True, 2, 3.1)])
table.append([("asc", True, 3, 3.1)])
table.flush()
if self.reopen:
self._reopen(mode='a')
table = self.h5file.root.table
# Do a query that uses indexes
resq = [row.nrow for row in table.where('(var2 == True) & (var3 > 0)')]
res_ = res + [table.nrows-3, table.nrows-2, table.nrows-1]
if verbose:
print("AutoIndex?:", table.autoindex)
print("Query results (original):", res)
print("Query results (after modifying table):", resq)
print("Should look like:", res_)
self.assertEqual(res_, resq)
def test08_dirty(self):
"""Checking dirty flags (modify_columns)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test08_dirty..." % self.__class__.__name__)
table = self.table
# Force a sync in indexes
table.flush_rows_to_index()
        # Non-indexed rows should remain here
if self.iprops is not DefaultProps:
indexedrows = table._indexedrows
self.assertTrue(indexedrows is not None)
unsavedindexedrows = table._unsaved_indexedrows
self.assertTrue(unsavedindexedrows is not None)
# Now, modify a couple of rows:
table.modify_columns(1, columns=[["asa", "asb"], [1., 2.]],
names=["var1", "var4"])
if self.reopen:
self._reopen(mode='a')
table = self.h5file.root.table
# Check the counters
self.assertEqual(table.nrows, self.nrows)
if self.iprops is NoAutoProps:
self.assertTrue(table.cols.var1.index.dirty)
# Check the dirty flag for indexes
if verbose:
for colname in table.colnames:
if table.cols._f_col(colname).index:
print("dirty flag col %s: %s" %
(colname, table.cols._f_col(colname).index.dirty))
for colname in table.colnames:
if table.cols._f_col(colname).index:
if not table.autoindex:
if colname in ["var1"]:
self.assertEqual(
table.cols._f_col(colname).index.dirty, True)
else:
self.assertEqual(
table.cols._f_col(colname).index.dirty, False)
else:
self.assertEqual(table.cols._f_col(colname).index.dirty,
False)
def test09a_propIndex(self):
"""Checking propagate Index feature in Table.copy() (attrs)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test09a_propIndex..." % self.__class__.__name__)
table = self.table
# Don't force a sync in indexes
# table.flush_rows_to_index()
        # Non-indexed rows should remain here
if self.iprops is not DefaultProps:
indexedrows = table._indexedrows
self.assertTrue(indexedrows is not None)
unsavedindexedrows = table._unsaved_indexedrows
self.assertTrue(unsavedindexedrows is not None)
# Now, remove some rows to make columns dirty
# table.remove_rows(3,5)
# Copy a Table to another location
table2 = table.copy("/", 'table2', propindexes=True)
if self.reopen:
self._reopen(mode='a')
table = self.h5file.root.table
table2 = self.h5file.root.table2
index1 = table.cols.var1.index
index2 = table2.cols.var1.index
if verbose:
print("Copied index:", index2)
print("Original index:", index1)
if index1:
print("Elements in copied index:", index2.nelements)
print("Elements in original index:", index1.nelements)
# Check the counters
self.assertEqual(table.nrows, table2.nrows)
if table.indexed:
self.assertTrue(table2.indexed)
if self.iprops is DefaultProps:
# No index: the index should not exist
self.assertTrue(index1 is None)
self.assertTrue(index2 is None)
elif self.iprops is NoAutoProps:
self.assertTrue(index2 is not None)
# Check the dirty flag for indexes
if verbose:
for colname in table2.colnames:
if table2.cols._f_col(colname).index:
print("dirty flag col %s: %s" %
(colname, table2.cols._f_col(colname).index.dirty))
for colname in table2.colnames:
if table2.cols._f_col(colname).index:
self.assertEqual(table2.cols._f_col(colname).index.dirty,
False)
def test09b_propIndex(self):
"""Checking that propindexes=False works"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test09b_propIndex..." % self.__class__.__name__)
table = self.table
# Don't force a sync in indexes
# table.flush_rows_to_index()
        # Non-indexed rows should remain here
if self.iprops is not DefaultProps:
indexedrows = table._indexedrows
self.assertTrue(indexedrows is not None)
unsavedindexedrows = table._unsaved_indexedrows
self.assertTrue(unsavedindexedrows is not None)
# Now, remove some rows to make columns dirty
# table.remove_rows(3,5)
# Copy a Table to another location
table2 = table.copy("/", 'table2', propindexes=False)
if self.reopen:
self._reopen(mode='a')
table = self.h5file.root.table
table2 = self.h5file.root.table2
if verbose:
print("autoindex?:", self.iprops.auto)
print("Copied index indexed?:", table2.cols.var1.is_indexed)
print("Original index indexed?:", table.cols.var1.is_indexed)
if self.iprops is DefaultProps:
# No index: the index should not exist
self.assertFalse(table2.cols.var1.is_indexed)
self.assertFalse(table.cols.var1.is_indexed)
elif self.iprops is NoAutoProps:
self.assertFalse(table2.cols.var1.is_indexed)
self.assertTrue(table.cols.var1.is_indexed)
def test10_propIndex(self):
"""Checking propagate Index feature in Table.copy() (values)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test10_propIndex..." % self.__class__.__name__)
table = self.table
# Don't force a sync in indexes
# table.flush_rows_to_index()
        # Non-indexed rows should remain here
if self.iprops is not DefaultProps:
indexedrows = table._indexedrows
self.assertTrue(indexedrows is not None)
unsavedindexedrows = table._unsaved_indexedrows
self.assertTrue(unsavedindexedrows is not None)
# Now, remove some rows to make columns dirty
# table.remove_rows(3,5)
# Copy a Table to another location
table2 = table.copy("/", 'table2', propindexes=True)
if self.reopen:
self._reopen(mode='a')
table = self.h5file.root.table
table2 = self.h5file.root.table2
index1 = table.cols.var3.index
index2 = table2.cols.var3.index
if verbose:
print("Copied index:", index2)
print("Original index:", index1)
if index1:
print("Elements in copied index:", index2.nelements)
print("Elements in original index:", index1.nelements)
def test11_propIndex(self):
"""Checking propagate Index feature in Table.copy() (dirty flags)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test11_propIndex..." % self.__class__.__name__)
table = self.table
# Force a sync in indexes
table.flush_rows_to_index()
        # Non-indexed rows should remain here
if self.iprops is not DefaultProps:
indexedrows = table._indexedrows
self.assertTrue(indexedrows is not None)
unsavedindexedrows = table._unsaved_indexedrows
self.assertTrue(unsavedindexedrows is not None)
# Now, modify an indexed column and an unindexed one
# to make the "var1" dirty
table.modify_columns(1, columns=[["asa", "asb"], [1., 2.]],
names=["var1", "var4"])
# Copy a Table to another location
table2 = table.copy("/", 'table2', propindexes=True)
if self.reopen:
self._reopen(mode='a')
table = self.h5file.root.table
table2 = self.h5file.root.table2
index1 = table.cols.var1.index
index2 = table2.cols.var1.index
if verbose:
print("Copied index:", index2)
print("Original index:", index1)
if index1:
print("Elements in copied index:", index2.nelements)
print("Elements in original index:", index1.nelements)
# Check the dirty flag for indexes
if verbose:
for colname in table2.colnames:
if table2.cols._f_col(colname).index:
print("dirty flag col %s: %s" %
(colname, table2.cols._f_col(colname).index.dirty))
for colname in table2.colnames:
if table2.cols._f_col(colname).index:
if table2.autoindex:
# All the destination columns should be non-dirty because
# the copy removes the dirty state and puts the
# index in a sane state
self.assertEqual(table2.cols._f_col(colname).index.dirty,
False)
# minRowIndex = 10000  # use this if you want more indexed rows to be checked
class AI1TestCase(AutomaticIndexingTestCase):
# nrows = 10002
nrows = 102
reopen = 0
iprops = NoAutoProps
colsToIndex = ['var1', 'var2', 'var3']
class AI2TestCase(AutomaticIndexingTestCase):
# nrows = 10002
nrows = 102
reopen = 1
iprops = NoAutoProps
colsToIndex = ['var1', 'var2', 'var3']
class AI4bTestCase(AutomaticIndexingTestCase):
# nrows = 10012
nrows = 112
reopen = 1
iprops = NoAutoProps
colsToIndex = ['var1', 'var2', 'var3']
class AI5TestCase(AutomaticIndexingTestCase):
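    # calc_chunksize() is assumed to return the four index blocksizes
    # (superblocksize, blocksize, slicesize, chunksize); ss is the slicesize,
    # so these nrows values sit just off slice boundaries.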
sbs, bs, ss, cs = calc_chunksize(minRowIndex, memlevel=1)
nrows = ss * 11-1
reopen = 0
iprops = NoAutoProps
colsToIndex = ['var1', 'var2', 'var3']
class AI6TestCase(AutomaticIndexingTestCase):
sbs, bs, ss, cs = calc_chunksize(minRowIndex, memlevel=1)
nrows = ss * 21 + 1
reopen = 1
iprops = NoAutoProps
colsToIndex = ['var1', 'var2', 'var3']
class AI7TestCase(AutomaticIndexingTestCase):
sbs, bs, ss, cs = calc_chunksize(minRowIndex, memlevel=1)
nrows = ss * 12-1
# nrows = ss * 1-1 # faster test
reopen = 0
iprops = NoAutoProps
colsToIndex = ['var1', 'var2', 'var3']
class AI8TestCase(AutomaticIndexingTestCase):
sbs, bs, ss, cs = calc_chunksize(minRowIndex, memlevel=1)
nrows = ss * 15 + 100
# nrows = ss * 1 + 100 # faster test
reopen = 1
iprops = NoAutoProps
colsToIndex = ['var1', 'var2', 'var3']
class AI9TestCase(AutomaticIndexingTestCase):
sbs, bs, ss, cs = calc_chunksize(minRowIndex, memlevel=1)
nrows = ss
reopen = 0
iprops = DefaultProps
colsToIndex = []
class AI10TestCase(AutomaticIndexingTestCase):
# nrows = 10002
nrows = 102
reopen = 1
iprops = DefaultProps
colsToIndex = []
class AI11TestCase(AutomaticIndexingTestCase):
# nrows = 10002
nrows = 102
reopen = 0
iprops = ChangeFiltersProps
colsToIndex = ['var1', 'var2', 'var3']
class AI12TestCase(AutomaticIndexingTestCase):
# nrows = 10002
nrows = 102
reopen = 0
iprops = ChangeFiltersProps
colsToIndex = ['var1', 'var2', 'var3']
class ManyNodesTestCase(common.TempFileMixin, TestCase):
    open_kwargs = dict(node_cache_slots=64)
def test00(self):
"""Indexing many nodes in one single session (based on bug #26)"""
IdxRecord = {
'f0': Int8Col(),
'f1': Int8Col(),
'f2': Int8Col(),
}
for qn in range(5):
for sn in range(5):
qchr = 'chr' + str(qn)
name = 'chr' + str(sn)
path = "/at/%s/pt" % (qchr)
table = self.h5file.create_table(
path, name, IdxRecord, createparents=1)
table.cols.f0.create_index()
table.cols.f1.create_index()
table.cols.f2.create_index()
table.row.append()
table.flush()
class IndexPropsChangeTestCase(TempFileMixin, TestCase):
"""Test case for changing index properties in a table."""
class MyDescription(tables.IsDescription):
icol = IntCol()
oldIndexProps = IndexProps()
newIndexProps = IndexProps(auto=False, filters=tables.Filters(complevel=9))
def setUp(self):
super(IndexPropsChangeTestCase, self).setUp()
table = self.h5file.create_table('/', 'test', self.MyDescription)
table.autoindex = self.oldIndexProps.auto
row = table.row
for i in range(100):
row['icol'] = i % 25
row.append()
table.flush()
self.table = table
def test_attributes(self):
"""Storing index properties as table attributes."""
for refprops in [self.oldIndexProps, self.newIndexProps]:
self.assertEqual(self.table.autoindex, refprops.auto)
self.table.autoindex = self.newIndexProps.auto
def test_copyattrs(self):
"""Copying index properties attributes."""
oldtable = self.table
newtable = oldtable.copy('/', 'test2')
self.assertEqual(oldtable.autoindex, newtable.autoindex)
class IndexFiltersTestCase(TempFileMixin, TestCase):
"""Test case for setting index filters."""
def setUp(self):
super(IndexFiltersTestCase, self).setUp()
description = {'icol': IntCol()}
self.table = self.h5file.create_table('/', 'test', description)
def test_createIndex(self):
"""Checking input parameters in new indexes."""
# Different from default.
argfilters = copy.copy(default_index_filters)
argfilters.shuffle = not default_index_filters.shuffle
# Different both from default and the previous one.
idxfilters = copy.copy(default_index_filters)
idxfilters.shuffle = not default_index_filters.shuffle
idxfilters.fletcher32 = not default_index_filters.fletcher32
icol = self.table.cols.icol
# First create
icol.create_index(kind='ultralight', optlevel=4)
self.assertEqual(icol.index.kind, 'ultralight')
self.assertEqual(icol.index.optlevel, 4)
self.assertEqual(icol.index.filters, default_index_filters)
icol.remove_index()
# Second create
icol.create_index(kind='medium', optlevel=3, filters=argfilters)
self.assertEqual(icol.index.kind, 'medium')
self.assertEqual(icol.index.optlevel, 3)
self.assertEqual(icol.index.filters, argfilters)
icol.remove_index()
def test_reindex(self):
"""Checking input parameters in recomputed indexes."""
icol = self.table.cols.icol
icol.create_index(
kind='full', optlevel=5, filters=tables.Filters(complevel=3))
kind = icol.index.kind
optlevel = icol.index.optlevel
filters = icol.index.filters
icol.reindex()
ni = icol.index
if verbose:
print("Old parameters: %s, %s, %s" % (kind, optlevel, filters))
print("New parameters: %s, %s, %s" % (
ni.kind, ni.optlevel, ni.filters))
self.assertEqual(ni.kind, kind)
self.assertEqual(ni.optlevel, optlevel)
self.assertEqual(ni.filters, filters)
class OldIndexTestCase(common.TestFileMixin, TestCase):
h5fname = test_filename("idx-std-1.x.h5")
def test1_x(self):
"""Check that files with 1.x indexes are recognized and warned."""
self.assertWarns(OldIndexWarning, self.h5file.get_node, "/table")
# Sensible parameters for indexing with small blocksizes
small_blocksizes = (512, 128, 32, 8)
class CompletelySortedIndexTestCase(TempFileMixin, TestCase):
"""Test case for testing a complete sort in a table."""
nrows = 100
nrowsinbuf = 11
class MyDescription(tables.IsDescription):
rcol = IntCol(pos=1)
icol = IntCol(pos=2)
def setUp(self):
super(CompletelySortedIndexTestCase, self).setUp()
table = self.h5file.create_table('/', 'table', self.MyDescription)
row = table.row
nrows = self.nrows
for i in range(nrows):
row['rcol'] = i
row['icol'] = nrows - i
row.append()
table.flush()
self.table = table
self.icol = self.table.cols.icol
# A full index with maximum optlevel should always be completely sorted
self.icol.create_csindex(_blocksizes=small_blocksizes)
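# create_csindex() requests a completely sorted index, which should be
# equivalent to create_index(kind='full', optlevel=9) with these blocksizes.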
def test00_isCompletelySortedIndex(self):
"""Testing the Column.is_csi property."""
icol = self.icol
self.assertEqual(icol.index.is_csi, True)
icol.remove_index()
# Other kinds than full, should never return a CSI
icol.create_index(kind="medium", optlevel=9)
self.assertEqual(icol.index.is_csi, False)
icol.remove_index()
# As the table is small, lesser optlevels should be able to
# create a completely sorted index too.
icol.create_index(kind="full", optlevel=6)
self.assertEqual(icol.index.is_csi, True)
# Checking a CSI in a sorted copy
self.table.copy("/", 'table2', sortby='icol', checkCSI=True)
self.assertEqual(icol.index.is_csi, True)
def test01_readSorted1(self):
"""Testing the Index.read_sorted() method with no arguments."""
icol = self.icol
sortedcol = numpy.sort(icol[:])
sortedcol2 = icol.index.read_sorted()
if verbose:
print("Original sorted column:", sortedcol)
print("The values from the index:", sortedcol2)
self.assertTrue(allequal(sortedcol, sortedcol2))
def test01_readSorted2(self):
"""Testing the Index.read_sorted() method with arguments (I)."""
icol = self.icol
sortedcol = numpy.sort(icol[:])[30:55]
sortedcol2 = icol.index.read_sorted(30, 55)
if verbose:
print("Original sorted column:", sortedcol)
print("The values from the index:", sortedcol2)
self.assertTrue(allequal(sortedcol, sortedcol2))
def test01_readSorted3(self):
"""Testing the Index.read_sorted() method with arguments (II)."""
icol = self.icol
sortedcol = numpy.sort(icol[:])[33:97]
sortedcol2 = icol.index.read_sorted(33, 97)
if verbose:
print("Original sorted column:", sortedcol)
print("The values from the index:", sortedcol2)
self.assertTrue(allequal(sortedcol, sortedcol2))
def test02_readIndices1(self):
"""Testing the Index.read_indices() method with no arguments."""
icol = self.icol
indicescol = numpy.argsort(icol[:]).astype('uint64')
indicescol2 = icol.index.read_indices()
if verbose:
print("Original indices column:", indicescol)
print("The values from the index:", indicescol2)
self.assertTrue(allequal(indicescol, indicescol2))
def test02_readIndices2(self):
"""Testing the Index.read_indices() method with arguments (I)."""
icol = self.icol
indicescol = numpy.argsort(icol[:])[30:55].astype('uint64')
indicescol2 = icol.index.read_indices(30, 55)
if verbose:
print("Original indices column:", indicescol)
print("The values from the index:", indicescol2)
self.assertTrue(allequal(indicescol, indicescol2))
def test02_readIndices3(self):
"""Testing the Index.read_indices() method with arguments (II)."""
icol = self.icol
indicescol = numpy.argsort(icol[:])[33:97].astype('uint64')
indicescol2 = icol.index.read_indices(33, 97)
if verbose:
print("Original indices column:", indicescol)
print("The values from the index:", indicescol2)
self.assertTrue(allequal(indicescol, indicescol2))
def test02_readIndices4(self):
"""Testing the Index.read_indices() method with arguments (III)."""
icol = self.icol
indicescol = numpy.argsort(icol[:])[33:97:2].astype('uint64')
indicescol2 = icol.index.read_indices(33, 97, 2)
if verbose:
print("Original indices column:", indicescol)
print("The values from the index:", indicescol2)
self.assertTrue(allequal(indicescol, indicescol2))
def test02_readIndices5(self):
"""Testing the Index.read_indices() method with arguments (IV)."""
icol = self.icol
indicescol = numpy.argsort(icol[:])[33:55:5].astype('uint64')
indicescol2 = icol.index.read_indices(33, 55, 5)
if verbose:
print("Original indices column:", indicescol)
print("The values from the index:", indicescol2)
self.assertTrue(allequal(indicescol, indicescol2))
def test02_readIndices6(self):
"""Testing the Index.read_indices() method with step only."""
icol = self.icol
indicescol = numpy.argsort(icol[:])[::3].astype('uint64')
indicescol2 = icol.index.read_indices(step=3)
if verbose:
print("Original indices column:", indicescol)
print("The values from the index:", indicescol2)
self.assertTrue(allequal(indicescol, indicescol2))
def test03_getitem1(self):
"""Testing the Index.__getitem__() method with no arguments."""
icol = self.icol
indicescol = numpy.argsort(icol[:]).astype('uint64')
indicescol2 = icol.index[:]
if verbose:
print("Original indices column:", indicescol)
print("The values from the index:", indicescol2)
self.assertTrue(allequal(indicescol, indicescol2))
def test03_getitem2(self):
"""Testing the Index.__getitem__() method with start."""
icol = self.icol
indicescol = numpy.argsort(icol[:])[31].astype('uint64')
indicescol2 = icol.index[31]
if verbose:
print("Original indices column:", indicescol)
print("The values from the index:", indicescol2)
self.assertTrue(allequal(indicescol, indicescol2))
def test03_getitem3(self):
"""Testing the Index.__getitem__() method with start, stop."""
icol = self.icol
indicescol = numpy.argsort(icol[:])[2:16].astype('uint64')
indicescol2 = icol.index[2:16]
if verbose:
print("Original indices column:", indicescol)
print("The values from the index:", indicescol2)
self.assertTrue(allequal(indicescol, indicescol2))
def test04_itersorted1(self):
"""Testing the Table.itersorted() method with no arguments."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')
sortedtable2 = numpy.array(
[row.fetch_all_fields() for row in table.itersorted(
'icol')], dtype=table._v_dtype)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from the iterator:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test04_itersorted2(self):
"""Testing the Table.itersorted() method with a start."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[15:]
sortedtable2 = numpy.array(
[row.fetch_all_fields() for row in table.itersorted(
'icol', start=15)], dtype=table._v_dtype)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from the iterator:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test04_itersorted3(self):
"""Testing the Table.itersorted() method with a stop."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[:20]
sortedtable2 = numpy.array(
[row.fetch_all_fields() for row in table.itersorted(
'icol', stop=20)], dtype=table._v_dtype)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from the iterator:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test04_itersorted4(self):
"""Testing the Table.itersorted() method with a start and stop."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[15:20]
sortedtable2 = numpy.array(
[row.fetch_all_fields() for row in table.itersorted(
'icol', start=15, stop=20)], dtype=table._v_dtype)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from the iterator:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test04_itersorted5(self):
"""Testing the Table.itersorted() method with a start, stop and
step."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[15:45:4]
sortedtable2 = numpy.array(
[row.fetch_all_fields() for row in table.itersorted(
'icol', start=15, stop=45, step=4)], dtype=table._v_dtype)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from the iterator:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test04_itersorted6(self):
"""Testing the Table.itersorted() method with a start, stop and
step."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[33:55:5]
sortedtable2 = numpy.array(
[row.fetch_all_fields() for row in table.itersorted(
'icol', start=33, stop=55, step=5)], dtype=table._v_dtype)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from the iterator:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test04_itersorted7(self):
"""Testing the Table.itersorted() method with checkCSI=True."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')
sortedtable2 = numpy.array(
[row.fetch_all_fields() for row in table.itersorted(
'icol', checkCSI=True)], dtype=table._v_dtype)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from the iterator:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test04_itersorted8(self):
"""Testing the Table.itersorted() method with a start, stop and
negative step."""
# see also gh-252
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[55:33:-5]
sortedtable2 = numpy.array(
[row.fetch_all_fields() for row in table.itersorted(
'icol', start=55, stop=33, step=-5)], dtype=table._v_dtype)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from the iterator:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test04_itersorted9(self):
"""Testing the Table.itersorted() method with a negative step."""
# see also gh-252
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[::-5]
sortedtable2 = numpy.array(
[row.fetch_all_fields() for row in table.itersorted(
'icol', step=-5)], dtype=table._v_dtype)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from the iterator:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted1(self):
"""Testing the Table.read_sorted() method with no arguments."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')
sortedtable2 = table.read_sorted('icol')
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted2(self):
"""Testing the Table.read_sorted() method with a start."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[16:17]
sortedtable2 = table.read_sorted('icol', start=16)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted3(self):
"""Testing the Table.read_sorted() method with a start and stop."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[16:33]
sortedtable2 = table.read_sorted('icol', start=16, stop=33)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted4(self):
"""Testing the Table.read_sorted() method with a start, stop and
step."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[33:55:5]
sortedtable2 = table.read_sorted('icol', start=33, stop=55, step=5)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted5(self):
"""Testing the Table.read_sorted() method with only a step."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[::3]
sortedtable2 = table.read_sorted('icol', step=3)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted6(self):
"""Testing the Table.read_sorted() method with negative step."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[::-1]
sortedtable2 = table.read_sorted('icol', step=-1)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted7(self):
"""Testing the Table.read_sorted() method with negative step (II)."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')[::-2]
sortedtable2 = table.read_sorted('icol', step=-2)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted8(self):
"""Testing the Table.read_sorted() method with negative step (III))."""
table = self.table
sstart = 100-24-1
sstop = 100-54-1
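# With a negative step, read_sorted() walks the sorted order from the end, so
# sorted position p maps to index nrows - p - 1 of the ascending numpy sort;
# that is why the expected slice uses these mirrored sstart/sstop values.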
sortedtable = numpy.sort(table[:], order='icol')[sstart:sstop:-1]
sortedtable2 = table.read_sorted('icol', start=24, stop=54, step=-1)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted9(self):
"""Testing the Table.read_sorted() method with negative step (IV))."""
table = self.table
sstart = 100-14-1
sstop = 100-54-1
sortedtable = numpy.sort(table[:], order='icol')[sstart:sstop:-3]
sortedtable2 = table.read_sorted('icol', start=14, stop=54, step=-3)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted10(self):
"""Testing the Table.read_sorted() method with negative step (V))."""
table = self.table
sstart = 100-24-1
sstop = 100-25-1
sortedtable = numpy.sort(table[:], order='icol')[sstart:sstop:-2]
sortedtable2 = table.read_sorted('icol', start=24, stop=25, step=-2)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05_readSorted11(self):
"""Testing the Table.read_sorted() method with start > stop."""
table = self.table
sstart = 100-137-1
sstop = 100-25-1
sortedtable = numpy.sort(table[:], order='icol')[sstart:sstop:-2]
sortedtable2 = table.read_sorted('icol', start=137, stop=25, step=-2)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05a_readSorted12(self):
"""Testing the Table.read_sorted() method with checkCSI (I)."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')
sortedtable2 = table.read_sorted('icol', checkCSI=True)
if verbose:
print("Original sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test05b_readSorted12(self):
"""Testing the Table.read_sorted() method with checkCSI (II)."""
table = self.table
self.assertRaises(ValueError,
table.read_sorted, "rcol", checkCSI=False)
def test06_copy_sorted1(self):
"""Testing the Table.copy(sortby) method with no arguments."""
table = self.table
# Copy to another table
table.nrowsinbuf = self.nrowsinbuf
table2 = table.copy("/", 'table2', sortby="icol")
sortedtable = numpy.sort(table[:], order='icol')
sortedtable2 = table2[:]
if verbose:
print("Original sorted table:", sortedtable)
print("The values from copy:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test06_copy_sorted2(self):
"""Testing the Table.copy(sortby) method with step=-1."""
table = self.table
# Copy to another table
table.nrowsinbuf = self.nrowsinbuf
table2 = table.copy("/", 'table2', sortby="icol", step=-1)
sortedtable = numpy.sort(table[:], order='icol')[::-1]
sortedtable2 = table2[:]
if verbose:
print("Original sorted table:", sortedtable)
print("The values from copy:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test06_copy_sorted3(self):
"""Testing the Table.copy(sortby) method with only a start."""
table = self.table
# Copy to another table
table.nrowsinbuf = self.nrowsinbuf
table2 = table.copy("/", 'table2', sortby="icol", start=3)
sortedtable = numpy.sort(table[:], order='icol')[3:4]
sortedtable2 = table2[:]
if verbose:
print("Original sorted table:", sortedtable)
print("The values from copy:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test06_copy_sorted4(self):
"""Testing the Table.copy(sortby) method with start, stop."""
table = self.table
# Copy to another table
table.nrowsinbuf = self.nrowsinbuf
table2 = table.copy("/", 'table2', sortby="icol", start=3, stop=40)
sortedtable = numpy.sort(table[:], order='icol')[3:40]
sortedtable2 = table2[:]
if verbose:
print("Original sorted table:", sortedtable)
print("The values from copy:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test06_copy_sorted5(self):
"""Testing the Table.copy(sortby) method with start, stop, step."""
table = self.table
# Copy to another table
table.nrowsinbuf = self.nrowsinbuf
table2 = table.copy("/", 'table2', sortby="icol",
start=3, stop=33, step=5)
sortedtable = numpy.sort(table[:], order='icol')[3:33:5]
sortedtable2 = table2[:]
if verbose:
print("Original sorted table:", sortedtable)
print("The values from copy:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test06_copy_sorted6(self):
"""Testing the Table.copy(sortby) method after table re-opening."""
self._reopen(mode='a')
table = self.h5file.root.table
# Copy to another table
table.nrowsinbuf = self.nrowsinbuf
table2 = table.copy("/", 'table2', sortby="icol")
sortedtable = numpy.sort(table[:], order='icol')
sortedtable2 = table2[:]
if verbose:
print("Original sorted table:", sortedtable)
print("The values from copy:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test06_copy_sorted7(self):
"""Testing the `checkCSI` parameter of Table.copy() (I)."""
table = self.table
# Copy to another table
table.nrowsinbuf = self.nrowsinbuf
table2 = table.copy("/", 'table2', sortby="icol")
self.assertRaises(ValueError,
table2.copy, "/", 'table3',
sortby="rcol", checkCSI=False)
def test06_copy_sorted8(self):
"""Testing the `checkCSI` parameter of Table.copy() (II)."""
table = self.table
# Copy to another table
table.nrowsinbuf = self.nrowsinbuf
table2 = table.copy("/", 'table2', sortby="icol")
self.assertRaises(ValueError,
table2.copy, "/", 'table3',
sortby="rcol", checkCSI=True)
def test07_isCSI_noelements(self):
"""Testing the representation of an index with no elements."""
t2 = self.h5file.create_table('/', 't2', self.MyDescription)
irows = t2.cols.rcol.create_csindex()
if verbose:
print("repr(t2)-->\n", repr(t2))
self.assertEqual(irows, 0)
self.assertEqual(t2.colindexes['rcol'].is_csi, False)
class ReadSortedIndexTestCase(TempFileMixin, TestCase):
"""Test case for testing sorted reading in a "full" sorted column."""
nrows = 100
nrowsinbuf = 11
class MyDescription(tables.IsDescription):
rcol = IntCol(pos=1)
icol = IntCol(pos=2)
def setUp(self):
super(ReadSortedIndexTestCase, self).setUp()
table = self.h5file.create_table('/', 'table', self.MyDescription)
row = table.row
nrows = self.nrows
for i in range(nrows):
row['rcol'] = i
row['icol'] = nrows - i
row.append()
table.flush()
self.table = table
self.icol = self.table.cols.icol
# A full index with a low optlevel is not guaranteed to be completely sorted
self.icol.create_index(optlevel=self.optlevel, kind="full",
_blocksizes=small_blocksizes)
def test01_readSorted1(self):
"""Testing the Table.read_sorted() method with no arguments."""
table = self.table
sortedtable = numpy.sort(table[:], order='icol')
sortedtable2 = table.read_sorted('icol')
if verbose:
print("Sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
# Compare with the sorted read table because we have no
# guarantees that read_sorted returns a completely sorted table
self.assertTrue(allequal(sortedtable,
numpy.sort(sortedtable2, order="icol")))
def test01_readSorted2(self):
"""Testing the Table.read_sorted() method with no arguments
(re-open)."""
self._reopen()
table = self.h5file.root.table
sortedtable = numpy.sort(table[:], order='icol')
sortedtable2 = table.read_sorted('icol')
if verbose:
print("Sorted table:", sortedtable)
print("The values from read_sorted:", sortedtable2)
# Compare with the sorted read table because we have no
# guarantees that read_sorted returns a completely sorted table
self.assertTrue(allequal(sortedtable,
numpy.sort(sortedtable2, order="icol")))
def test02_copy_sorted1(self):
"""Testing the Table.copy(sortby) method."""
table = self.table
# Copy to another table
table.nrowsinbuf = self.nrowsinbuf
table2 = table.copy("/", 'table2', sortby="icol")
sortedtable = numpy.sort(table[:], order='icol')
sortedtable2 = numpy.sort(table2[:], order='icol')
if verbose:
print("Original table:", table2[:])
print("The sorted values from copy:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
def test02_copy_sorted2(self):
"""Testing the Table.copy(sortby) method after table re-opening."""
self._reopen(mode='a')
table = self.h5file.root.table
# Copy to another table
table.nrowsinbuf = self.nrowsinbuf
table2 = table.copy("/", 'table2', sortby="icol")
sortedtable = numpy.sort(table[:], order='icol')
sortedtable2 = numpy.sort(table2[:], order='icol')
if verbose:
print("Original table:", table2[:])
print("The sorted values from copy:", sortedtable2)
self.assertTrue(allequal(sortedtable, sortedtable2))
class ReadSortedIndex0(ReadSortedIndexTestCase):
optlevel = 0
class ReadSortedIndex3(ReadSortedIndexTestCase):
optlevel = 3
class ReadSortedIndex6(ReadSortedIndexTestCase):
optlevel = 6
class ReadSortedIndex9(ReadSortedIndexTestCase):
optlevel = 9
class Issue156TestBase(common.TempFileMixin, TestCase):
# field name in table according to which test_copysort() sorts the table
sort_field = None
def setUp(self):
super(Issue156TestBase, self).setUp()
# create nested table
class Foo(tables.IsDescription):
frame = UInt16Col()
class Bar(tables.IsDescription):
code = UInt16Col()
table = self.h5file.create_table('/', 'foo', Foo,
filters=tables.Filters(3, 'zlib'),
createparents=True)
self.h5file.flush()
# fill table with 10 random numbers
for k in range(10):
row = table.row
row['frame'] = numpy.random.random_integers(0, 2**16-1)
row['Bar/code'] = numpy.random.random_integers(0, 2**16-1)
row.append()
self.h5file.flush()
def test_copysort(self):
# copy table
oldNode = self.h5file.get_node('/foo')
# create completely sorted index on a main column
oldNode.colinstances[self.sort_field].create_csindex()
# this fails on ade2ba123efd267fd31
# see gh-156
new_node = oldNode.copy(newname='foo2', overwrite=True,
sortby=self.sort_field, checkCSI=True,
propindexes=True)
# check column is sorted
self.assertTrue(numpy.all(
new_node.col(self.sort_field) ==
sorted(oldNode.col(self.sort_field))))
# check index is available
self.assertTrue(self.sort_field in new_node.colindexes)
# check CSI was propagated
self.assertTrue(new_node.colindexes[self.sort_field].is_csi)
class Issue156TestCase01(Issue156TestBase):
# sort by field from non nested entry
sort_field = 'frame'
class Issue156TestCase02(Issue156TestBase):
# sort by field from nested entry
sort_field = 'Bar/code'
class Issue119Time32ColTestCase(common.TempFileMixin, TestCase):
"""TimeCol not properly indexing."""
col_typ = tables.Time32Col
values = [
0.93240451618785880,
0.76322375510776170,
0.16695030056300875,
0.91259117097807850,
0.93977847053454630,
0.51450406513503090,
0.24452129962257563,
0.85475938924825230,
0.32512326762476930,
0.75127635627046820,
]
def setUp(self):
super(Issue119Time32ColTestCase, self).setUp()
class Descr(tables.IsDescription):
when = self.col_typ(pos=1)
value = Float32Col(pos=2)
self.table = self.h5file.create_table('/', 'test', Descr)
self.t = 1321031471.0 # 11/11/11 11:11:11
data = [(self.t + i, item) for i, item in enumerate(self.values)]
self.table.append(data)
self.h5file.flush()
def test_timecol_issue(self):
tbl = self.table
t = self.t
wherestr = '(when >= %d) & (when < %d)' % (t, t + 5)
no_index = tbl.read_where(wherestr)
tbl.cols.when.create_index(_verbose=False)
with_index = tbl.read_where(wherestr)
self.assertTrue((no_index == with_index).all())
class Issue119Time64ColTestCase(Issue119Time32ColTestCase):
col_typ = tables.Time64Col
class TestIndexingNans(TempFileMixin, TestCase):
def test_issue_282(self):
trMap = {'index': Int64Col(), 'values': FloatCol()}
table = self.h5file.create_table('/', 'table', trMap)
r = table.row
for i in range(5):
r['index'] = i
r['values'] = numpy.nan if i == 0 else i
r.append()
table.flush()
table.cols.values.create_index()
# retrieve
result = table.read_where('(values >= 0)')
self.assertTrue(len(result) == 4)
def test_issue_327(self):
table = self.h5file.create_table('/', 'table', dict(
index=Int64Col(),
values=FloatCol(shape=()),
values2=FloatCol(shape=()),
))
r = table.row
for i in range(5):
r['index'] = i
r['values'] = numpy.nan if i == 2 or i == 3 else i
r['values2'] = i
r.append()
table.flush()
table.cols.values.create_index()
table.cols.values2.create_index()
results2 = table.read_where('(values2 > 0)')
self.assertEqual(len(results2), 4)
results = table.read_where('(values > 0)')
self.assertEqual(len(results), 2)
def test_issue_327_b(self):
table = self.h5file.create_table('/', 'table', dict(
index=Int64Col(),
values=FloatCol(shape=()),
values2=FloatCol(shape=()),
))
r = table.row
for _ in range(100):
for i in range(5):
r['index'] = i
r['values'] = numpy.nan if i == 2 or i == 3 else i
r['values2'] = i
r.append()
table.flush()
table.cols.values.create_index(_blocksizes=small_blocksizes)
table.cols.values2.create_index(_blocksizes=small_blocksizes)
results2 = table.read_where('(values2 > 0)')
self.assertEqual(len(results2), 400)
results = table.read_where('(values > 0)')
self.assertEqual(len(results), 200)
def test_csindex_nans(self):
table = self.h5file.create_table('/', 'table', dict(
index=Int64Col(),
values=FloatCol(shape=()),
values2=FloatCol(shape=()),
))
r = table.row
for x in range(100):
for i in range(5):
r['index'] = i
r['values'] = numpy.nan if i == 2 or i == 3 else i
r['values2'] = i
r.append()
table.flush()
table.cols.values.create_csindex(_blocksizes=small_blocksizes)
table.cols.values2.create_csindex(_blocksizes=small_blocksizes)
results2 = table.read_where('(values2 > 0)')
self.assertEqual(len(results2), 100*4)
results = table.read_where('(values > 0)')
self.assertEqual(len(results), 100*2)
def suite():
theSuite = unittest.TestSuite()
niter = 1
# heavy = 1 # Uncomment this only for testing purposes!
for n in range(niter):
theSuite.addTest(unittest.makeSuite(BasicReadTestCase))
theSuite.addTest(unittest.makeSuite(ZlibReadTestCase))
theSuite.addTest(unittest.makeSuite(BloscReadTestCase))
theSuite.addTest(unittest.makeSuite(LZOReadTestCase))
theSuite.addTest(unittest.makeSuite(Bzip2ReadTestCase))
theSuite.addTest(unittest.makeSuite(ShuffleReadTestCase))
theSuite.addTest(unittest.makeSuite(Fletcher32ReadTestCase))
theSuite.addTest(unittest.makeSuite(ShuffleFletcher32ReadTestCase))
theSuite.addTest(unittest.makeSuite(OneHalfTestCase))
theSuite.addTest(unittest.makeSuite(UpperBoundTestCase))
theSuite.addTest(unittest.makeSuite(LowerBoundTestCase))
theSuite.addTest(unittest.makeSuite(AI1TestCase))
theSuite.addTest(unittest.makeSuite(AI2TestCase))
theSuite.addTest(unittest.makeSuite(AI9TestCase))
theSuite.addTest(unittest.makeSuite(DeepTableIndexTestCase))
theSuite.addTest(unittest.makeSuite(IndexPropsChangeTestCase))
theSuite.addTest(unittest.makeSuite(IndexFiltersTestCase))
theSuite.addTest(unittest.makeSuite(OldIndexTestCase))
theSuite.addTest(unittest.makeSuite(CompletelySortedIndexTestCase))
theSuite.addTest(unittest.makeSuite(ManyNodesTestCase))
theSuite.addTest(unittest.makeSuite(ReadSortedIndex0))
theSuite.addTest(unittest.makeSuite(ReadSortedIndex3))
theSuite.addTest(unittest.makeSuite(ReadSortedIndex6))
theSuite.addTest(unittest.makeSuite(ReadSortedIndex9))
theSuite.addTest(unittest.makeSuite(Issue156TestCase01))
theSuite.addTest(unittest.makeSuite(Issue156TestCase02))
theSuite.addTest(unittest.makeSuite(Issue119Time32ColTestCase))
theSuite.addTest(unittest.makeSuite(Issue119Time64ColTestCase))
theSuite.addTest(unittest.makeSuite(TestIndexingNans))
if heavy:
# These are too heavy for normal testing
theSuite.addTest(unittest.makeSuite(AI4bTestCase))
theSuite.addTest(unittest.makeSuite(AI5TestCase))
theSuite.addTest(unittest.makeSuite(AI6TestCase))
theSuite.addTest(unittest.makeSuite(AI7TestCase))
theSuite.addTest(unittest.makeSuite(AI8TestCase))
theSuite.addTest(unittest.makeSuite(AI10TestCase))
theSuite.addTest(unittest.makeSuite(AI11TestCase))
theSuite.addTest(unittest.makeSuite(AI12TestCase))
return theSuite
if __name__ == '__main__':
import sys
common.parse_argv(sys.argv)
common.print_versions()
unittest.main(defaultTest='suite')
| {
"content_hash": "c1df2f9355ad0018763adce31d2307a0",
"timestamp": "",
"source": "github",
"line_count": 2712,
"max_line_length": 80,
"avg_line_length": 36.73082595870206,
"alnum_prop": 0.587015881301825,
"repo_name": "jennolsen84/PyTables",
"id": "739b6b4fc387fcf0551729ac2dca89e54ba7cf4c",
"size": "99639",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "tables/tests/test_indexes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896101"
},
{
"name": "C++",
"bytes": "97380"
},
{
"name": "CMake",
"bytes": "21598"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "Makefile",
"bytes": "4159"
},
{
"name": "Objective-C",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "3325716"
},
{
"name": "Shell",
"bytes": "16985"
}
],
"symlink_target": ""
} |
from queue import Queue
class Deque(object):
"""Make deque object."""
def __init__(self, val=None):
"""Make new deque."""
self._container_q = Queue(val)
def pop(self):
"""Pop item from end of deque."""
return self._container_q._container.shift()
def pop_left(self):
"""Pop item from beginning of deque."""
return self._container_q._container.pop()
def append(self, val):
"""Append item to end of deque."""
self._container_q._container.append(val)
def append_left(self, val):
"""Append item to beginning of deque."""
self._container_q._container.insert(val)
def peek(self):
"""Return value at end of deque."""
cur = self._container_q._container.head
if cur == self._container_q._container._mark:
return None
while cur.next_node != self._container_q._container._mark:
cur = cur.next_node
return cur.get_data()
def peek_left(self):
"""Return value at beginning of deque."""
cur = self._container_q._container.head
if cur == self._container_q._container._mark:
return None
return self._container_q.peek()
def size(self):
"""Return size of deque."""
return self._container_q.size()
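# Illustrative usage sketch. The exact results depend on the Queue/linked-list
# implementation in this repo, which this wrapper reaches into via private
# attributes (_container, _mark).
#
# dq = Deque()
# dq.append('a')        # add at the end of the deque
# dq.append_left('b')   # add at the beginning of the deque
# dq.peek()             # value at the end, or None when empty
# dq.peek_left()        # value at the beginning, or None when empty
# dq.pop()              # remove and return the item at the end
# dq.pop_left()         # remove and return the item at the beginning
# dq.size()             # number of items currently stored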
| {
"content_hash": "26b524780257d26185005bf32e0f78f1",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 66,
"avg_line_length": 30.136363636363637,
"alnum_prop": 0.5746606334841629,
"repo_name": "palindromed/data-structures2",
"id": "42a28d8e416964e279b22738c8b4d2d9995fe029",
"size": "1352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/deque.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72025"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import login, logout
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView, ListView, DetailView
admin.autodiscover()
urlpatterns = [
url(r'^', include('status.urls', namespace='status', app_name='status')),
url(r'^account/login/$', login, {'template_name': 'admin/login.html'}, 'login'),
url(r'^account/logout/$', logout, {'template_name': 'admin/logout.html'}, 'logout'),
url(r'^avatar/', include('avatar.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
]
# if settings.DEBUG:
# urlpatterns += url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
# 'document_root': settings.MEDIA_ROOT,
# }),
# urlpatterns += staticfiles_urlpatterns()
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| {
"content_hash": "0e9431e7b85c0e7f2ba58143a7dea2c6",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 36.266666666666666,
"alnum_prop": 0.6773897058823529,
"repo_name": "darkpixel/statuspage",
"id": "f596256c857df3ae73432fa05d4944cc0c785481",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statuspage/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "17084"
},
{
"name": "Python",
"bytes": "32227"
}
],
"symlink_target": ""
} |
import ConfigParser
import json
import os
import sys
import unittest
from network import get_lan_ip
repo_root = os.path.abspath(os.path.join(__file__, "../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
sys.path.insert(1, os.path.join(repo_root, "tools", "wptserve"))
from wptserve import server
from selenium import webdriver
class WebDriverBaseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = create_driver()
cls.webserver = server.WebTestHttpd(host=get_lan_ip())
cls.webserver.start()
cls.webserver.where_is = cls.webserver.get_url
@classmethod
def tearDownClass(cls):
cls.webserver.stop()
if cls.driver:
cls.driver.quit()
def create_driver():
config = ConfigParser.ConfigParser()
config.read('webdriver.cfg')
section = os.environ.get("WD_BROWSER", 'firefox')
if config.has_option(section, 'url'):
url = config.get(section, "url")
else:
url = 'http://127.0.0.1:4444/wd/hub'
capabilities = None
if config.has_option(section, 'capabilities'):
try:
capabilities = json.loads(config.get(section, "capabilities"))
except:
pass
mode = 'compatibility'
if config.has_option(section, 'mode'):
mode = config.get(section, 'mode')
if section == 'firefox':
driver = webdriver.Firefox()
elif section == 'chrome':
driver = webdriver.Chrome()
elif section == 'edge':
driver = webdriver.Remote()
elif section == 'ie':
driver = webdriver.Ie()
elif section == 'selendroid':
driver = webdriver.Android()
return driver
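# Hypothetical webdriver.cfg sketch showing the options create_driver() looks
# for; section names match the WD_BROWSER values handled above, and the
# 'url', 'capabilities' and 'mode' keys are only read when present.
#
# [firefox]
# mode = compatibility
#
# [chrome]
# url = http://127.0.0.1:4444/wd/hub
# capabilities = {"browserName": "chrome"}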
| {
"content_hash": "5d5c173cf752ff255e13ae8e5e1f088c",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 74,
"avg_line_length": 28.283333333333335,
"alnum_prop": 0.6305244549204478,
"repo_name": "youtube/cobalt_sandbox",
"id": "851099936d29158f0084fb24ddb21fd97a249452",
"size": "1697",
"binary": false,
"copies": "142",
"ref": "refs/heads/main",
"path": "third_party/web_platform_tests/webdriver/base_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 12); | {
"content_hash": "7a3122f69f6a0e14a52c12ef62249efe",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 167,
"avg_line_length": 38.142857142857146,
"alnum_prop": 0.7078651685393258,
"repo_name": "antoinecarme/pyaf",
"id": "0448ef68871944b708b29e18ccfd0ebbd72cf926",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_LinearTrend/cycle_30/ar_12/test_artificial_32_Difference_LinearTrend_30_12_0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import queue
import json
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import mykeys
def text(item):
"""
Get the text from the tweet
"""
if 'text' in item:
return item['text']
return None
#
# Event based source.
# This class is a tweepy StreamListener
# which is an event handler. Each tweet
# results in a call to on_data. Here
# we add the tweet to a Queue as
# a dictionary object created from the JSON.
#
# An instance of this class can be passed
# to the source because its callable returns
# an iterable whose iterator runs
# against the queue. Iterating
# over the queue removes the tweet so
# that it will be placed on the source stream.
# If the queue is empty the iterator blocks
# until a new tweet arrives.
#
#
class tweets(StreamListener):
def __init__(self, terms):
self.terms = terms
def on_data(self, data):
self.items.put(json.loads(data))
return True
def on_error(self, status):
if status == 420:
return False
return False
def __call__(self):
self.items = queue.Queue()
auth = OAuthHandler(mykeys.ckey, mykeys.csecret)
auth.set_access_token(mykeys.atoken, mykeys.asecret)
self.stream = Stream(auth, self)
self.stream.filter(track=self.terms, async=True)
return self
def __iter__(self):
return iter(self.items.get, None)
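# Minimal usage sketch, assuming the streamsx Topology API used elsewhere in
# this repository (Topology.source() accepting a callable that returns an
# iterable) and a mykeys module providing ckey/csecret/atoken/asecret:
#
# from streamsx.topology.topology import Topology
# topo = Topology("tweepy_sample")
# topo.source(tweets(["#python", "#streams"])).print()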
| {
"content_hash": "f8eb3e4789eff857824489854f94a809",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 59,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.6795580110497238,
"repo_name": "ibmkendrick/streamsx.topology",
"id": "6651f5d46ab9d508f8b5e1dbf9bfe53e77d906b2",
"size": "1514",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "samples/python/topology/tweepy/tweets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "7805"
},
{
"name": "C++",
"bytes": "58469"
},
{
"name": "HTML",
"bytes": "11044"
},
{
"name": "Java",
"bytes": "1409695"
},
{
"name": "Makefile",
"bytes": "7673"
},
{
"name": "Perl",
"bytes": "6392"
},
{
"name": "Python",
"bytes": "373232"
},
{
"name": "Scala",
"bytes": "11124"
},
{
"name": "Shell",
"bytes": "4841"
}
],
"symlink_target": ""
} |
import cgi
import cgitb
# Enable error reporting to the web/HTTP server
cgitb.enable()
# My JSON and HTML helper libraries
import mjl, mhl
# General parameters
# I give it a try, but everyone will surely have their own differences
TestoPagina="Configurazione db REDIS"
DirBase="/var/www"
ConfigFile=DirBase+"/conf/config.json"
ExecFile="/cgi-bin/readconfig.py"
ConfigNow=mjl.ReadJsonFile(ConfigFile)
# Start web page - use the "web" header from my library
print (mhl.MyHtml())
print (mhl.MyHtmlHead())
print ("<h1>","<center>",TestoPagina,"</center>","</h1>","<hr/>","<br/>")
# Extract the values of the redis configuration
ConfigNow = mjl.SearchValueJsonVar(ConfigNow,"redis")
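# The code below assumes a config.json layout roughly like the following
# (hypothetical sketch; the real structure is whatever mjl.ReadJsonFile returns):
# [ { "name": "redis",
#     "value": [ { "name": "host",     "value": "localhost" },
#                { "name": "port",     "value": "6379" },
#                { "name": "password", "value": "" } ] },
#   ... ]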
form=cgi.FieldStorage()
# In practice the presence check on the form is only needed for the "password" field
# because I left open the possibility that it is empty (no password)
Error = ""
for i in range(len(ConfigNow)):
if ConfigNow[i]["name"] not in form:
if ConfigNow[i]["name"] == "password":
ConfigNow[i]["value"] = ""
else:
print("<br/>Errore:", ConfigNow[i]["name"])
Error = ConfigNow[i]["name"]
else:
ConfigNow[i]["value"] = cgi.escape(form[ConfigNow[i]["name"]].value)
if Error == "":
ConfigNew=mjl.ReadJsonFile(ConfigFile)
# I need the pointer to "value", and I have not found/thought of anything better
# I have to write the value only to the "value" part of "redis"
for i in range(len(ConfigNew)):
if "redis" == ConfigNew[i]["name"]:
ConfigNew[i]["value"] = ConfigNow
# Write something to the page to confirm the operation, but also perform the file write
print("Error:",mjl.WriteJsonFile(ConfigNew,ConfigFile))
else:
# In theory there will NEVER be errors here, they should already have been filtered out upstream
print("<h2>Errore</h2>")
print("<p>",Error,"</p>")
# End web page
print (mhl.MyHtmlBottom()) | {
"content_hash": "8f487414e8765d3d7e243b1d09c744c5",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 99,
"avg_line_length": 30.54237288135593,
"alnum_prop": 0.7130965593784684,
"repo_name": "raspibo/Livello1",
"id": "a22922e8dc45b458b2ba8526fb21d78714b32e3b",
"size": "1990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "var/www/cgi-bin/writeconfig.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2004"
},
{
"name": "PHP",
"bytes": "39"
},
{
"name": "Python",
"bytes": "163109"
},
{
"name": "Shell",
"bytes": "10111"
}
],
"symlink_target": ""
} |
import sys
import logging
import nbformat
from runipy.notebook_runner import NotebookRunner
logging.basicConfig(level=logging.INFO)
nb = nbformat.read(sys.argv[1], 3)
r = NotebookRunner(nb)
r.run_notebook()
try:
output_notebook = sys.argv[2]
except IndexError:
output_notebook = sys.argv[1]
nbformat.write(nb, output_notebook, 3)
nb = nbformat.read(output_notebook, 3)
nbformat.write(nb, output_notebook, 4)
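# Usage sketch: execute the notebook given as the first argument (nbformat v3)
# and save the executed copy, upgraded to nbformat v4, to the optional second
# argument or back over the input, e.g.:
#   python run_notebook.py analysis.ipynb [analysis_executed.ipynb]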
| {
"content_hash": "28ad00e3e7aad7cafd62d8d577efd9ad",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 49,
"avg_line_length": 24.58823529411765,
"alnum_prop": 0.7607655502392344,
"repo_name": "biosustain/cameo-notebooks",
"id": "04947972fec02b3d6ea05e6df498d7f6f5a3ac5e",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/run_notebook.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "47703955"
},
{
"name": "Python",
"bytes": "1007"
},
{
"name": "Shell",
"bytes": "532"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from . import views
import profiles.urls
import accounts.urls
import rbls.urls
urlpatterns = [
url(r'^$', views.HomePage.as_view(), name='home'),
url(r'^about/$', views.AboutPage.as_view(), name='about'),
url(r'^rbls/', include(rbls.urls, namespace='rbls')),
url(r'^users/', include(profiles.urls, namespace='profiles')),
url(r'^admin/', admin.site.urls),
url(r'^', include(accounts.urls, namespace='accounts')),
]
# User-uploaded files like profile pics need to be served in development
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Include django debug toolbar if DEBUG is on
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| {
"content_hash": "05ae34fa3d0f4064d96f2674fc198695",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 32.82142857142857,
"alnum_prop": 0.7040261153427638,
"repo_name": "speedlight/rblmonitor",
"id": "0ef307d15812cb11fd7f5ba00674ec22337dc8f8",
"size": "919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rblmonitor/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3379"
},
{
"name": "HTML",
"bytes": "27831"
},
{
"name": "JavaScript",
"bytes": "363"
},
{
"name": "Python",
"bytes": "34377"
}
],
"symlink_target": ""
} |
import xmlrpclib
import datetime
url = "http://localhost:8080/RPC2"
proxy = xmlrpclib.ServerProxy(url)
text = u"il a souhaité que la présidence trace à nice le chemin pour l' avenir ."
source = "Mein kleines Puppenhaus ."
target = "My small doll house ."
#align = "1-1 2-2 3-3 3-4 5-4"
align = "0-0 1-1 2-2 2-3 3-4"
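# Each "i-j" pair presumably links source token i to target token j (0-based),
# so the line above aligns Mein->My, kleines->small, Puppenhaus->doll,
# Puppenhaus->house and the final periods for the sentence pair defined above.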
params = {"source":source, "target":target, "alignment":align}
print "Updating with %s ..." %params
result = proxy.updater(params)
print result
| {
"content_hash": "e6b0d63b625e3261eeb6551bdaa5901f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 81,
"avg_line_length": 25.833333333333332,
"alnum_prop": 0.6946236559139785,
"repo_name": "shyamjvs/cs626_project",
"id": "ec8dd9b96b1b14a2e2c328881826102b8318e312",
"size": "565",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "stat_moses/tools/moses/contrib/server/update.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "10914"
},
{
"name": "Awk",
"bytes": "743344"
},
{
"name": "C",
"bytes": "3906431"
},
{
"name": "C#",
"bytes": "206438"
},
{
"name": "C++",
"bytes": "16167412"
},
{
"name": "CSS",
"bytes": "23612"
},
{
"name": "Eiffel",
"bytes": "66"
},
{
"name": "Emacs Lisp",
"bytes": "17034"
},
{
"name": "FORTRAN",
"bytes": "58"
},
{
"name": "Frege",
"bytes": "811021"
},
{
"name": "Java",
"bytes": "9070"
},
{
"name": "JavaScript",
"bytes": "273860"
},
{
"name": "Objective-C",
"bytes": "35399"
},
{
"name": "PHP",
"bytes": "133822"
},
{
"name": "Perl",
"bytes": "1739883"
},
{
"name": "Python",
"bytes": "625658"
},
{
"name": "R",
"bytes": "726"
},
{
"name": "Ruby",
"bytes": "4403"
},
{
"name": "Shell",
"bytes": "1302208"
},
{
"name": "Slash",
"bytes": "356"
},
{
"name": "SystemVerilog",
"bytes": "184"
},
{
"name": "XSLT",
"bytes": "759"
},
{
"name": "nesC",
"bytes": "366"
}
],
"symlink_target": ""
} |
from datetime import datetime
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
DEFAULT_DATE = datetime(2100, 1, 1)
# DAG tests backfill with pooled tasks
# Previously backfill would queue the task but never run it
dag1 = DAG(
dag_id='test_start_date_scheduling',
start_date=datetime(2100, 1, 1))
dag1_task1 = DummyOperator(
task_id='dummy',
dag=dag1,
owner='airflow')
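# With a start_date a century in the future the scheduler should create no runs
# for this DAG; presumably the scheduler tests that load this file assert that.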
| {
"content_hash": "e684822b1427fd25fa5e95fd4c0c4ded",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 59,
"avg_line_length": 28.733333333333334,
"alnum_prop": 0.7378190255220418,
"repo_name": "RealImpactAnalytics/airflow",
"id": "990086693771af470e064b1251c60af5929b0bdf",
"size": "1245",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "tests/dags/test_scheduler_dags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "270710"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "3765458"
},
{
"name": "Shell",
"bytes": "46923"
}
],
"symlink_target": ""
} |