text
stringlengths 4
1.02M
| meta
dict |
---|---|
def paginator_range(current, stop, start=1):
"""Build a paginator.
Ex : paginator_range(10, 20) returns [1, 2, None, 8, 9, 10, 11, 12, None, 19, 20]
:param current: current page
:param stop: last page
:param start: first page
:return: a list of page numbers for shown pages and None when pages are skipped
"""
LIMIT = 2
ret = []
for i in range(start, stop+1):
# if beginning or middle or end
if (abs(start-1-i) <= LIMIT) or (abs(current-i) <= LIMIT) or (abs(stop+1-i) <= LIMIT):
ret.append(i)
# else : skip pages. Add None unless already added
elif ret[-1] != None:
ret.append(None)
return ret
| {
"content_hash": "fc18bb47ca113baa8a017effb97e6064",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 94,
"avg_line_length": 33.19047619047619,
"alnum_prop": 0.5839311334289814,
"repo_name": "Findspire/workflow",
"id": "430204e57b43c13dccf82df2bcacf97379d6e3cb",
"size": "1019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workflow/utils/paginator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29389"
},
{
"name": "HTML",
"bytes": "36399"
},
{
"name": "JavaScript",
"bytes": "23797"
},
{
"name": "Python",
"bytes": "169506"
},
{
"name": "Ruby",
"bytes": "886"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.psvm import H2OSupportVectorMachineEstimator
def svm_svmguide1():
svmguide1 = h2o.import_file(pyunit_utils.locate("smalldata/svm_test/svmguide1.svm"))
svmguide1_test = h2o.import_file(pyunit_utils.locate("smalldata/svm_test/svmguide1_test.svm"))
# response is not -1/1 - needs to be explicitly converted
svmguide1["C1"] = svmguide1["C1"].asfactor()
svmguide1_test["C1"] = svmguide1_test["C1"].asfactor()
svm = H2OSupportVectorMachineEstimator(gamma=0.01, rank_ratio=0.1, disable_training_metrics=False)
svm.train(y="C1", training_frame=svmguide1, validation_frame=svmguide1_test)
svm.show()
pred = svm.predict(test_data=svmguide1)
assert len(pred) == len(svmguide1)
accuracy = svm.model_performance(valid=True).accuracy()[0][1]
assert accuracy >= 0.95
if __name__ == "__main__":
pyunit_utils.standalone_test(svm_svmguide1)
else:
svm_svmguide1()
| {
"content_hash": "e0ff270755f84770d6c2e0155cbd1cb2",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 102,
"avg_line_length": 33.903225806451616,
"alnum_prop": 0.708848715509039,
"repo_name": "h2oai/h2o-3",
"id": "68a9c8b8c57627958b04d378785973e722b247d3",
"size": "1051",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_algos/psvm/pyunit_svm_svmguide1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12803"
},
{
"name": "CSS",
"bytes": "882321"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "DIGITAL Command Language",
"bytes": "106"
},
{
"name": "Dockerfile",
"bytes": "10459"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "205646"
},
{
"name": "HCL",
"bytes": "36232"
},
{
"name": "HTML",
"bytes": "8018117"
},
{
"name": "HiveQL",
"bytes": "3985"
},
{
"name": "Java",
"bytes": "15981357"
},
{
"name": "JavaScript",
"bytes": "148426"
},
{
"name": "Jupyter Notebook",
"bytes": "20638329"
},
{
"name": "Makefile",
"bytes": "46043"
},
{
"name": "PHP",
"bytes": "800"
},
{
"name": "Python",
"bytes": "8188608"
},
{
"name": "R",
"bytes": "4149977"
},
{
"name": "Ruby",
"bytes": "64"
},
{
"name": "Sass",
"bytes": "23790"
},
{
"name": "Scala",
"bytes": "4845"
},
{
"name": "Shell",
"bytes": "214495"
},
{
"name": "Smarty",
"bytes": "1792"
},
{
"name": "TeX",
"bytes": "554940"
}
],
"symlink_target": ""
} |
'''
Common tests for IOs:
* check presence of all necessary attr
* check types
* write/read consistency
See BaseTestIO.
The public URL is in url_for_tests.
To deposite new testing files, please create a account at
gin.g-node.org and upload files at NeuralEnsemble/ephy_testing_data
data repo.
'''
# needed for python 3 compatibility
from __future__ import absolute_import
__test__ = False
import os
from copy import copy
import unittest
from neo.core import Block, Segment
from neo.test.tools import (assert_same_sub_schema,
assert_neo_object_is_compliant,
assert_sub_schema_is_lazy_loaded,
assert_children_empty)
from neo.test.rawiotest.tools import (can_use_network, make_all_directories,
download_test_file, create_local_temp_dir)
from neo.test.iotest.tools import (cleanup_test_file,
close_object_safe, create_generic_io_object,
create_generic_reader,
create_generic_writer,
iter_generic_io_objects,
iter_generic_readers, iter_read_objects,
read_generic,
write_generic)
from neo.test.generate_datasets import generate_from_supported_objects
# url_for_tests = "https://portal.g-node.org/neo/" #This is the old place
url_for_tests = "https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data/raw/master/"
class BaseTestIO(object):
'''
This class make common tests for all IOs.
Several startegies:
* for IO able to read write : test_write_then_read
* for IO able to read write with hash conservation (optional):
test_read_then_write
* for all IOs : test_assert_readed_neo_object_is_compliant
2 cases:
* files are at G-node and downloaded:
download_test_files_if_not_present
* files are generated by MyIO.write()
'''
# ~ __test__ = False
# all IO test need to modify this:
ioclass = None # the IOclass to be tested
files_to_test = [] # list of files to test compliances
files_to_download = [] # when files are at G-Node
# when reading then writing produces files with identical hashes
hash_conserved_when_write_read = False
# when writing then reading creates an identical neo object
read_and_write_is_bijective = True
# allow environment to tell avoid using network
use_network = can_use_network()
local_test_dir = None
def setUp(self):
'''
Set up the test fixture. This is run for every test
'''
self.files_to_test = copy(self.__class__.files_to_test)
self.higher = self.ioclass.supported_objects[0]
self.shortname = self.ioclass.__name__.lower().rstrip('io')
# these objects can both be written and read
self.io_readandwrite = list(set(self.ioclass.readable_objects) &
set(self.ioclass.writeable_objects))
# these objects can be either written or read
self.io_readorwrite = list(set(self.ioclass.readable_objects) |
set(self.ioclass.writeable_objects))
self.create_local_dir_if_not_exists()
self.download_test_files_if_not_present()
self.files_generated = []
self.generate_files_for_io_able_to_write()
self.files_to_test.extend(self.files_generated)
def create_local_dir_if_not_exists(self):
'''
Create a local directory to store testing files and return it.
The directory path is also written to self.local_test_dir
'''
self.local_test_dir = create_local_temp_dir(
self.shortname, directory=os.environ.get("NEO_TEST_FILE_DIR", None))
return self.local_test_dir
def download_test_files_if_not_present(self):
'''
Download %s file at G-node for testing
url_for_tests is global at beginning of this file.
''' % self.ioclass.__name__
if not self.use_network:
raise unittest.SkipTest("Requires download of data from the web")
url = url_for_tests + self.shortname
try:
make_all_directories(self.files_to_download, self.local_test_dir)
download_test_file(self.files_to_download,
self.local_test_dir, url)
except IOError as exc:
raise unittest.TestCase.failureException(exc)
download_test_files_if_not_present.__test__ = False
def cleanup_file(self, path):
'''
Remove test files or directories safely.
'''
cleanup_test_file(self.ioclass, path, directory=self.local_test_dir)
def able_to_write_or_read(self, writeread=False, readwrite=False):
'''
Return True if generalized writing or reading is possible.
If writeread=True, return True if writing then reading is
possible and produces identical neo objects.
If readwrite=True, return True if reading then writing is possible
and produces files with identical hashes.
'''
# Find the highest object that is supported by the IO
# Test only if it is a Block or Segment, and if it can both read
# and write this object.
if self.higher not in self.io_readandwrite:
return False
if self.higher not in [Block, Segment]:
return False
# when io need external knowldge for writting or read such as
# sampling_rate (RawBinaryIO...) the test is too much complex to design
# genericaly.
if (self.higher in self.ioclass.read_params and
len(self.ioclass.read_params[self.higher]) != 0):
return False
# handle cases where the test should write then read
if writeread and not self.read_and_write_is_bijective:
return False
# handle cases where the test should read then write
if readwrite and not self.hash_conserved_when_write_read:
return False
return True
def get_filename_path(self, filename):
'''
Get the path to a filename in the current temporary file directory
'''
return os.path.join(self.local_test_dir, filename)
def generic_io_object(self, filename=None, return_path=False, clean=False):
'''
Create an io object in a generic way that can work with both
file-based and directory-based io objects.
If filename is None, create a filename (default).
If return_path is True, return the full path of the file along with
the io object. return ioobj, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
'''
return create_generic_io_object(ioclass=self.ioclass,
filename=filename,
directory=self.local_test_dir,
return_path=return_path,
clean=clean)
def create_file_reader(self, filename=None, return_path=False,
clean=False, target=None, readall=False):
'''
Create a function that can read from the specified filename.
If filename is None, create a filename (default).
If return_path is True, return the full path of the file along with
the reader function. return reader, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or
read_segment, respectively.
If target is a string, use 'read_'+target.
If readall is True, use the read_all_ method instead of the read_
method. Default is False.
'''
ioobj, path = self.generic_io_object(filename=filename,
return_path=True, clean=clean)
res = create_generic_reader(ioobj, target=target, readall=readall)
if return_path:
return res, path
return res
def create_file_writer(self, filename=None, return_path=False,
clean=False, target=None):
'''
Create a function that can write from the specified filename.
If filename is None, create a filename (default).
If return_path is True, return the full path of the file along with
the writer function. return writer, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'write' method.
If target is the Block or Segment class, use write_block or
write_segment, respectively.
If target is a string, use 'write_'+target.
'''
ioobj, path = self.generic_io_object(filename=filename,
return_path=True, clean=clean)
res = create_generic_writer(ioobj, target=target)
if return_path:
return res, path
return res
def read_file(self, filename=None, return_path=False, clean=False,
target=None, readall=False, lazy=False):
'''
Read from the specified filename.
If filename is None, create a filename (default).
If return_path is True, return the full path of the file along with
the object. return obj, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or
read_segment, respectively.
If target is a string, use 'read_'+target.
The lazy parameter is passed to the reader. Defaults is True.
If readall is True, use the read_all_ method instead of the read_
method. Default is False.
'''
ioobj, path = self.generic_io_object(filename=filename,
return_path=True, clean=clean)
obj = read_generic(ioobj, target=target, lazy=lazy,
readall=readall, return_reader=False)
if return_path:
return obj, path
return obj
def write_file(self, obj=None, filename=None, return_path=False,
clean=False, target=None):
'''
Write the target object to a file using the given neo io object ioobj.
If filename is None, create a filename (default).
If return_path is True, return the full path of the file along with
the object. return obj, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or
read_segment, respectively.
If target is a string, use 'read_'+target.
obj is the object to write. If obj is None, an object is created
automatically for the io class.
'''
ioobj, path = self.generic_io_object(filename=filename,
return_path=True, clean=clean)
obj = write_generic(ioobj, target=target, return_reader=False)
if return_path:
return obj, path
return obj
def iter_io_objects(self, return_path=False, clean=False):
'''
Return an iterable over the io objects created from files_to_test
If return_path is True, yield the full path of the file along with
the io object. yield ioobj, path Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
'''
return iter_generic_io_objects(ioclass=self.ioclass,
filenames=self.files_to_test,
directory=self.local_test_dir,
return_path=return_path,
clean=clean)
def iter_readers(self, target=None, readall=False,
return_path=False, return_ioobj=False, clean=False):
'''
Return an iterable over readers created from files_to_test.
If return_path is True, return the full path of the file along with
the reader object. return reader, path.
If return_ioobj is True, return the io object as well as the reader.
return reader, ioobj. Default is False.
If both return_path and return_ioobj is True,
return reader, path, ioobj. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If readall is True, use the read_all_ method instead of the
read_ method. Default is False.
'''
return iter_generic_readers(ioclass=self.ioclass,
filenames=self.files_to_test,
directory=self.local_test_dir,
return_path=return_path,
return_ioobj=return_ioobj,
target=target,
clean=clean,
readall=readall)
def iter_objects(self, target=None, return_path=False, return_ioobj=False,
return_reader=False, clean=False, readall=False,
lazy=False):
'''
Iterate over objects read from the list of filenames in files_to_test.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or
read_segment, respectively.
If target is a string, use 'read_'+target.
If return_path is True, yield the full path of the file along with
the object. yield obj, path.
If return_ioobj is True, yield the io object as well as the object.
yield obj, ioobj. Default is False.
If return_reader is True, yield the io reader function as well as the
object. yield obj, reader. Default is False.
If some combination of return_path, return_ioobj, and return_reader
is True, they are yielded in the order: obj, path, ioobj, reader.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
The lazy parameters is passed to the reader. Defaults is True.
If readall is True, use the read_all_ method instead of the read_
method. Default is False.
'''
return iter_read_objects(ioclass=self.ioclass,
filenames=self.files_to_test,
directory=self.local_test_dir,
target=target,
return_path=return_path,
return_ioobj=return_ioobj,
return_reader=return_reader,
clean=clean, readall=readall,
lazy=lazy)
def generate_files_for_io_able_to_write(self):
'''
Write files for use in testing.
'''
self.files_generated = []
if not self.able_to_write_or_read():
return
generate_from_supported_objects(self.ioclass.supported_objects)
ioobj, path = self.generic_io_object(return_path=True, clean=True)
if ioobj is None:
return
self.files_generated.append(path)
write_generic(ioobj, target=self.higher)
close_object_safe(ioobj)
def test_write_then_read(self):
'''
Test for IO that are able to write and read - here %s:
1 - Generate a full schema with supported objects.
2 - Write to a file
3 - Read from the file
4 - Check the hierachy
5 - Check data
Work only for IO for Block and Segment for the highest object
(main cases).
''' % self.ioclass.__name__
if not self.able_to_write_or_read(writeread=True):
return
ioobj1 = self.generic_io_object(clean=True)
if ioobj1 is None:
return
ob1 = write_generic(ioobj1, target=self.higher)
close_object_safe(ioobj1)
ioobj2 = self.generic_io_object()
# Read the highest supported object from the file
obj_reader = create_generic_reader(ioobj2, target=False)
ob2 = obj_reader()[0]
if self.higher == Segment:
ob2 = ob2.segments[0]
# some formats (e.g. elphy) do not support double floating
# point spiketrains
try:
assert_same_sub_schema(ob1, ob2, True, 1e-8)
assert_neo_object_is_compliant(ob1)
assert_neo_object_is_compliant(ob2)
# intercept exceptions and add more information
except BaseException as exc:
raise
close_object_safe(ioobj2)
def test_read_then_write(self):
'''
Test for IO that are able to read and write, here %s:
1 - Read a file
2 Write object set in another file
3 Compare the 2 files hash
NOTE: TODO: Not implemented yet
''' % self.ioclass.__name__
if not self.able_to_write_or_read(readwrite=True):
return
# assert_file_contents_equal(a, b)
def test_assert_readed_neo_object_is_compliant(self):
'''
Reading %s files in `files_to_test` produces compliant objects.
Compliance test: neo.test.tools.assert_neo_object_is_compliant for
lazy mode.
''' % self.ioclass.__name__
for obj, path in self.iter_objects(lazy=False, return_path=True):
try:
# Check compliance of the block
assert_neo_object_is_compliant(obj)
# intercept exceptions and add more information
except BaseException as exc:
exc.args += ('from %s' % os.path.basename(path), )
raise
def test_readed_with_lazy_is_compliant(self):
'''
Reading %s files in `files_to_test` with `lazy` is compliant.
Test the reader with lazy = True.
The schema must contain proxy objects.
''' % self.ioclass.__name__
# This is for files presents at G-Node or generated
if self.ioclass.support_lazy:
for obj, path in self.iter_objects(lazy=True, return_path=True):
try:
assert_sub_schema_is_lazy_loaded(obj)
# intercept exceptions and add more information
except BaseException as exc:
raise
| {
"content_hash": "4a7a2f3663634a9ceb8304a8865d09a4",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 89,
"avg_line_length": 38.24612403100775,
"alnum_prop": 0.592399290600456,
"repo_name": "rgerkin/python-neo",
"id": "6bb14c8c892c136b0f5a1f43c9cd30ceaac4d567",
"size": "19759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/test/iotest/common_io_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2486594"
}
],
"symlink_target": ""
} |
import os
import yaml
from .. import app
__all__ = ('data',)
# Data loaded on import time so file system is not read and YAML parsed
# on every request.
with open(os.path.join(app.static_folder, 'data', 'beginners.yml')) as f:
data = yaml.safe_load(f.read())
| {
"content_hash": "a325907a468df84c5d254d4dc454c669",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 73,
"avg_line_length": 19.214285714285715,
"alnum_prop": 0.6728624535315985,
"repo_name": "honzajavorek/python.cz",
"id": "f3efef6a379260d2e44aa7377f307bbfaffedd69",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythoncz/models/beginners.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5778"
},
{
"name": "HTML",
"bytes": "57320"
},
{
"name": "JavaScript",
"bytes": "2493"
},
{
"name": "Python",
"bytes": "7975"
},
{
"name": "Shell",
"bytes": "1463"
}
],
"symlink_target": ""
} |
"""
sentry.utils.debug
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import cProfile
import re
import pstats
import six
import sys
from django.conf import settings
from django.http import HttpResponse
from six import StringIO
words_re = re.compile(r'\s+')
group_prefix_re = [
re.compile(r"^.*/django/[^/]+"),
re.compile(r"^(.*)/[^/]+$"), # extract module path
re.compile(r".*"), # catch strange entries
]
class ProfileMiddleware(object):
def can(self, request):
if 'prof' not in request.GET:
return False
if settings.DEBUG:
return True
if hasattr(request, 'user') and request.is_superuser():
return True
return False
def process_view(self, request, callback, callback_args, callback_kwargs):
if not self.can(request):
return
self.prof = cProfile.Profile()
return self.prof.runcall(callback, request, *callback_args, **callback_kwargs)
def get_group(self, filename):
for g in group_prefix_re:
name = g.findall(filename)
if name:
return name[0]
def get_summary(self, results_dict, total):
results = [(item[1], item[0]) for item in six.iteritems(results_dict)]
results.sort(reverse=True)
results = results[:40]
res = " tottime\n"
for item in results:
res += "%4.1f%% %7.3f %s\n" % (100 * item[0] / total if total else 0, item[0], item[1])
return res
def normalize_paths(self, stats):
import os.path
from pstats import add_func_stats, func_std_string
python_paths = sorted(sys.path, reverse=True)
def rel_filename(filename):
for path in python_paths:
if filename.startswith(path):
return filename[len(path) + 1:]
return os.path.basename(filename)
def func_strip_path(func_name):
filename, line, name = func_name
return rel_filename(filename), line, name
oldstats = stats.stats
stats.stats = newstats = {}
max_name_len = 0
for func, (cc, nc, tt, ct, callers) in six.iteritems(oldstats):
newfunc = func_strip_path(func)
if len(func_std_string(newfunc)) > max_name_len:
max_name_len = len(func_std_string(newfunc))
newcallers = {}
for func2, caller in six.iteritems(callers):
newcallers[func_strip_path(func2)] = caller
if newfunc in newstats:
newstats[newfunc] = add_func_stats(
newstats[newfunc],
(cc, nc, tt, ct, newcallers))
else:
newstats[newfunc] = (cc, nc, tt, ct, newcallers)
old_top = stats.top_level
stats.top_level = new_top = {}
for func in old_top:
new_top[func_strip_path(func)] = None
stats.max_name_len = max_name_len
stats.fcn_list = None
stats.all_callees = None
return self
def summary_for_files(self, stats_str):
stats_str = stats_str.split("\n")[5:]
mystats = {}
mygroups = {}
total = 0
for s in stats_str:
fields = words_re.split(s)
if len(fields) == 7:
time = float(fields[2])
total += time
filename = fields[6].split(":")[0]
if filename not in mystats:
mystats[filename] = 0
mystats[filename] += time
group = self.get_group(filename)
if group not in mygroups:
mygroups[group] = 0
mygroups[group] += time
return "\n" + \
" ---- By file ----\n\n" + self.get_summary(mystats, total) + "\n" + \
" ---- By group ---\n\n" + self.get_summary(mygroups, total) + \
"\n"
def process_response(self, request, response):
if not self.can(request):
return response
out = StringIO.StringIO()
old_stdout = sys.stdout
sys.stdout = out
stats = pstats.Stats(self.prof)
self.normalize_paths(stats)
stats.sort_stats('time', 'calls')
stats.print_stats()
sys.stdout = old_stdout
stats_str = out.getvalue()
content = "\n".join(stats_str.split("\n")[:40])
content += "\n\n"
content += self.summary_for_files(stats_str)
return HttpResponse(content, 'text/plain')
| {
"content_hash": "20f136ae3e6991f6ab434e792c86b8a9",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 99,
"avg_line_length": 29.866242038216562,
"alnum_prop": 0.5427596502452549,
"repo_name": "JackDanger/sentry",
"id": "1b2d40da65e2a082b6e53301261e2f8de5b0b263",
"size": "4689",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/sentry/utils/debug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "319622"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6279717"
}
],
"symlink_target": ""
} |
"""
The JS Shell Test Harness.
See the adjacent README.txt for more details.
"""
import os, sys, textwrap
from copy import copy
from subprocess import list2cmdline, call
from lib.results import NullTestOutput
from lib.tests import TestCase
from lib.results import ResultsSink
from lib.progressbar import ProgressBar
if (sys.platform.startswith('linux') or
sys.platform.startswith('darwin')
):
from lib.tasks_unix import run_all_tests
else:
from lib.tasks_win import run_all_tests
def run_tests(options, tests, results):
"""Run the given tests, sending raw results to the given results accumulator."""
try:
completed = run_all_tests(tests, results, options)
except KeyboardInterrupt:
completed = False
results.finish(completed)
def get_cpu_count():
"""
Guess at a reasonable parallelism count to set as the default for the
current machine and run.
"""
# Python 2.6+
try:
import multiprocessing
return multiprocessing.cpu_count()
except (ImportError,NotImplementedError):
pass
# POSIX
try:
res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if res > 0:
return res
except (AttributeError,ValueError):
pass
# Windows
try:
res = int(os.environ['NUMBER_OF_PROCESSORS'])
if res > 0:
return res
except (KeyError, ValueError):
pass
return 1
def parse_args():
"""
Parse command line arguments.
Returns a tuple of: (options, js_shell, requested_paths, excluded_paths)
options :object: The raw OptionParser output.
js_shell :str: The absolute location of the shell to test with.
requested_paths :set<str>: Test paths specially requested on the CLI.
excluded_paths :set<str>: Test paths specifically excluded by the CLI.
"""
from optparse import OptionParser, OptionGroup
op = OptionParser(usage=textwrap.dedent("""
%prog [OPTIONS] JS_SHELL [TESTS]
Shell output format: [ pass | fail | timeout | skip ] progress | time
""").strip())
op.add_option('--xul-info', dest='xul_info_src',
help='config data for xulRuntime (avoids search for config/autoconf.mk)')
harness_og = OptionGroup(op, "Harness Controls", "Control how tests are run.")
harness_og.add_option('-j', '--worker-count', type=int, default=max(1, get_cpu_count()),
help='Number of tests to run in parallel (default %default)')
harness_og.add_option('-t', '--timeout', type=float, default=150.0,
help='Set maximum time a test is allows to run (in seconds).')
harness_og.add_option('-a', '--args', dest='shell_args', default='',
help='Extra args to pass to the JS shell.')
harness_og.add_option('--jitflags', default='',
help='Example: --jitflags=m,amd to run each test with -m, -a -m -d [default=%default]')
harness_og.add_option('-g', '--debug', action='store_true', help='Run a test in debugger.')
harness_og.add_option('--debugger', default='gdb -q --args', help='Debugger command.')
harness_og.add_option('--valgrind', action='store_true', help='Run tests in valgrind.')
harness_og.add_option('--valgrind-args', default='', help='Extra args to pass to valgrind.')
op.add_option_group(harness_og)
input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
input_og.add_option('-f', '--file', dest='test_file', action='append',
help='Get tests from the given file.')
input_og.add_option('-x', '--exclude-file', action='append',
help='Exclude tests from the given file.')
input_og.add_option('-d', '--exclude-random', dest='random', action='store_false',
help='Exclude tests marked as "random."')
input_og.add_option('--run-skipped', action='store_true', help='Run tests marked as "skip."')
input_og.add_option('--run-only-skipped', action='store_true', help='Run only tests marked as "skip."')
input_og.add_option('--run-slow-tests', action='store_true',
help='Do not skip tests marked as "slow."')
input_og.add_option('--no-extensions', action='store_true',
help='Run only tests conforming to the ECMAScript 5 standard.')
op.add_option_group(input_og)
output_og = OptionGroup(op, "Output", "Modify the harness and tests output.")
output_og.add_option('-s', '--show-cmd', action='store_true',
help='Show exact commandline used to run each test.')
output_og.add_option('-o', '--show-output', action='store_true',
help="Print each test's output to the file given by --output-file.")
output_og.add_option('-F', '--failed-only', action='store_true',
help="If a --show-* option is given, only print output for failed tests.")
output_og.add_option('-O', '--output-file',
help='Write all output to the given file (default: stdout).')
output_og.add_option('--failure-file',
help='Write all not-passed tests to the given file.')
output_og.add_option('--no-progress', dest='hide_progress', action='store_true',
help='Do not show the progress bar.')
output_og.add_option('--tinderbox', action='store_true',
help='Use tinderbox-parseable output format.')
op.add_option_group(output_og)
special_og = OptionGroup(op, "Special", "Special modes that do not run tests.")
special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
help='Generate reftest manifest files.')
op.add_option_group(special_og)
options, args = op.parse_args()
# Acquire the JS shell given on the command line.
options.js_shell = None
requested_paths = set()
if len(args) > 0:
options.js_shell = os.path.abspath(args[0])
requested_paths |= set(args[1:])
# If we do not have a shell, we must be in a special mode.
if options.js_shell is None and not options.make_manifests:
op.error('missing JS_SHELL argument')
# Valgrind and gdb are mutually exclusive.
if options.valgrind and options.debug:
op.error("--valgrind and --debug are mutually exclusive.")
# Fill the debugger field, as needed.
prefix = options.debugger.split() if options.debug else []
if options.valgrind:
prefix = ['valgrind'] + options.valgrind_args.split()
if os.uname()[0] == 'Darwin':
prefix.append('--dsymutil=yes')
options.show_output = True
TestCase.set_js_cmd_prefix(options.js_shell, options.shell_args.split(), prefix)
# If files with lists of tests to run were specified, add them to the
# requested tests set.
if options.test_file:
for test_file in options.test_file:
requested_paths |= set([line.strip() for line in open(test_file).readlines()])
# If files with lists of tests to exclude were specified, add them to the
# excluded tests set.
excluded_paths = set()
if options.exclude_file:
for filename in options.exclude_file:
try:
fp = open(filename, 'r')
for line in fp:
if line.startswith('#'): continue
line = line.strip()
if not line: continue
excluded_paths |= set((line,))
finally:
fp.close()
# Handle output redirection, if requested and relevant.
options.output_fp = sys.stdout
if options.output_file:
if not options.show_cmd:
options.show_output = True
try:
options.output_fp = open(options.output_file, 'w')
except IOError, ex:
raise SystemExit("Failed to open output file: " + str(ex))
options.show = options.show_cmd or options.show_output
# Hide the progress bar if it will get in the way of other output.
options.hide_progress = ((options.show and
options.output_fp == sys.stdout) or
options.tinderbox or
ProgressBar.conservative_isatty() or
options.hide_progress)
return (options, requested_paths, excluded_paths)
def parse_jitflags(op_jitflags):
jitflags = [ [ '-' + flag for flag in flags ]
for flags in op_jitflags.split(',') ]
for flags in jitflags:
for flag in flags:
if flag not in ('-m', '-a', '-p', '-d', '-n'):
print('Invalid jit flag: "%s"'%flag)
sys.exit(1)
return jitflags
def load_tests(options, requested_paths, excluded_paths):
"""
Returns a tuple: (skipped_tests, test_list)
skip_list: [iterable<Test>] Tests found but skipped.
test_list: [iterable<Test>] Tests found that should be run.
"""
import lib.manifest as manifest
if options.js_shell is None:
xul_tester = manifest.NullXULInfoTester()
else:
if options.xul_info_src is None:
xul_info = manifest.XULInfo.create(options.js_shell)
else:
xul_abi, xul_os, xul_debug = options.xul_info_src.split(r':')
xul_debug = xul_debug.lower() is 'true'
xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
xul_tester = manifest.XULInfoTester(xul_info, options.js_shell)
test_dir = os.path.dirname(os.path.abspath(__file__))
test_list = manifest.load(test_dir, xul_tester)
skip_list = []
if options.make_manifests:
manifest.make_manifests(options.make_manifests, test_list)
sys.exit()
# Create a new test list. Apply each JIT configuration to every test.
if options.jitflags:
new_test_list = []
jitflags_list = parse_jitflags(options.jitflags)
for test in test_list:
for jitflags in jitflags_list:
tmp_test = copy(test)
tmp_test.options = copy(test.options)
tmp_test.options.extend(jitflags)
new_test_list.append(tmp_test)
test_list = new_test_list
if options.test_file:
paths = set()
for test_file in options.test_file:
paths |= set([ line.strip() for line in open(test_file).readlines()])
test_list = [ _ for _ in test_list if _.path in paths ]
if requested_paths:
def p(path):
for arg in requested_paths:
if path.find(arg) != -1:
return True
return False
test_list = [ _ for _ in test_list if p(_.path) ]
if options.exclude_file:
test_list = [_ for _ in test_list if _.path not in excluded_paths]
if options.no_extensions:
pattern = os.sep + 'extensions' + os.sep
test_list = [_ for _ in test_list if pattern not in _.path]
if not options.random:
test_list = [ _ for _ in test_list if not _.random ]
if options.run_only_skipped:
options.run_skipped = True
test_list = [ _ for _ in test_list if not _.enable ]
if not options.run_slow_tests:
test_list = [ _ for _ in test_list if not _.slow ]
if not options.run_skipped:
skip_list = [ _ for _ in test_list if not _.enable ]
test_list = [ _ for _ in test_list if _.enable ]
return skip_list, test_list
def main():
options, requested_paths, excluded_paths = parse_args()
skip_list, test_list = load_tests(options, requested_paths, excluded_paths)
if not test_list:
print 'no tests selected'
return 1
test_dir = os.path.dirname(os.path.abspath(__file__))
if options.debug:
if len(test_list) > 1:
print('Multiple tests match command line arguments, debugger can only run one')
for tc in test_list:
print(' %s'%tc.path)
return 2
cmd = test_list[0].get_command(TestCase.js_cmd_prefix)
if options.show_cmd:
print list2cmdline(cmd)
if test_dir not in ('', '.'):
os.chdir(test_dir)
call(cmd)
return 0
curdir = os.getcwd()
if test_dir not in ('', '.'):
os.chdir(test_dir)
results = None
try:
results = ResultsSink(options, len(skip_list) + len(test_list))
for t in skip_list:
results.push(NullTestOutput(t))
run_tests(options, test_list, results)
finally:
os.chdir(curdir)
if results is None or not results.all_passed():
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "3692fb542c2f4bc7dd264fa26cb0c986",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 113,
"avg_line_length": 38.75987841945289,
"alnum_prop": 0.5971612296110415,
"repo_name": "sergecodd/FireFox-OS",
"id": "98b06bc193e28ce347e787219fe3745147d63871",
"size": "12774",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "B2G/gecko/js/src/tests/jstests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "443"
},
{
"name": "ApacheConf",
"bytes": "85"
},
{
"name": "Assembly",
"bytes": "5123438"
},
{
"name": "Awk",
"bytes": "46481"
},
{
"name": "Batchfile",
"bytes": "56250"
},
{
"name": "C",
"bytes": "101720951"
},
{
"name": "C#",
"bytes": "38531"
},
{
"name": "C++",
"bytes": "148896543"
},
{
"name": "CMake",
"bytes": "23541"
},
{
"name": "CSS",
"bytes": "2758664"
},
{
"name": "DIGITAL Command Language",
"bytes": "56757"
},
{
"name": "Emacs Lisp",
"bytes": "12694"
},
{
"name": "Erlang",
"bytes": "889"
},
{
"name": "FLUX",
"bytes": "34449"
},
{
"name": "GLSL",
"bytes": "26344"
},
{
"name": "Gnuplot",
"bytes": "710"
},
{
"name": "Groff",
"bytes": "447012"
},
{
"name": "HTML",
"bytes": "43343468"
},
{
"name": "IDL",
"bytes": "1455122"
},
{
"name": "Java",
"bytes": "43261012"
},
{
"name": "JavaScript",
"bytes": "46646658"
},
{
"name": "Lex",
"bytes": "38358"
},
{
"name": "Logos",
"bytes": "21054"
},
{
"name": "Makefile",
"bytes": "2733844"
},
{
"name": "Matlab",
"bytes": "67316"
},
{
"name": "Max",
"bytes": "3698"
},
{
"name": "NSIS",
"bytes": "421625"
},
{
"name": "Objective-C",
"bytes": "877657"
},
{
"name": "Objective-C++",
"bytes": "737713"
},
{
"name": "PHP",
"bytes": "17415"
},
{
"name": "Pascal",
"bytes": "6780"
},
{
"name": "Perl",
"bytes": "1153180"
},
{
"name": "Perl6",
"bytes": "1255"
},
{
"name": "PostScript",
"bytes": "1139"
},
{
"name": "PowerShell",
"bytes": "8252"
},
{
"name": "Protocol Buffer",
"bytes": "26553"
},
{
"name": "Python",
"bytes": "8453201"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3481"
},
{
"name": "Ruby",
"bytes": "5116"
},
{
"name": "Scilab",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "3383832"
},
{
"name": "SourcePawn",
"bytes": "23661"
},
{
"name": "TeX",
"bytes": "879606"
},
{
"name": "WebIDL",
"bytes": "1902"
},
{
"name": "XSLT",
"bytes": "13134"
},
{
"name": "Yacc",
"bytes": "112744"
}
],
"symlink_target": ""
} |
import sys
import os
import argparse
import shutil
import subprocess
from natsort import natsorted
import funannotate.library as lib
from Bio import SeqIO
def runSubprocess(cmd, dir):
proc = subprocess.Popen(
cmd, cwd=dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if stdout:
print(stdout)
def runtbl2asn(folder, template, discrepency, organism, isolate, strain, parameters, version):
'''
function to run NCBI tbl2asn
'''
# get funannotate version
fun_version = lib.get_version()
# input should be a folder
if not os.path.isdir(folder):
print(("tbl2asn error: %s is not a directory, exiting" % folder))
sys.exit(1)
# based on organism, isolate, strain, construct meta info for -j flag
if not organism:
print("tbl2asn error: organism not specified")
sys.exit(1)
meta = "[organism=" + organism + "]"
if isolate:
isolate_meta = "[isolate=" + isolate + "]"
meta = meta + " " + isolate_meta
if strain:
strain_meta = "[strain=" + strain + "]"
meta = meta + " " + strain_meta
cmd = ['tbl2asn', '-y', '"Annotated using '+fun_version+'"', '-N',
str(version), '-p', folder, '-t', template, '-M', 'n', '-Z', discrepency, '-j', '"'+meta+'"', '-V', 'b', '-c', 'fx', '-T', '-a', 'r10u', '-l', 'paired-ends']
# check for custom parameters
if parameters:
params = parameters.split(' ')
cmd = cmd + params
runSubprocess(cmd, '.')
return ' '.join(cmd)
def locustagGB(input):
tag = []
with open(input, 'r') as infile:
for record in SeqIO.parse(infile, 'genbank'):
for f in record.features:
if f.type == 'gene':
locusTag, ID, Parent = lib.getID(f, f.type)
tag.append(locusTag)
break
return tag[0].split('_', -1)[0]
def ncbiCheckErrors(error, validation, genename, fixOut):
ncbi_error = 0
actual_error = 0
with open(error, 'r') as errors:
for line in errors:
line = line.strip()
if 'ERROR' in line:
num = line.split(' ')[0]
ncbi_error += int(num)
# if errors in summary, then parse validation report, only get errors with gene names
if ncbi_error > 0:
# see if we can get the gene models that need to be fixed
needFixing = {}
with open(validation, 'r') as validationFile:
for line in validationFile:
line = line.strip()
if line.startswith('ERROR') and genename in line:
actual_error += 1
parts = line.split(' ')
for x in parts:
if genename in x:
ID = x.split('|')[-1]
if '-' in ID:
ID = ID.split('-')[0]
reason = line.split(' FEATURE:')[0]
reason = reason.split('] ')[-1]
if not ID in needFixing:
needFixing[ID] = reason
if actual_error > 0:
print(("There are %i gene models that need to be fixed." %
actual_error))
print('-------------------------------------------------------')
with open(fixOut, 'w') as fix:
fix.write('#GeneID\tError Message\n')
for k, v in natsorted(list(needFixing.items())):
fix.write('%s\t%s\n' % (k, v))
print(('%s\t%s' % (k, v)))
return actual_error
def main(args):
# setup menu with argparse
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(MyFormatter, self).__init__(prog, max_help_position=48)
parser = argparse.ArgumentParser(prog='gbk2parts.py',
description='''Script to convert GBK file to its components.''',
epilog="""Written by Jon Palmer (2018) [email protected]""",
formatter_class=MyFormatter)
parser.add_argument('-i', '--tbl', required=True,
help='Genome annotation in tbl format')
parser.add_argument('-f', '--fasta', required=True,
help='Genome in FASTA format')
parser.add_argument('-s', '--species', required=True,
help='Species name (e.g. "Aspergillus fumigatus") use quotes if there is a space')
parser.add_argument('--isolate', help='Isolate name (e.g. Af293)')
parser.add_argument('--strain', help='Strain name (e.g. CEA10)')
parser.add_argument(
'-t', '--tbl2asn', help='Custom parameters for tbl2asn, example: linkage and gap info')
parser.add_argument('--sbt', help='tbl2asn template file')
parser.add_argument('-o', '--output', help='Output basename')
args = parser.parse_args(args)
parentdir = os.path.dirname(lib.__file__)
# see if organism/species/isolate was passed at command line
organism = None
if args.species:
organism = args.species
else:
organism = os.path.basename(args.tbl).split('.t')[0]
if args.strain:
organism_name = organism+'_'+args.strain
elif args.isolate:
organism_name = organism+'_'+args.isolate
else:
organism_name = organism
organism_name = organism_name.replace(' ', '_')
if args.output:
outputname = args.output
else:
outputname = organism_name
# create tmp folder to run tbl2asn from
# make tmp folder
tmp = outputname + '_tmp'
if not os.path.exists(tmp):
os.makedirs(tmp)
# now move files into proper location
if not lib.checkannotations(args.fasta):
print(('FASTA genome file not found: {:}'.format(args.fasta)))
sys.exit(1)
if not lib.checkannotations(args.tbl):
print(('TBL annotations file not found: {:}'.format(args.tbl)))
sys.exit(1)
shutil.copyfile(args.fasta, os.path.join(tmp, 'genome.fsa'))
shutil.copyfile(args.tbl, os.path.join(tmp, 'genome.tbl'))
# now we can run tbl2asn
if args.sbt:
SBT = args.sbt
else:
SBT = os.path.join(parentdir, 'config', 'test.sbt')
discrep = outputname+'.discrepency.txt'
version = 1
runtbl2asn(tmp, SBT, discrep, organism,
args.isolate, args.strain, args.tbl2asn, version)
# check the output for errors for NCBI
final_fixes = os.path.join(tmp, 'models-need-fixing.txt')
prefix = locustagGB(os.path.join(tmp, 'genome.gbf'))
errors = ncbiCheckErrors(os.path.join(tmp, 'errorsummary.val'), os.path.join(
tmp, 'genome.val'), prefix, final_fixes)
# get output files
gbkout = outputname+'.gbk'
shutil.copyfile(os.path.join(tmp, 'genome.gbf'), gbkout)
sqnout = outputname + '.sqn'
shutil.copyfile(os.path.join(tmp, 'genome.sqn'), sqnout)
if errors < 1:
lib.SafeRemove(tmp)
if __name__ == "__main__":
main(sys.argv[1:])
| {
"content_hash": "c7bd9e025b1d2f00032c5cfb649581ff",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 168,
"avg_line_length": 37.97860962566845,
"alnum_prop": 0.560264714165024,
"repo_name": "nextgenusfs/funannotate",
"id": "a4865177889c5b2d9f5b0411ced093e73a1f6704",
"size": "7149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "funannotate/utilities/tbl2gbk.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "291"
},
{
"name": "Dockerfile",
"bytes": "2669"
},
{
"name": "JavaScript",
"bytes": "2771"
},
{
"name": "Perl",
"bytes": "138330"
},
{
"name": "Python",
"bytes": "1542730"
},
{
"name": "Scala",
"bytes": "1523"
},
{
"name": "Shell",
"bytes": "2930"
},
{
"name": "Singularity",
"bytes": "100"
}
],
"symlink_target": ""
} |
import numpy as np
from .epochs import Epochs
from .utils import check_fname, logger, verbose, _check_option, _check_fname
from .io.constants import FIFF
from .io.open import fiff_open
from .io.pick import pick_types, pick_types_forward
from .io.proj import (Projection, _has_eeg_average_ref_proj, _read_proj,
make_projector, make_eeg_average_ref_proj, _write_proj)
from .io.write import start_and_end_file
from .event import make_fixed_length_events
from .parallel import parallel_func
from .cov import _check_n_samples
from .forward import (is_fixed_orient, _subject_from_forward,
convert_forward_solution)
from .source_estimate import _make_stc
@verbose
def read_proj(fname, verbose=None):
"""Read projections from a FIF file.
Parameters
----------
fname : str
The name of file containing the projections vectors. It should end with
-proj.fif or -proj.fif.gz.
%(verbose)s
Returns
-------
projs : list
The list of projection vectors.
See Also
--------
write_proj
"""
check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz',
'_proj.fif', '_proj.fif.gz'))
ff, tree, _ = fiff_open(fname)
with ff as fid:
projs = _read_proj(fid, tree)
return projs
@verbose
def write_proj(fname, projs, *, overwrite=False, verbose=None):
"""Write projections to a FIF file.
Parameters
----------
fname : str
The name of file containing the projections vectors. It should end with
-proj.fif or -proj.fif.gz.
projs : list
The list of projection vectors.
%(overwrite)s
.. versionadded:: 1.0
%(verbose)s
.. versionadded:: 1.0
See Also
--------
read_proj
"""
fname = _check_fname(fname, overwrite=overwrite)
check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz',
'_proj.fif', '_proj.fif.gz'))
with start_and_end_file(fname) as fid:
_write_proj(fid, projs)
@verbose
def _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix,
meg='separate', verbose=None):
from scipy import linalg
grad_ind = pick_types(info, meg='grad', ref_meg=False, exclude='bads')
mag_ind = pick_types(info, meg='mag', ref_meg=False, exclude='bads')
eeg_ind = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
_check_option('meg', meg, ['separate', 'combined'])
if meg == 'combined':
if n_grad != n_mag:
raise ValueError('n_grad (%d) must be equal to n_mag (%d) when '
'using meg="combined"')
kinds = ['meg', '', 'eeg']
n_mag = 0
grad_ind = pick_types(info, meg=True, ref_meg=False, exclude='bads')
if (n_grad > 0) and len(grad_ind) == 0:
logger.info("No MEG channels found for joint estimation. "
"Forcing n_grad=n_mag=0")
n_grad = 0
else:
kinds = ['planar', 'axial', 'eeg']
if (n_grad > 0) and len(grad_ind) == 0:
logger.info("No gradiometers found. Forcing n_grad to 0")
n_grad = 0
if (n_mag > 0) and len(mag_ind) == 0:
logger.info("No magnetometers found. Forcing n_mag to 0")
n_mag = 0
if (n_eeg > 0) and len(eeg_ind) == 0:
logger.info("No EEG channels found. Forcing n_eeg to 0")
n_eeg = 0
ch_names = info['ch_names']
grad_names, mag_names, eeg_names = ([ch_names[k] for k in ind]
for ind in [grad_ind, mag_ind,
eeg_ind])
projs = []
for n, ind, names, desc in zip([n_grad, n_mag, n_eeg],
[grad_ind, mag_ind, eeg_ind],
[grad_names, mag_names, eeg_names],
kinds):
if n == 0:
continue
data_ind = data[ind][:, ind]
# data is the covariance matrix: U * S**2 * Ut
U, Sexp2, _ = linalg.svd(data_ind, full_matrices=False)
U = U[:, :n]
exp_var = Sexp2 / Sexp2.sum()
exp_var = exp_var[:n]
for k, (u, var) in enumerate(zip(U.T, exp_var)):
proj_data = dict(col_names=names, row_names=None,
data=u[np.newaxis, :], nrow=1, ncol=u.size)
this_desc = "%s-%s-PCA-%02d" % (desc, desc_prefix, k + 1)
logger.info("Adding projection: %s" % this_desc)
proj = Projection(
active=False, data=proj_data, desc=this_desc,
kind=FIFF.FIFFV_PROJ_ITEM_FIELD, explained_var=var)
projs.append(proj)
return projs
@verbose
def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=None,
desc_prefix=None, meg='separate', verbose=None):
"""Compute SSP (signal-space projection) vectors on epoched data.
%(compute_ssp)s
Parameters
----------
epochs : instance of Epochs
The epochs containing the artifact.
n_grad : int
Number of vectors for gradiometers.
n_mag : int
Number of vectors for magnetometers.
n_eeg : int
Number of vectors for EEG channels.
%(n_jobs)s
Number of jobs to use to compute covariance.
desc_prefix : str | None
The description prefix to use. If None, one will be created based on
the event_id, tmin, and tmax.
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
projs: list
List of projection vectors.
See Also
--------
compute_proj_raw, compute_proj_evoked
"""
# compute data covariance
data = _compute_cov_epochs(epochs, n_jobs)
event_id = epochs.event_id
if event_id is None or len(list(event_id.keys())) == 0:
event_id = '0'
elif len(event_id.keys()) == 1:
event_id = str(list(event_id.values())[0])
else:
event_id = 'Multiple-events'
if desc_prefix is None:
desc_prefix = "%s-%-.3f-%-.3f" % (event_id, epochs.tmin, epochs.tmax)
return _compute_proj(data, epochs.info, n_grad, n_mag, n_eeg, desc_prefix,
meg=meg)
def _compute_cov_epochs(epochs, n_jobs):
"""Compute epochs covariance."""
parallel, p_fun, n_jobs = parallel_func(np.dot, n_jobs)
data = parallel(p_fun(e, e.T) for e in epochs)
n_epochs = len(data)
if n_epochs == 0:
raise RuntimeError('No good epochs found')
n_chan, n_samples = epochs.info['nchan'], len(epochs.times)
_check_n_samples(n_samples * n_epochs, n_chan)
data = sum(data)
return data
@verbose
def compute_proj_evoked(evoked, n_grad=2, n_mag=2, n_eeg=2, desc_prefix=None,
meg='separate', verbose=None):
"""Compute SSP (signal-space projection) vectors on evoked data.
%(compute_ssp)s
Parameters
----------
evoked : instance of Evoked
The Evoked obtained by averaging the artifact.
n_grad : int
Number of vectors for gradiometers.
n_mag : int
Number of vectors for magnetometers.
n_eeg : int
Number of vectors for EEG channels.
desc_prefix : str | None
The description prefix to use. If None, one will be created based on
tmin and tmax.
.. versionadded:: 0.17
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
projs : list
List of projection vectors.
See Also
--------
compute_proj_raw, compute_proj_epochs
"""
data = np.dot(evoked.data, evoked.data.T) # compute data covariance
if desc_prefix is None:
desc_prefix = "%-.3f-%-.3f" % (evoked.times[0], evoked.times[-1])
return _compute_proj(data, evoked.info, n_grad, n_mag, n_eeg, desc_prefix,
meg=meg)
@verbose
def compute_proj_raw(raw, start=0, stop=None, duration=1, n_grad=2, n_mag=2,
n_eeg=0, reject=None, flat=None, n_jobs=None,
meg='separate', verbose=None):
"""Compute SSP (signal-space projection) vectors on continuous data.
%(compute_ssp)s
Parameters
----------
raw : instance of Raw
A raw object to use the data from.
start : float
Time (in sec) to start computing SSP.
stop : float
Time (in sec) to stop computing SSP.
None will go to the end of the file.
duration : float
Duration (in sec) to chunk data into for SSP
If duration is None, data will not be chunked.
n_grad : int
Number of vectors for gradiometers.
n_mag : int
Number of vectors for magnetometers.
n_eeg : int
Number of vectors for EEG channels.
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
%(n_jobs)s
Number of jobs to use to compute covariance.
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
projs: list
List of projection vectors.
See Also
--------
compute_proj_epochs, compute_proj_evoked
"""
if duration is not None:
duration = np.round(duration * raw.info['sfreq']) / raw.info['sfreq']
events = make_fixed_length_events(raw, 999, start, stop, duration)
picks = pick_types(raw.info, meg=True, eeg=True, eog=True, ecg=True,
emg=True, exclude='bads')
epochs = Epochs(raw, events, None, tmin=0.,
tmax=duration - 1. / raw.info['sfreq'],
picks=picks, reject=reject, flat=flat,
baseline=None, proj=False)
data = _compute_cov_epochs(epochs, n_jobs)
info = epochs.info
if not stop:
stop = raw.n_times / raw.info['sfreq']
else:
# convert to sample indices
start = max(raw.time_as_index(start)[0], 0)
stop = raw.time_as_index(stop)[0] if stop else raw.n_times
stop = min(stop, raw.n_times)
data, times = raw[:, start:stop]
_check_n_samples(stop - start, data.shape[0])
data = np.dot(data, data.T) # compute data covariance
info = raw.info
# convert back to times
start = start / raw.info['sfreq']
stop = stop / raw.info['sfreq']
desc_prefix = "Raw-%-.3f-%-.3f" % (start, stop)
projs = _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix,
meg=meg)
return projs
@verbose
def sensitivity_map(fwd, projs=None, ch_type='grad', mode='fixed', exclude=[],
verbose=None):
"""Compute sensitivity map.
Such maps are used to know how much sources are visible by a type
of sensor, and how much projections shadow some sources.
Parameters
----------
fwd : Forward
The forward operator.
projs : list
List of projection vectors.
ch_type : 'grad' | 'mag' | 'eeg'
The type of sensors to use.
mode : str
The type of sensitivity map computed. See manual. Should be 'free',
'fixed', 'ratio', 'radiality', 'angle', 'remaining', or 'dampening'
corresponding to the argument --map 1, 2, 3, 4, 5, 6 and 7 of the
command mne_sensitivity_map.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in fwd['info']['bads'].
%(verbose)s
Returns
-------
stc : SourceEstimate | VolSourceEstimate
The sensitivity map as a SourceEstimate or VolSourceEstimate instance
for visualization.
"""
from scipy import linalg
# check strings
_check_option('ch_type', ch_type, ['eeg', 'grad', 'mag'])
_check_option('mode', mode, ['free', 'fixed', 'ratio', 'radiality',
'angle', 'remaining', 'dampening'])
# check forward
if is_fixed_orient(fwd, orig=True):
raise ValueError('fwd should must be computed with free orientation')
# limit forward (this will make a copy of the data for us)
if ch_type == 'eeg':
fwd = pick_types_forward(fwd, meg=False, eeg=True, exclude=exclude)
else:
fwd = pick_types_forward(fwd, meg=ch_type, eeg=False, exclude=exclude)
convert_forward_solution(fwd, surf_ori=True, force_fixed=False,
copy=False, verbose=False)
if not fwd['surf_ori'] or is_fixed_orient(fwd):
raise RuntimeError('Error converting solution, please notify '
'mne-python developers')
gain = fwd['sol']['data']
# Make sure EEG has average
if ch_type == 'eeg':
if projs is None or not _has_eeg_average_ref_proj(
fwd['info'], projs=projs):
eeg_ave = [make_eeg_average_ref_proj(fwd['info'])]
else:
eeg_ave = []
projs = eeg_ave if projs is None else projs + eeg_ave
# Construct the projector
residual_types = ['angle', 'remaining', 'dampening']
if projs is not None:
proj, ncomp, U = make_projector(projs, fwd['sol']['row_names'],
include_active=True)
# do projection for most types
if mode not in residual_types:
gain = np.dot(proj, gain)
elif ncomp == 0:
raise RuntimeError('No valid projectors found for channel type '
'%s, cannot compute %s' % (ch_type, mode))
# can only run the last couple methods if there are projectors
elif mode in residual_types:
raise ValueError('No projectors used, cannot compute %s' % mode)
n_sensors, n_dipoles = gain.shape
n_locations = n_dipoles // 3
sensitivity_map = np.empty(n_locations)
for k in range(n_locations):
gg = gain[:, 3 * k:3 * (k + 1)]
if mode != 'fixed':
s = linalg.svd(gg, full_matrices=False, compute_uv=False)
if mode == 'free':
sensitivity_map[k] = s[0]
else:
gz = linalg.norm(gg[:, 2]) # the normal component
if mode == 'fixed':
sensitivity_map[k] = gz
elif mode == 'ratio':
sensitivity_map[k] = gz / s[0]
elif mode == 'radiality':
sensitivity_map[k] = 1. - (gz / s[0])
else:
if mode == 'angle':
co = linalg.norm(np.dot(gg[:, 2], U))
sensitivity_map[k] = co / gz
else:
p = linalg.norm(np.dot(proj, gg[:, 2]))
if mode == 'remaining':
sensitivity_map[k] = p / gz
elif mode == 'dampening':
sensitivity_map[k] = 1. - p / gz
else:
raise ValueError('Unknown mode type (got %s)' % mode)
# only normalize fixed and free methods
if mode in ['fixed', 'free']:
sensitivity_map /= np.max(sensitivity_map)
subject = _subject_from_forward(fwd)
vertices = [s['vertno'] for s in fwd['src']]
return _make_stc(sensitivity_map[:, np.newaxis], vertices, fwd['src'].kind,
tmin=0., tstep=1., subject=subject)
| {
"content_hash": "dac3a5422c8be8fe998a44399ea22403",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 79,
"avg_line_length": 35.275109170305676,
"alnum_prop": 0.5617108195097796,
"repo_name": "Teekuningas/mne-python",
"id": "8bdd80cdb1f1f29eeb34864d422ee3a5e93fcd47",
"size": "16243",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mne/proj.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "JavaScript",
"bytes": "8008"
},
{
"name": "Jinja",
"bytes": "14962"
},
{
"name": "Makefile",
"bytes": "4612"
},
{
"name": "Python",
"bytes": "10372316"
},
{
"name": "Sass",
"bytes": "257"
},
{
"name": "Shell",
"bytes": "19970"
}
],
"symlink_target": ""
} |
import sys
import time as tmod
import warnings
import numpy as np
warnings.simplefilter("ignore")
sys.path.insert(0, "../FATS/")
import FATS
#We open the ligth curve in two different bands
lc_B = FATS.ReadLC_MACHO('lc/lc_1.3444.614.B.txt')
lc_R = FATS.ReadLC_MACHO('lc/lc_1.3444.614.R.txt')
#We import the data
[mag, time, error] = lc_B.ReadLC()
[mag2, time2, error2] = lc_R.ReadLC()
#We preprocess the data
preproccesed_data = FATS.Preprocess_LC(mag, time, error)
[mag, time, error] = preproccesed_data.Preprocess()
preproccesed_data = FATS.Preprocess_LC(mag2, time2, error2)
[mag2, time2, error2] = preproccesed_data.Preprocess()
#We synchronize the data
if len(mag) != len(mag2):
[aligned_mag, aligned_mag2, aligned_time, aligned_error, aligned_error2] = \
FATS.Align_LC(time, time2, mag, mag2, error, error2)
lc = np.array([mag, time, error, mag2, aligned_mag, aligned_mag2, aligned_time, aligned_error, aligned_error2])
EXCLUDE = [
'Freq1_harmonics_amplitude_0','Freq1_harmonics_amplitude_1',
'Freq1_harmonics_amplitude_2','Freq1_harmonics_amplitude_3',
'Freq2_harmonics_amplitude_0','Freq2_harmonics_amplitude_1',
'Freq2_harmonics_amplitude_2','Freq2_harmonics_amplitude_3',
'Freq3_harmonics_amplitude_0','Freq3_harmonics_amplitude_1',
'Freq3_harmonics_amplitude_2','Freq3_harmonics_amplitude_3',
'Freq1_harmonics_amplitude_0','Freq1_harmonics_rel_phase_0',
'Freq1_harmonics_rel_phase_1','Freq1_harmonics_rel_phase_2',
'Freq1_harmonics_rel_phase_3','Freq2_harmonics_rel_phase_0',
'Freq2_harmonics_rel_phase_1','Freq2_harmonics_rel_phase_2',
'Freq2_harmonics_rel_phase_3','Freq3_harmonics_rel_phase_0',
'Freq3_harmonics_rel_phase_1','Freq3_harmonics_rel_phase_2',
'Freq3_harmonics_rel_phase_3', "Period_fit", "Psi_eta", "Psi_CS"]
iterations = 1000
times_pls = []
fs = FATS.FeatureSpace(
Data='all', excludeList=EXCLUDE)
for _ in range(iterations):
start = tmod.time()
fs.calculateFeature(lc)
times_pls.append(tmod.time() - start)
times = []
fs = FATS.FeatureSpace(
Data='all', excludeList=EXCLUDE + ["PeriodLS"])
for _ in range(iterations):
start = tmod.time()
fs.calculateFeature(lc)
times.append(tmod.time() - start)
msg = """
Total iterations: {iterations}
With PeriodLS:
- Total: {total_pls}
- Minimun: {min_pls}
- Maximun: {max_pls}
- Mean: {mean_pls}
- Std: {std_pls}
Without PeriodLS:
- Total: {total}
- Minimun: {min}
- Maximun: {max}
- Mean: {mean}
- Std: {std}
""".format(
iterations=iterations,
total_pls=np.sum(times_pls), min_pls=np.min(times_pls),
max_pls=np.max(times_pls), mean_pls=np.mean(times_pls),
std_pls=np.std(times_pls),
total=np.sum(times), min=np.min(times),
max=np.max(times), mean=np.mean(times),
std=np.std(times))
with open("lombscargle_test.txt", "w") as fp:
fp.write(msg)
| {
"content_hash": "886f9feb5b9704eae481db7e9761cefd",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 111,
"avg_line_length": 29.540816326530614,
"alnum_prop": 0.6773747841105354,
"repo_name": "carpyncho/feets",
"id": "ed130147a51ebc25da32804b4679131456c1f32b",
"size": "2896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "res/paper/reports/lomb_scargle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "108441"
},
{
"name": "Python",
"bytes": "336059"
},
{
"name": "Shell",
"bytes": "2414"
},
{
"name": "TeX",
"bytes": "59406"
}
],
"symlink_target": ""
} |
import uuid
import ddt
import mock
from oslo_utils import strutils
from oslo_utils import timeutils
import six
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import group_specs as v3_group_specs
from cinder.api.v3 import group_types as v3_group_types
from cinder.api.v3.views import group_types as views_types
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.volume import group_types
IN_USE_GROUP_TYPE = fake.GROUP_TYPE3_ID
def stub_group_type(id):
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"
}
return dict(
id=id,
name='group_type_%s' % six.text_type(id),
description='group_type_desc_%s' % six.text_type(id),
group_specs=specs,
)
def return_group_types_get_all_types(context, filters=None, marker=None,
limit=None, sort_keys=None,
sort_dirs=None, offset=None,
list_result=False):
result = dict(group_type_1=stub_group_type(1),
group_type_2=stub_group_type(2),
group_type_3=stub_group_type(3)
)
if list_result:
return list(result.values())
return result
def return_empty_group_types_get_all_types(context, filters=None, marker=None,
limit=None, sort_keys=None,
sort_dirs=None, offset=None,
list_result=False):
if list_result:
return []
return {}
def return_group_types_get_group_type(context, id):
if id == fake.WILL_NOT_BE_FOUND_ID:
raise exception.GroupTypeNotFound(group_type_id=id)
return stub_group_type(id)
def return_group_types_get_default():
return stub_group_type(1)
def return_group_types_get_default_not_found():
return {}
def return_group_types_with_groups_destroy(context, id):
if id == IN_USE_GROUP_TYPE:
raise exception.GroupTypeInUse(group_type_id=id)
@ddt.ddt
class GroupTypesApiTest(test.TestCase):
def _create_group_type(self, group_type_name, group_specs=None,
is_public=True, projects=None):
return group_types.create(self.ctxt, group_type_name, group_specs,
is_public, projects).get('id')
def setUp(self):
super(GroupTypesApiTest, self).setUp()
self.controller = v3_group_types.GroupTypesController()
self.specs_controller = v3_group_specs.GroupTypeSpecsController()
self.ctxt = context.RequestContext(user_id=fake.USER_ID,
project_id=fake.PROJECT_ID,
is_admin=True)
self.user_ctxt = context.RequestContext(user_id=fake.USER2_ID,
project_id=fake.PROJECT2_ID,
is_admin=False)
self.type_id1 = self._create_group_type('group_type1',
{'key1': 'value1'})
self.type_id2 = self._create_group_type('group_type2',
{'key2': 'value2'})
self.type_id3 = self._create_group_type('group_type3',
{'key3': 'value3'}, False,
[fake.PROJECT_ID])
self.type_id0 = group_types.get_default_cgsnapshot_type()['id']
@ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on',
'y', 'yes')
@mock.patch.object(group_types, "get_group_type_by_name")
@mock.patch.object(group_types, "create")
@mock.patch("cinder.api.openstack.wsgi.Request.cache_resource")
@mock.patch("cinder.api.views.types.ViewBuilder.show")
def test_create_group_type_with_valid_is_public_in_string(
self, is_public, mock_show, mock_cache_resource,
mock_create, mock_get):
boolean_is_public = strutils.bool_from_string(is_public)
req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
body = {"group_type": {"is_public": is_public, "name": "group_type1",
"description": None}}
self.controller.create(req, body=body)
mock_create.assert_called_once_with(
self.ctxt, 'group_type1', {},
boolean_is_public, description=None)
@mock.patch.object(group_types, "get_group_type_by_name")
@mock.patch.object(group_types, "create")
@mock.patch("cinder.api.openstack.wsgi.Request.cache_resource")
@mock.patch("cinder.api.views.types.ViewBuilder.show")
def test_create_group_type_with_group_specs_null(
self, mock_show, mock_cache_resource,
mock_create, mock_get):
req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
body = {"group_type": {"name": "group_type1",
"group_specs": None}}
self.controller.create(req, body=body)
mock_create.assert_called_once_with(
self.ctxt, 'group_type1', None, True, description=None)
@ddt.data(fake.GROUP_TYPE_ID, IN_USE_GROUP_TYPE)
def test_group_type_destroy(self, grp_type_id):
grp_type = {'id': grp_type_id, 'name': 'grp' + grp_type_id}
self.mock_object(group_types, 'get_group_type',
return_value=grp_type)
self.mock_object(group_types, 'destroy',
return_group_types_with_groups_destroy)
mock_notify_info = self.mock_object(
v3_group_types.GroupTypesController,
'_notify_group_type_info')
mock_notify_error = self.mock_object(
v3_group_types.GroupTypesController,
'_notify_group_type_error')
req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' % (
fake.PROJECT_ID, grp_type_id),
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
if grp_type_id == IN_USE_GROUP_TYPE:
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete,
req, grp_type_id)
mock_notify_error.assert_called_once_with(
self.ctxt, 'group_type.delete', mock.ANY,
group_type=grp_type)
else:
self.controller.delete(req, grp_type_id)
mock_notify_info.assert_called_once_with(
self.ctxt, 'group_type.delete', grp_type)
def test_group_types_index(self):
self.mock_object(group_types, 'get_all_group_types',
return_group_types_get_all_types)
req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID,
use_admin_context=True,
version=mv.GROUP_TYPE)
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict['group_types']))
expected_names = ['group_type_1', 'group_type_2', 'group_type_3']
actual_names = map(lambda e: e['name'], res_dict['group_types'])
self.assertEqual(set(expected_names), set(actual_names))
for entry in res_dict['group_types']:
self.assertEqual('value1', entry['group_specs']['key1'])
def test_group_types_index_no_data(self):
self.mock_object(group_types, 'get_all_group_types',
return_empty_group_types_get_all_types)
req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
res_dict = self.controller.index(req)
self.assertEqual(0, len(res_dict['group_types']))
def test_group_types_index_with_limit(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1' %
fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(1, len(res['group_types']))
self.assertEqual(self.type_id3, res['group_types'][0]['id'])
expect_next_link = ('http://localhost/v3/%s/group_types?limit=1'
'&marker=%s' %
(fake.PROJECT_ID, res['group_types'][0]['id']))
self.assertEqual(expect_next_link, res['group_type_links'][0]['href'])
def test_group_types_index_with_offset(self):
req = fakes.HTTPRequest.blank(
'/v3/%s/group_types?offset=1' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(3, len(res['group_types']))
def test_group_types_index_with_offset_out_of_range(self):
url = '/v3/%s/group_types?offset=424366766556787' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, version=mv.GROUP_TYPE)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_group_types_index_with_limit_and_offset(self):
req = fakes.HTTPRequest.blank(
'/v3/%s/group_types?limit=2&offset=1' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(2, len(res['group_types']))
self.assertEqual(self.type_id2, res['group_types'][0]['id'])
self.assertEqual(self.type_id1, res['group_types'][1]['id'])
def test_group_types_index_with_limit_and_marker(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1'
'&marker=%s' %
(fake.PROJECT_ID,
self.type_id2),
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(1, len(res['group_types']))
self.assertEqual(self.type_id1, res['group_types'][0]['id'])
def test_group_types_index_with_valid_filter(self):
req = fakes.HTTPRequest.blank(
'/v3/%s/group_types?is_public=True' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(4, len(res['group_types']))
self.assertEqual(self.type_id3, res['group_types'][0]['id'])
self.assertEqual(self.type_id2, res['group_types'][1]['id'])
self.assertEqual(self.type_id1, res['group_types'][2]['id'])
self.assertEqual(self.type_id0, res['group_types'][3]['id'])
def test_group_types_index_with_invalid_filter(self):
req = fakes.HTTPRequest.blank(
'/v3/%s/group_types?id=%s' % (fake.PROJECT_ID, self.type_id1),
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(4, len(res['group_types']))
def test_group_types_index_with_sort_keys(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_types?sort=id' %
fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
expect_result = [self.type_id0, self.type_id1, self.type_id2,
self.type_id3]
expect_result.sort(reverse=True)
self.assertEqual(4, len(res['group_types']))
self.assertEqual(expect_result[0], res['group_types'][0]['id'])
self.assertEqual(expect_result[1], res['group_types'][1]['id'])
self.assertEqual(expect_result[2], res['group_types'][2]['id'])
self.assertEqual(expect_result[3], res['group_types'][3]['id'])
def test_group_types_index_with_sort_and_limit(self):
req = fakes.HTTPRequest.blank(
'/v3/%s/group_types?sort=id&limit=2' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
expect_result = [self.type_id0, self.type_id1, self.type_id2,
self.type_id3]
expect_result.sort(reverse=True)
self.assertEqual(2, len(res['group_types']))
self.assertEqual(expect_result[0], res['group_types'][0]['id'])
self.assertEqual(expect_result[1], res['group_types'][1]['id'])
def test_group_types_index_with_sort_keys_and_sort_dirs(self):
req = fakes.HTTPRequest.blank(
'/v3/%s/group_types?sort=id:asc' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
expect_result = [self.type_id0, self.type_id1, self.type_id2,
self.type_id3]
expect_result.sort()
self.assertEqual(4, len(res['group_types']))
self.assertEqual(expect_result[0], res['group_types'][0]['id'])
self.assertEqual(expect_result[1], res['group_types'][1]['id'])
self.assertEqual(expect_result[2], res['group_types'][2]['id'])
self.assertEqual(expect_result[3], res['group_types'][3]['id'])
@ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on',
'y', 'yes')
@mock.patch.object(group_types, "get_group_type")
@mock.patch.object(group_types, "update")
@mock.patch("cinder.api.openstack.wsgi.Request.cache_resource")
@mock.patch("cinder.api.views.types.ViewBuilder.show")
def test_update_group_type_with_valid_is_public_in_string(
self, is_public, mock_show, mock_cache_resource,
mock_update, mock_get):
type_id = six.text_type(uuid.uuid4())
req = fakes.HTTPRequest.blank(
'/v3/%s/types/%s' % (fake.PROJECT_ID, type_id),
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
boolean_is_public = strutils.bool_from_string(is_public)
body = {"group_type": {"is_public": is_public, "name": "group_type1"}}
self.controller.update(req, type_id, body=body)
mock_update.assert_called_once_with(
self.ctxt, type_id, 'group_type1', None,
is_public=boolean_is_public)
def test_update_group_type_with_name_null(self):
req = fakes.HTTPRequest.blank(
'/v3/%s/types/%s' % (fake.PROJECT_ID, fake.GROUP_TYPE_ID),
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
body = {"group_type": {"name": None}}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, fake.GROUP_TYPE_ID, body=body)
@ddt.data({"group_type": {"name": None,
"description": "description"}},
{"group_type": {"name": "test",
"is_public": True}},
{"group_type": {"description": None,
"is_public": True}})
def test_update_group_type(self, body):
req = fakes.HTTPRequest.blank(
'/v3/%s/types/%s' % (fake.PROJECT_ID, fake.GROUP_TYPE_ID),
version=mv.GROUP_TYPE)
group_type_1 = group_types.create(self.ctxt, 'group_type')
req.environ['cinder.context'] = self.ctxt
res = self.controller.update(req, group_type_1.get('id'), body=body)
expected_name = body['group_type'].get('name')
if expected_name is not None:
self.assertEqual(expected_name, res['group_type']['name'])
expected_is_public = body['group_type'].get('is_public')
if expected_is_public is not None:
self.assertEqual(expected_is_public,
res['group_type']['is_public'])
self.assertEqual(body['group_type'].get('description'),
res['group_type']['description'])
def test_group_types_show(self):
self.mock_object(group_types, 'get_group_type',
return_group_types_get_group_type)
type_id = six.text_type(uuid.uuid4())
req = fakes.HTTPRequest.blank('/v3/%s/group_types/' % fake.PROJECT_ID
+ type_id,
version=mv.GROUP_TYPE)
res_dict = self.controller.show(req, type_id)
self.assertEqual(1, len(res_dict))
self.assertEqual(type_id, res_dict['group_type']['id'])
type_name = 'group_type_' + type_id
self.assertEqual(type_name, res_dict['group_type']['name'])
def test_group_types_show_pre_microversion(self):
self.mock_object(group_types, 'get_group_type',
return_group_types_get_group_type)
type_id = uuid.uuid4()
req = fakes.HTTPRequest.blank(
'/v3/%s/group_types/%s' % (fake.PROJECT_ID, type_id),
version=mv.get_prior_version(mv.GROUP_TYPE))
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.show, req, type_id)
def test_group_types_show_not_found(self):
self.mock_object(group_types, 'get_group_type',
return_group_types_get_group_type)
req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID),
version=mv.GROUP_TYPE)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, fake.WILL_NOT_BE_FOUND_ID)
def test_get_default(self):
self.mock_object(group_types, 'get_default_group_type',
return_group_types_get_default)
req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' %
fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.method = 'GET'
res_dict = self.controller.show(req, 'default')
self.assertEqual(1, len(res_dict))
self.assertEqual('group_type_1', res_dict['group_type']['name'])
self.assertEqual('group_type_desc_1',
res_dict['group_type']['description'])
def test_get_default_not_found(self):
self.mock_object(group_types, 'get_default_group_type',
return_group_types_get_default_not_found)
req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' %
fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.method = 'GET'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, 'default')
def test_view_builder_show(self):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
group_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v3",
version=mv.GROUP_TYPE)
output = view_builder.show(request, raw_group_type)
self.assertIn('group_type', output)
expected_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
id=42,
)
self.assertDictEqual(expected_group_type, output['group_type'])
def test_view_builder_show_admin(self):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
group_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v3", use_admin_context=True,
version=mv.GROUP_TYPE)
output = view_builder.show(request, raw_group_type)
self.assertIn('group_type', output)
expected_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
group_specs={},
id=42,
)
self.assertDictEqual(expected_group_type, output['group_type'])
def __test_view_builder_show_qos_specs_id_policy(self):
with mock.patch.object(context.RequestContext,
'authorize',
side_effect=[False, True]):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v3",
version=mv.GROUP_TYPE)
output = view_builder.show(request, raw_group_type)
self.assertIn('group_type', output)
expected_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
id=42,
)
self.assertDictEqual(expected_group_type, output['group_type'])
def test_view_builder_show_group_specs_policy(self):
with mock.patch.object(context.RequestContext,
'authorize',
side_effect=[True, False]):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
group_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v3",
version=mv.GROUP_TYPE)
output = view_builder.show(request, raw_group_type)
self.assertIn('group_type', output)
expected_group_type = dict(
name='new_type',
description='new_type_desc',
group_specs={},
is_public=True,
id=42,
)
self.assertDictEqual(expected_group_type, output['group_type'])
def test_view_builder_show_pass_all_policy(self):
with mock.patch.object(context.RequestContext,
'authorize',
side_effect=[True, False]):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
group_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v3",
version=mv.GROUP_TYPE)
output = view_builder.show(request, raw_group_type)
self.assertIn('group_type', output)
expected_group_type = dict(
name='new_type',
description='new_type_desc',
group_specs={},
is_public=True,
id=42,
)
self.assertDictEqual(expected_group_type, output['group_type'])
def test_view_builder_list(self):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_group_types = []
for i in range(0, 10):
raw_group_types.append(
dict(
name='new_type',
description='new_type_desc',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
group_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v3",
version=mv.GROUP_TYPE)
output = view_builder.index(request, raw_group_types)
self.assertIn('group_types', output)
for i in range(0, 10):
expected_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
id=42 + i
)
self.assertDictEqual(expected_group_type,
output['group_types'][i])
def test_view_builder_list_admin(self):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_group_types = []
for i in range(0, 10):
raw_group_types.append(
dict(
name='new_type',
description='new_type_desc',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
group_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v3", use_admin_context=True,
version=mv.GROUP_TYPE)
output = view_builder.index(request, raw_group_types)
self.assertIn('group_types', output)
for i in range(0, 10):
expected_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
group_specs={},
id=42 + i
)
self.assertDictEqual(expected_group_type,
output['group_types'][i])
| {
"content_hash": "6df14a978969b5b69a6c76b291875276",
"timestamp": "",
"source": "github",
"line_count": 666,
"max_line_length": 78,
"avg_line_length": 40.746246246246244,
"alnum_prop": 0.5309724730073332,
"repo_name": "j-griffith/cinder",
"id": "9f6d7d25c256102e2bccaae27c2cd46a074df847",
"size": "27768",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/api/v3/test_group_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20155959"
},
{
"name": "Shell",
"bytes": "16354"
}
],
"symlink_target": ""
} |
"""
===============================
Computing source space SNR
===============================
This example shows how to compute and plot source space SNR as in [1]_.
"""
# Author: Padma Sundaram <[email protected]>
# Kaisu Lankinen <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 2
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
import numpy as np
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
# Read data
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory',
baseline=(None, 0))
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fwd = mne.read_forward_solution(fname_fwd)
cov = mne.read_cov(fname_cov)
###############################################################################
# MEG-EEG
# -------
# Read inverse operator:
inv_op = make_inverse_operator(evoked.info, fwd, cov, fixed=True, verbose=True)
# Calculate MNE:
snr = 3.0
lambda2 = 1.0 / snr ** 2
stc = apply_inverse(evoked, inv_op, lambda2, 'MNE', verbose=True)
# Calculate SNR in source space:
snr_stc = stc.estimate_snr(evoked.info, fwd, cov)
# Plot an average SNR across source points over time:
ave = np.mean(snr_stc.data, axis=0)
fig, ax = plt.subplots()
ax.plot(evoked.times, ave)
ax.set(xlabel='Time (sec)', ylabel='SNR MEG-EEG')
fig.tight_layout()
# Find time point of maximum SNR:
maxidx = np.argmax(ave)
# Plot SNR on source space at the time point of maximum SNR:
kwargs = dict(initial_time=evoked.times[maxidx], hemi='split',
views=['lat', 'med'], subjects_dir=subjects_dir, size=(600, 600),
clim=dict(kind='value', lims=(-100, -70, -40)),
transparent=True, colormap='viridis')
snr_stc.plot(**kwargs)
###############################################################################
# EEG
# ---
# Next we do the same for EEG and plot the result on the cortex:
evoked_eeg = evoked.copy().pick_types(eeg=True, meg=False)
inv_op_eeg = make_inverse_operator(evoked_eeg.info, fwd, cov, fixed=True,
verbose=True)
stc_eeg = apply_inverse(evoked_eeg, inv_op_eeg, lambda2, 'MNE', verbose=True)
snr_stc_eeg = stc_eeg.estimate_snr(evoked_eeg.info, fwd, cov)
snr_stc_eeg.plot(**kwargs)
###############################################################################
# MEG
# ---
# Finally we do this for MEG:
evoked_meg = evoked.copy().pick_types(eeg=False, meg=True)
inv_op_meg = make_inverse_operator(evoked_meg.info, fwd, cov, fixed=True,
verbose=True)
stc_meg = apply_inverse(evoked_meg, inv_op_meg, lambda2, 'MNE', verbose=True)
snr_stc_meg = stc_meg.estimate_snr(evoked_meg.info, fwd, cov)
snr_stc_meg.plot(**kwargs)
##############################################################################
# References
# ----------
# .. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon, D.,
# Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009). Mapping the
# Signal-To-Noise-Ratios of Cortical Sources in Magnetoencephalography
# and Electroencephalography. Human Brain Mapping, 30(4), 1077–1086.
# doi:10.1002/hbm.20571
| {
"content_hash": "84d7897d40dc5377c1c3f5325c5e0321",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 79,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.5926573426573427,
"repo_name": "cjayb/mne-python",
"id": "4191665d597e80a26141f5f15c13626303e958ac",
"size": "3461",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/inverse/plot_source_space_snr.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "Makefile",
"bytes": "4450"
},
{
"name": "Python",
"bytes": "7901053"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
DataFrame,
Period,
Series,
Timestamp,
date_range,
period_range,
to_datetime,
)
import pandas._testing as tm
@pytest.fixture
def date_range_frame():
"""
Fixture for DataFrame of ints with date_range index
Columns are ['A', 'B'].
"""
N = 50
rng = date_range("1/1/1990", periods=N, freq="53s")
return DataFrame({"A": np.arange(N), "B": np.arange(N)}, index=rng)
class TestFrameAsof:
def test_basic(self, date_range_frame):
df = date_range_frame
N = 50
df.loc[df.index[15:30], "A"] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
result = df.asof(dates)
assert result.notna().all(1).all()
lb = df.index[14]
ub = df.index[30]
dates = list(dates)
result = df.asof(dates)
assert result.notna().all(1).all()
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == 14).all(1).all()
def test_subset(self, date_range_frame):
N = 10
df = date_range_frame.iloc[:N].copy()
df.loc[df.index[4:8], "A"] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
# with a subset of A should be the same
result = df.asof(dates, subset="A")
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# same with A/B
result = df.asof(dates, subset=["A", "B"])
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# B gives df.asof
result = df.asof(dates, subset="B")
expected = df.resample("25s", closed="right").ffill().reindex(dates)
expected.iloc[20:] = 9
tm.assert_frame_equal(result, expected)
def test_missing(self, date_range_frame):
# GH 15118
# no match found - `where` value before earliest date in index
N = 10
df = date_range_frame.iloc[:N].copy()
result = df.asof("1989-12-31")
expected = Series(
index=["A", "B"], name=Timestamp("1989-12-31"), dtype=np.float64
)
tm.assert_series_equal(result, expected)
result = df.asof(to_datetime(["1989-12-31"]))
expected = DataFrame(
index=to_datetime(["1989-12-31"]), columns=["A", "B"], dtype="float64"
)
tm.assert_frame_equal(result, expected)
# Check that we handle PeriodIndex correctly, dont end up with
# period.ordinal for series name
df = df.to_period("D")
result = df.asof("1989-12-31")
assert isinstance(result.name, Period)
def test_asof_all_nans(self, frame_or_series):
# GH 15713
# DataFrame/Series is all nans
result = frame_or_series([np.nan]).asof([0])
expected = frame_or_series([np.nan])
tm.assert_equal(result, expected)
def test_all_nans(self, date_range_frame):
# GH 15713
# DataFrame is all nans
# testing non-default indexes, multiple inputs
N = 150
rng = date_range_frame.index
dates = date_range("1/1/1990", periods=N, freq="25s")
result = DataFrame(np.nan, index=rng, columns=["A"]).asof(dates)
expected = DataFrame(np.nan, index=dates, columns=["A"])
tm.assert_frame_equal(result, expected)
# testing multiple columns
dates = date_range("1/1/1990", periods=N, freq="25s")
result = DataFrame(np.nan, index=rng, columns=["A", "B", "C"]).asof(dates)
expected = DataFrame(np.nan, index=dates, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
# testing scalar input
result = DataFrame(np.nan, index=[1, 2], columns=["A", "B"]).asof([3])
expected = DataFrame(np.nan, index=[3], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
result = DataFrame(np.nan, index=[1, 2], columns=["A", "B"]).asof(3)
expected = Series(np.nan, index=["A", "B"], name=3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"stamp,expected",
[
(
Timestamp("2018-01-01 23:22:43.325+00:00"),
Series(2.0, name=Timestamp("2018-01-01 23:22:43.325+00:00")),
),
(
Timestamp("2018-01-01 22:33:20.682+01:00"),
Series(1.0, name=Timestamp("2018-01-01 22:33:20.682+01:00")),
),
],
)
def test_time_zone_aware_index(self, stamp, expected):
# GH21194
# Testing awareness of DataFrame index considering different
# UTC and timezone
df = DataFrame(
data=[1, 2],
index=[
Timestamp("2018-01-01 21:00:05.001+00:00"),
Timestamp("2018-01-01 22:35:10.550+00:00"),
],
)
result = df.asof(stamp)
tm.assert_series_equal(result, expected)
def test_is_copy(self, date_range_frame):
# GH-27357, GH-30784: ensure the result of asof is an actual copy and
# doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
df = date_range_frame
N = 50
df.loc[df.index[15:30], "A"] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
result = df.asof(dates)
with tm.assert_produces_warning(None):
result["C"] = 1
def test_asof_periodindex_mismatched_freq(self):
N = 50
rng = period_range("1/1/1990", periods=N, freq="H")
df = DataFrame(np.random.randn(N), index=rng)
# Mismatched freq
msg = "Input has different freq"
with pytest.raises(IncompatibleFrequency, match=msg):
df.asof(rng.asfreq("D"))
| {
"content_hash": "b922e5d8fdbadd89a2cf5fdc0f7fecad",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 84,
"avg_line_length": 32.324175824175825,
"alnum_prop": 0.561958184599694,
"repo_name": "gfyoung/pandas",
"id": "6931dd0ea2d4c2d01972ca3c03e0a60a48ecb1f6",
"size": "5883",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pandas/tests/frame/methods/test_asof.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14336547"
},
{
"name": "Shell",
"bytes": "29174"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
import os
import sys
import yaml
import logging
import json
import re
import argparse
import datetime
import salt.client
import device42
from nodefilter import node_filter
logger = logging.getLogger('log')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter('%(asctime)-15s\t%(levelname)s\t %(message)s'))
logger.addHandler(ch)
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser(description="saltexplore")
parser.add_argument('-d', '--debug', action='store_true', help='Enable debug output')
parser.add_argument('-q', '--quiet', action='store_true', help='Quiet mode - outputs only errors')
parser.add_argument('-c', '--config', help='Config file', default='settings.yaml')
parser.add_argument('-f', '--nodefile', help='Get node info from JSON file instead of Salt server')
parser.add_argument('-S', '--savenodes', help='Save nodes info from Salt server to json file')
parser.add_argument('-n', '--onlynode', action='append', help='Process only selected nodes (fqdn or hostname)')
debug_mode = False
cpuf_re = re.compile(r'@ ([\w\d\.]+)GHz', re.I)
# We have to restrict FS to only known types to avoid incorrect disk size calculatons
# add more yourself
ALLOWED_FSTYPES = ['ntfs', 'ext2', 'ext3', 'ext4', 'ocfs2', 'xfs', 'zfs', 'jfs',
'vfat', 'msdos', 'reiser4', 'reiserfs']
def get_config(cfgpath):
if not os.path.exists(cfgpath):
if not os.path.exists(os.path.join(CUR_DIR, cfgpath)):
raise ValueError("Config file %s is not found!" % cfgpath)
cfgpath = os.path.join(CUR_DIR, cfgpath)
with open(cfgpath, 'r') as cfgf:
config = yaml.load(cfgf.read())
return config
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.strftime("%Y %m %d %H:%M:%S")
return json.JSONEncoder.default(self, o)
def d42_insert(dev42, nodes, options, static_opt):
# get customer info
customer_name = static_opt.get('customer')
customer_id = str(static_opt.get('customer_id') or '') or None
if (not customer_id and customer_name) or (customer_id and not customer_name):
all_customers = dev42._get('customers')['Customers']
for cst in all_customers:
if customer_id and str(cst['id']) == customer_id:
customer_name = cst['name']
break
if customer_name and cst['name'] == customer_name:
customer_id = str(cst['id'])
break
logger.debug("Customer %s: '%s'" % (customer_id, customer_name))
# processing all nodes
for node in [nodes[x] for x in nodes]:
if not node:
logger.debug("Skip node: no proper node data")
continue
if 'nodename' not in node:
logger.debug("Skip node: no name found")
continue
node_name = node['nodename']
if options.get('as_node_name').upper() == 'FQDN':
node_name = node.get('fqdn', node_name)
# filtering by attributes
if options.get('node_filter'):
if not node_filter(node, options['node_filter']):
logger.info("Skip node %s: filter not passed" % node_name)
continue # filter not passed
try:
# device = dev42.get_device_by_name(node_name)
# detect memory
totalmem = int(float(node['mem_total']))
cpupower = 0
cpus = node['num_cpus']
cpupowers = cpuf_re.findall(node['cpu_model'])
if cpupowers:
cpupower = int(float(cpupowers[0]) * 1000)
data = {
'name': node_name,
'os': node['os'],
'osver': node['osrelease'],
'cpupower': cpupower,
'memory': totalmem,
'cpucore': cpus,
'manufacturer': node['manufacturer'],
'customer': customer_name,
'service_level': static_opt.get('service_level')
}
uuid = None
if 'machine_id' in node:
uuid = node['machine_id']
if not uuid and 'uuid' in node:
uuid = node['uuid']
if uuid:
data.update({'uuid': uuid})
serial_no = None
if 'serialnumber' in node:
serial_no = node['serialnumber']
if not serial_no and 'system_serialnumber' in node:
serial_no = node['system_serialnumber']
if serial_no:
data.update({'serial_no': serial_no})
nodetype = 'physical'
virtual_subtype = None
is_virtual = 'no'
if node['virtual'] != nodetype:
is_virtual = 'yes'
nodetype = 'virtual'
if 'virtual_subtype' in node:
virtual_subtype = node['virtual_subtype']
else:
virtual_subtype = node['virtual']
if virtual_subtype is not None:
data.update({'virtual_subtype': virtual_subtype})
data.update({
'type': nodetype,
'is_it_virtual_host': is_virtual
})
osarch = None
if 'osarch' in node and '64' in node['osarch']:
osarch = 64
if 'osarch' in node and '86' in node['osarch']:
osarch = 32
if osarch is not None:
data.update({'osarch': osarch})
# detect disks
if 'disks' in node:
hdd_count = 0
hdd_size = 0
disks = {}
if node['id'] in node['disks'] and type(node['disks'][node['id']]) == dict:
# get unique
for disk in node['disks'][node['id']]:
disk = node['disks'][node['id']][disk]
if 'UUID' in disk and disk['UUID'] not in disks:
disks[disk['UUID']] = disk
for disk in disks:
if 'TYPE' in disks[disk] and disks[disk]['TYPE'].lower() in ALLOWED_FSTYPES:
hdd_count += 1
if 'usage' in node and node['id'] in node['usage'] and type(node['usage'][node['id']] == dict):
for disk in node['usage'][node['id']]:
disk = node['usage'][node['id']][disk]
if 'filesystem' in disk and disk['filesystem'] in node['disks'][node['id']] and '1K-blocks' in disk:
hdd_size += int(disk['1K-blocks'])
data.update({'hddcount': hdd_count, 'hddsize': float(hdd_size) / (1024 * 1024)})
if 'cpus' in node:
if type(node['cpus'][node['id']]) == dict:
data.update({'cpucount': int(node['cpus'][node['id']]['physical id']) + 1})
if options.get('hostname_precedence'):
data.update({'new_name': node_name})
logger.debug("Updating node %s" % node_name)
updateinfo = dev42.update_device(**data)
deviceid = updateinfo['msg'][1]
logger.info("Device %s updated/created (id %s)" % (node_name, deviceid))
cfdata = {
'name': node_name,
'key': 'Salt Node ID',
'value': node_name,
'notes': 'Salt Master Server %s' % node['master']
}
dev42._put('device/custom_field', cfdata)
# Dealing with IPs
device_ips = dev42._get("ips", data={'device': node_name})['ips']
updated_ips = []
if node.get('hwaddr_interfaces'):
for ifsname, ifs in node.get('hwaddr_interfaces').items():
if ifsname.startswith('lo'):
continue
dev42._put('device', {
'device_id': deviceid,
'macaddress': ifs
})
if node.get('ip_interfaces') and node.get('hwaddr_interfaces'):
for ifsname, ifs in node.get('ip_interfaces').items():
if ifsname.startswith('lo') or ifsname.startswith('tun') or ifsname.startswith('tap'):
continue # filter out local and tunnel
for ip in ifs:
if ip.startswith('127.0'):
continue # local loopbacks
if ip.lower().startswith('fe80'):
continue # local loopbacks
if ifsname not in node.get('hwaddr_interfaces'):
continue
ipdata = {
'ipaddress': ip,
'tag': ifsname,
'device_id': deviceid,
'macaddress': node.get('hwaddr_interfaces')[ifsname]
}
# logger.debug("IP data: %s" % ipdata)
updateinfo = dev42._post('ips', ipdata)
updated_ips.append(updateinfo['msg'][1])
logger.info("IP %s for device %s updated/created (id %s)" % (ip, node_name, deviceid))
# Delete other IPs from the device
if updated_ips:
for d_ip in device_ips:
if d_ip['id'] not in updated_ips:
dev42._delete('ips/%s/' % d_ip['id'])
logger.debug("Deleted IP %s (id %s) for device %s (id %s)" %
(d_ip['ip'], d_ip['id'], node_name, deviceid))
except Exception as e:
logger.exception("Error (%s) updating device %s" % (type(e), node_name))
def main():
global debug_mode
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
debug_mode = True
if args.quiet:
logger.setLevel(logging.ERROR)
debug_mode = False
config = get_config(args.config)
local = salt.client.LocalClient()
if not args.nodefile:
if args.onlynode:
salt_nodes = local.cmd(args.onlynode[0], 'grains.items', expr_form='list')
else:
salt_nodes = local.cmd('*', 'grains.items')
else:
with open(args.nodefile, 'r') as nf:
all_nodes = json.loads(nf.read())
if isinstance(all_nodes, dict):
all_nodes = [all_nodes]
salt_nodes = all_nodes[0]
if args.onlynode:
salt_nodes = {}
for key, node in all_nodes[0].items():
if node.get('nodename') in args.onlynode[0] or node.get('fqdn') in args.onlynode[0]:
salt_nodes[key] = node
logger.debug("Got %s nodes from file" % len(salt_nodes))
for node in salt_nodes:
try:
if not node:
continue
if type(salt_nodes[node]) != dict:
continue
salt_nodes[node]['disks'] = local.cmd(node, 'disk.blkid')
salt_nodes[node]['usage'] = local.cmd(node, 'disk.usage')
salt_nodes[node]['cpus'] = local.cmd(node, 'status.cpuinfo')
except Exception as e:
logger.exception("Error (%s) getting device information %s" % (type(e), node))
if args.savenodes:
with open(args.savenodes, 'w') as wnf:
wnf.write(json.dumps(salt_nodes, cls=JSONEncoder, indent=4, sort_keys=True, ensure_ascii=False))
dev42 = device42.Device42(
endpoint=config['device42']['host'],
user=config['device42']['user'],
password=config['device42']['pass'],
logger=logger,
debug=debug_mode
)
d42_insert(dev42, salt_nodes, config['options'], config.get('static', {}))
return 0
if __name__ == "__main__":
ret_val = main()
print 'Done'
sys.exit(ret_val)
| {
"content_hash": "3de17772ed1f4ea42c9b792e2871bb90",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 128,
"avg_line_length": 38.71382636655949,
"alnum_prop": 0.5132890365448505,
"repo_name": "device42/salt_to_device42_sync",
"id": "106761b78634a7790fc4c284d4905300707d1d6a",
"size": "12065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saltexplore.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18912"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.azurestack import AzureStackManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-azurestack
# USAGE
python put.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = AzureStackManagementClient(
credential=DefaultAzureCredential(),
subscription_id="dd8597b4-8739-4467-8b10-f8679f62bfbf",
)
response = client.customer_subscriptions.create(
resource_group="azurestack",
registration_name="testregistration",
customer_subscription_name="E09A4E93-29A7-4EBA-A6D4-76202383F07F",
customer_creation_parameters={"properties": {"tenantId": "dbab3982-796f-4d03-9908-044c08aef8a2"}},
)
print(response)
# x-ms-original-file: specification/azurestack/resource-manager/Microsoft.AzureStack/stable/2022-06-01/examples/CustomerSubscription/Put.json
if __name__ == "__main__":
main()
| {
"content_hash": "790034b1d9f9f9e9b4e7b3a69ee78b8b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 141,
"avg_line_length": 36.857142857142854,
"alnum_prop": 0.737984496124031,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a7067b5dacab3b7c39a3a2bc41a64a090b1eca8e",
"size": "1758",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/azurestack/azure-mgmt-azurestack/generated_samples/put.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("XGBClassifier" , "BinaryClass_100" , "mssql")
| {
"content_hash": "3915fb32aeb5fdd466eb8dbe8c06b1f0",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 35.75,
"alnum_prop": 0.7832167832167832,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "d968aeaac0627f1d93e8eb61b45af17344997c8c",
"size": "143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/BinaryClass_100/ws_BinaryClass_100_XGBClassifier_mssql_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
from aws import Action
service_name = 'Amazon CloudWatch Logs'
prefix = 'logs'
CreateLogGroup = Action(prefix, 'CreateLogGroup')
CreateLogStream = Action(prefix, 'CreateLogStream')
DeleteLogGroup = Action(prefix, 'DeleteLogGroup')
DeleteLogStream = Action(prefix, 'DeleteLogStream')
DeleteMetricFilter = Action(prefix, 'DeleteMetricFilter')
DeleteRetentionPolicy = Action(prefix, 'DeleteRetentionPolicy')
DescribeLogGroups = Action(prefix, 'DescribeLogGroups')
DescribeLogStreams = Action(prefix, 'DescribeLogStreams')
DescribeMetricFilters = Action(prefix, 'DescribeMetricFilters')
GetLogEvents = Action(prefix, 'GetLogEvents')
PutLogEvents = Action(prefix, 'PutLogEvents')
PutMetricFilter = Action(prefix, 'PutMetricFilter')
PutRetentionPolicy = Action(prefix, 'PutRetentionPolicy')
TestMetricFilter = Action(prefix, 'TestMetricFilter')
| {
"content_hash": "a60cb722de29ccd3bc5cd97584a5b431",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 44.26315789473684,
"alnum_prop": 0.8085612366230678,
"repo_name": "craigbruce/awacs",
"id": "33e7612466a02bc95cc87bd8c50ef87ed836909d",
"size": "957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awacs/logs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "94728"
}
],
"symlink_target": ""
} |
import copy
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers.extensions.routerrules\
import rulemanager
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
class RouterMixin(object):
@test.create_stubs({
api.neutron: ('router_get', 'port_list',
'network_get', 'is_extension_supported'),
})
def _get_detail(self, router, extraroute=True):
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.MultipleTimes().AndReturn(extraroute)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(router)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
return res
def _mock_external_network_list(self, alter_ids=False):
search_opts = {'router:external': True}
ext_nets = [n for n in self.networks.list() if n['router:external']]
if alter_ids:
for ext_net in ext_nets:
ext_net.id += 'some extra garbage'
api.neutron.network_list(
IsA(http.HttpRequest),
**search_opts).AndReturn(ext_nets)
def _mock_external_network_get(self, router):
ext_net_id = router.external_gateway_info['network_id']
ext_net = self.networks.list()[2]
api.neutron.network_get(IsA(http.HttpRequest), ext_net_id,
expand_subnet=False).AndReturn(ext_net)
def _mock_network_list(self, tenant_id):
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=tenant_id).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
class RouterTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index(self):
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
routers = res.context['table'].data
self.assertItemsEqual(routers, self.routers.list())
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index_router_list_exception(self):
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).MultipleTimes().AndRaise(self.exceptions.neutron)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertEqual(len(res.context['table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_set_external_network_empty(self):
router = self.routers.first()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).MultipleTimes().AndReturn([router])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list(alter_ids=True)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
table_data = res.context['table'].data
self.assertEqual(len(table_data), 1)
self.assertIn('(Not Found)',
table_data[0]['external_gateway_info']['network'])
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertMessageCount(res, error=1)
def test_router_detail(self):
router = self.routers.first()
res = self._get_detail(router)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
ports = res.context['interfaces_table'].data
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('router_get',)})
def test_router_detail_exception(self):
router = self.routers.first()
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_delete(self):
router = self.routers.first()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn([])
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
self.assertMessageCount(response=res, success=1)
self.assertIn('Deleted Router: ' + router.name,
res.content.decode('utf-8'))
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_remove_interface',
'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_with_interface_delete(self):
router = self.routers.first()
ports = self.ports.list()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn(ports)
for port in ports:
api.neutron.router_remove_interface(IsA(http.HttpRequest),
router.id, port_id=port.id)
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
self.assertMessageCount(response=res, success=1)
self.assertIn('Deleted Router: ' + router.name,
res.content.decode('utf-8'))
class RouterActionTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(False)
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post_mode_server_default(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.AndReturn(True)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(True)
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'server_default',
'ha': 'server_default',
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_dvr_ha_router_create_post(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(True)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.MultipleTimes().AndReturn(True)
param = {'name': router.name,
'distributed': True,
'ha': True,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **param)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'distributed',
'ha': 'enabled',
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post_exception_error_case_409(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(False)
self.exceptions.neutron.status_code = 409
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post_exception_error_case_non_409(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.MultipleTimes().AndReturn(False)
self.exceptions.neutron.status_code = 999
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'get_feature_permission')})
def _test_router_update_get(self, dvr_enabled=False,
current_dvr=False,
ha_enabled=False):
router = [r for r in self.routers.list()
if r.distributed == current_dvr][0]
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(dvr_enabled)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(ha_enabled)
self.mox.ReplayAll()
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
return self.client.get(url)
def test_router_update_get_dvr_disabled(self):
res = self._test_router_update_get(dvr_enabled=False)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertNotContains(res, 'Router Type')
self.assertNotContains(res, 'id="id_mode"')
def test_router_update_get_dvr_enabled_mode_centralized(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=False)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertContains(res, 'Router Type')
# Check both menu are displayed.
self.assertContains(
res,
'<option value="centralized" selected="selected">'
'Centralized</option>',
html=True)
self.assertContains(
res,
'<option value="distributed">Distributed</option>',
html=True)
def test_router_update_get_dvr_enabled_mode_distributed(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=True)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertContains(res, 'Router Type')
self.assertContains(
res,
'<input class="form-control" id="id_mode" name="mode" '
'readonly="readonly" type="text" value="distributed" />',
html=True)
self.assertNotContains(res, 'centralized')
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_feature_permission')})
def test_router_update_post_dvr_ha_disabled(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(False)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(False)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up)\
.AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up}
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_feature_permission')})
def test_router_update_post_dvr_ha_enabled(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(True)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(True)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up,
# ha=True,
distributed=True).AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up,
'mode': 'distributed',
'ha': True}
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
def _test_router_addinterface(self, raise_error=False):
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
add_interface = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, subnet_id=subnet.id)
if raise_error:
add_interface.AndRaise(self.exceptions.neutron)
else:
add_interface.AndReturn({'subnet_id': subnet.id,
'port_id': port.id})
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
self._check_router_addinterface(router, subnet)
def _check_router_addinterface(self, router, subnet, ip_address=''):
# mock APIs used to show router detail
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.port_list(IsA(http.HttpRequest), device_id=router.id)\
.AndReturn([])
self._mock_network_list(router['tenant_id'])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'subnet_id': subnet.id,
'ip_address': ip_address}
url = reverse('horizon:%s:routers:addinterface' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[router.id])
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'port_get',
'network_list',
'port_list')})
def test_router_addinterface(self):
self._test_router_addinterface()
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'network_list',
'port_list')})
def test_router_addinterface_exception(self):
self._test_router_addinterface(raise_error=True)
def _test_router_addinterface_ip_addr(self, errors=None):
errors = errors or []
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
ip_addr = port['fixed_ips'][0]['ip_address']
self._setup_mock_addinterface_ip_addr(router, subnet, port,
ip_addr, errors)
self._check_router_addinterface(router, subnet, ip_addr)
def _setup_mock_addinterface_ip_addr(self, router, subnet, port,
ip_addr, errors=None):
errors = errors or []
subnet_get = api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)
if 'subnet_get' in errors:
subnet_get.AndRaise(self.exceptions.neutron)
return
subnet_get.AndReturn(subnet)
params = {'network_id': subnet.network_id,
'fixed_ips': [{'subnet_id': subnet.id,
'ip_address': ip_addr}]}
port_create = api.neutron.port_create(IsA(http.HttpRequest), **params)
if 'port_create' in errors:
port_create.AndRaise(self.exceptions.neutron)
return
port_create.AndReturn(port)
add_inf = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, port_id=port.id)
if 'add_interface' not in errors:
return
add_inf.AndRaise(self.exceptions.neutron)
port_delete = api.neutron.port_delete(IsA(http.HttpRequest), port.id)
if 'port_delete' in errors:
port_delete.AndRaise(self.exceptions.neutron)
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr(self):
self._test_router_addinterface_ip_addr()
@test.create_stubs({api.neutron: ('subnet_get', 'router_get',
'network_list', 'port_list')})
def test_router_addinterface_ip_addr_exception_subnet_get(self):
self._test_router_addinterface_ip_addr(errors=['subnet_get'])
@test.create_stubs({api.neutron: ('subnet_get', 'port_create',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_port_create(self):
self._test_router_addinterface_ip_addr(errors=['port_create'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_add_interface(self):
self._test_router_addinterface_ip_addr(errors=['add_interface'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_port_delete(self):
self._test_router_addinterface_ip_addr(errors=['add_interface',
'port_delete'])
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndReturn(None)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway_exception(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndRaise(self.exceptions.neutron)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
class RouterRuleTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
def test_extension_hides_without_rules(self):
router = self.routers.first()
res = self._get_detail(router)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertTemplateNotUsed(
res,
'%s/routers/extensions/routerrules/grid.html' % self.DASHBOARD)
@test.create_stubs({api.neutron: ('network_list',)})
def test_routerrule_detail(self):
router = self.routers_with_rules.first()
if self.DASHBOARD == 'project':
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=router['tenant_id']).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
res = self._get_detail(router)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
if self.DASHBOARD == 'project':
self.assertTemplateUsed(
res,
'%s/routers/extensions/routerrules/grid.html' % self.DASHBOARD)
rules = res.context['routerrules_table'].data
self.assertItemsEqual(rules, router['router_rules'])
def _test_router_addrouterrule(self, raise_error=False):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
rule = {'source': '1.2.3.4/32', 'destination': '4.3.2.1/32', 'id': 99,
'action': 'permit', 'nexthops': ['1.1.1.1', '2.2.2.2']}
post_router['router_rules'].insert(0, rule)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
if raise_error:
router_update.AndRaise(self.exceptions.neutron)
else:
router_update.AndReturn({'router': post_router})
self.mox.ReplayAll()
form_data = {'router_id': pre_router.id,
'source': rule['source'],
'destination': rule['destination'],
'action': rule['action'],
'nexthops': ','.join(rule['nexthops'])}
url = reverse('horizon:%s:routers:addrouterrule' % self.DASHBOARD,
args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[pre_router.id])
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_update')})
def test_router_addrouterrule(self):
self._test_router_addrouterrule()
@test.create_stubs({api.neutron: ('router_get',
'router_update')})
def test_router_addrouterrule_exception(self):
self._test_router_addrouterrule(raise_error=True)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'port_list', 'network_get',
'is_extension_supported')})
def test_router_removerouterrule(self):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
rule = post_router['router_rules'].pop()
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.AndReturn(False)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self.mox.ReplayAll()
form_rule_id = rule['source'] + rule['destination']
form_data = {'router_id': pre_router.id,
'action': 'routerrules__delete__%s' % form_rule_id}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'network_list', 'port_list',
'network_get',
'is_extension_supported')})
def test_router_resetrouterrules(self):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
default_rules = [{'source': 'any', 'destination': 'any',
'action': 'permit', 'nexthops': [], 'id': '2'}]
del post_router['router_rules'][:]
post_router['router_rules'].extend(default_rules)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.AndReturn(False)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(post_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self._mock_network_list(pre_router['tenant_id'])
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(post_router)
self.mox.ReplayAll()
form_data = {'router_id': pre_router.id,
'action': 'routerrules__resetrules'}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
class RouterRouteTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
def test_extension_hides_without_routes(self):
router = self.routers_with_routes.first()
res = self._get_detail(router, extraroute=False)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertNotIn('extra_routes_table', res.context)
def test_routerroute_detail(self):
router = self.routers_with_routes.first()
res = self._get_detail(router, extraroute=True)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
routes = res.context['extra_routes_table'].data
routes_dict = [r._apidict for r in routes]
self.assertItemsEqual(routes_dict, router['routes'])
@test.create_stubs({api.neutron: ('router_get', 'router_update')})
def _test_router_addrouterroute(self, raise_error=False):
pre_router = self.routers_with_routes.first()
post_router = copy.deepcopy(pre_router)
route = {'nexthop': '10.0.0.5', 'destination': '40.0.1.0/24'}
post_router['routes'].insert(0, route)
api.neutron.router_get(IsA(http.HttpRequest), pre_router.id)\
.MultipleTimes().AndReturn(pre_router)
params = {}
params['routes'] = post_router['routes']
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
if raise_error:
router_update.AndRaise(self.exceptions.neutron)
else:
router_update.AndReturn({'router': post_router})
self.mox.ReplayAll()
form_data = copy.deepcopy(route)
form_data['router_id'] = pre_router.id
url = reverse('horizon:%s:routers:addrouterroute' % self.DASHBOARD,
args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[pre_router.id])
self.assertRedirectsNoFollow(res, detail_url)
def test_router_addrouterroute(self):
if self.DASHBOARD == 'project':
self._test_router_addrouterroute()
self.assertMessageCount(success=1)
def test_router_addrouterroute_exception(self):
if self.DASHBOARD == 'project':
self._test_router_addrouterroute(raise_error=True)
self.assertMessageCount(error=1)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'network_get', 'port_list',
'is_extension_supported')})
def test_router_removeroute(self):
if self.DASHBOARD == 'admin':
return
pre_router = self.routers_with_routes.first()
post_router = copy.deepcopy(pre_router)
route = post_router['routes'].pop()
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.MultipleTimes().AndReturn(True)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['routes'] = post_router['routes']
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self.mox.ReplayAll()
form_route_id = route['nexthop'] + ":" + route['destination']
form_data = {'action': 'extra_routes__delete__%s' % form_route_id}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
class RouterViewTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_disabled_when_quota_exceeded(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers']['available'] = 0
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_action = self.getAndAssertTableAction(res, 'routers', 'create')
self.assertTrue('disabled' in create_action.classes,
'Create button is not disabled')
self.assertEqual('Create Router (Quota exceeded)',
create_action.verbose_name)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_shown_when_quota_disabled(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers'].pop('available')
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_action = self.getAndAssertTableAction(res, 'routers', 'create')
self.assertFalse('disabled' in create_action.classes,
'Create button should not be disabled')
self.assertEqual('Create Router',
create_action.verbose_name)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_attributes(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers']['available'] = 10
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_action = self.getAndAssertTableAction(res, 'routers', 'create')
self.assertEqual(set(['ajax-modal']), set(create_action.classes))
self.assertEqual('Create Router',
six.text_type(create_action.verbose_name))
self.assertEqual('horizon:project:routers:create', create_action.url)
self.assertEqual((('network', 'create_router'),),
create_action.policy_rules)
| {
"content_hash": "73217e6eaf8b2b5489b3c0297ef0d7c8",
"timestamp": "",
"source": "github",
"line_count": 993,
"max_line_length": 80,
"avg_line_length": 44.53474320241692,
"alnum_prop": 0.5645478597110101,
"repo_name": "ankur-gupta91/horizon-net-ip",
"id": "7acc63e17e4faace6ab53fe8fa4d3ee0c1fd6d4b",
"size": "44844",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/routers/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "294011"
},
{
"name": "HTML",
"bytes": "1292070"
},
{
"name": "JavaScript",
"bytes": "3301345"
},
{
"name": "Makefile",
"bytes": "6753"
},
{
"name": "Python",
"bytes": "13673798"
},
{
"name": "Shell",
"bytes": "42875"
}
],
"symlink_target": ""
} |
import signal
import warnings
from collections import Mapping
import os
import click
import ethereum
import gevent
from IPython.core import ultratb
from ethereum.blocks import Block, genesis
from devp2p.service import BaseService
import rlp
import sys
from ethereum import slogging
import types
from ethereum.utils import bcolors
slogging.set_level('db', 'debug')
log = slogging.get_logger('db')
def load_contrib_services(config): # FIXME
# load contrib services
config_directory = config['data_dir']
contrib_directory = os.path.join(config_directory, 'contrib') # change to pyethapp/contrib
contrib_modules = []
if not os.path.exists(contrib_directory):
log.info('No contrib directory found, so not loading any user services')
return []
x = os.getcwd()
os.chdir(config_directory)
for filename in os.listdir(contrib_directory):
if filename.endswith('.py'):
print filename
try:
__import__(filename[:-3])
library_conflict = True
except:
library_conflict = False
if library_conflict:
raise Exception("Library conflict: please rename " + filename + " in contribs")
sys.path.append(contrib_directory)
contrib_modules.append(__import__(filename[:-3]))
sys.path.pop()
contrib_services = []
for module in contrib_modules:
print 'm', module, dir(module)
on_start, on_block = None, None
for variable in dir(module):
cls = getattr(module, variable)
if isinstance(cls, (type, types.ClassType)):
if issubclass(cls, BaseService) and cls != BaseService:
contrib_services.append(cls)
if variable == 'on_block':
on_block = getattr(module, variable)
if variable == 'on_start':
on_start = getattr(module, variable)
if on_start or on_block:
contrib_services.append(on_block_callback_service_factory(on_start, on_block))
log.info('Loaded contrib services', services=contrib_services)
return contrib_services
def on_block_callback_service_factory(on_start, on_block):
class _OnBlockCallbackService(BaseService):
name = 'onblockservice%d' % on_block_callback_service_factory.created
def start(self):
super(_OnBlockCallbackService, self).start()
self.app.services.chain.on_new_head_cbs.append(self.cb)
if on_start:
on_start(self.app)
def cb(self, blk):
if on_block:
on_block(blk)
on_block_callback_service_factory.created += 1
return _OnBlockCallbackService
on_block_callback_service_factory.created = 0
def load_block_tests(data, db):
"""Load blocks from json file.
:param data: the data from the json file as dictionary
:param db: the db in which the blocks will be stored
:raises: :exc:`ValueError` if the file contains invalid blocks
:raises: :exc:`KeyError` if the file is missing required data fields
:returns: a list of blocks in an ephem db
"""
scanners = ethereum.utils.scanners
initial_alloc = {}
for address, acct_state in data['pre'].items():
address = ethereum.utils.decode_hex(address)
balance = scanners['int256b'](acct_state['balance'][2:])
nonce = scanners['int256b'](acct_state['nonce'][2:])
initial_alloc[address] = {
'balance': balance,
'code': acct_state['code'],
'nonce': nonce,
'storage': acct_state['storage']
}
env = ethereum.config.Env(db=db)
genesis(env, start_alloc=initial_alloc) # builds the state trie
genesis_block = rlp.decode(ethereum.utils.decode_hex(data['genesisRLP'][2:]), Block, env=env)
blocks = {genesis_block.hash: genesis_block}
for blk in data['blocks']:
rlpdata = ethereum.utils.decode_hex(blk['rlp'][2:])
assert ethereum.utils.decode_hex(blk['blockHeader']['parentHash']) in blocks
parent = blocks[ethereum.utils.decode_hex(blk['blockHeader']['parentHash'])]
block = rlp.decode(rlpdata, Block, parent=parent, env=env)
blocks[block.hash] = block
return sorted(blocks.values(), key=lambda b: b.number)
def merge_dict(dest, source):
stack = [(dest, source)]
while stack:
curr_dest, curr_source = stack.pop()
for key in curr_source:
if key not in curr_dest:
curr_dest[key] = curr_source[key]
else:
if isinstance(curr_source[key], Mapping):
if isinstance(curr_dest[key], Mapping):
stack.append((curr_dest[key], curr_source[key]))
else:
raise ValueError('Incompatible types during merge: {} and {}'.format(
type(curr_source[key]),
type(curr_dest[key])
))
else:
curr_dest[key] = curr_source[key]
return dest
class FallbackChoice(click.Choice):
def __init__(self, choices, fallbacks, fallback_warning):
super(FallbackChoice, self).__init__(choices)
self.fallbacks = fallbacks
self.fallback_warning = fallback_warning
def convert(self, value, param, ctx):
if value in self.fallbacks:
warnings.warn(self.fallback_warning)
value = self.fallbacks[value]
return super(FallbackChoice, self).convert(value, param, ctx)
def enable_greenlet_debugger():
def _print_exception(self, context, type_, value, traceback):
ultratb.VerboseTB(call_pdb=True)(type_, value, traceback)
resp = raw_input(
"{c.OKGREEN}Debugger exited. "
"{c.OKBLUE}Do you want to quit pyethapp?{c.ENDC} [{c.BOLD}Y{c.ENDC}/n] ".format(
c=bcolors
)
).strip().lower()
if not resp or resp.startswith("y"):
os.kill(os.getpid(), signal.SIGTERM)
gevent.get_hub().__class__.print_exception = _print_exception
| {
"content_hash": "c855ad00209ed1fbb14d8ce4cb8b6252",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 97,
"avg_line_length": 36.84431137724551,
"alnum_prop": 0.6075085324232082,
"repo_name": "RomanZacharia/pyethapp",
"id": "1191b640bbeb2a62e1a04f43922dbcf81ae30dd0",
"size": "6153",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyethapp/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "299219"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
import tempfile
from io import StringIO
from django.contrib.gis import gdal
from django.contrib.gis.db.models import Extent, MakeLine, Union, functions
from django.contrib.gis.geos import (
GeometryCollection, GEOSGeometry, LinearRing, LineString, MultiLineString,
MultiPoint, MultiPolygon, Point, Polygon, fromstr,
)
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from ..utils import (
mysql, no_oracle, oracle, postgis, skipUnlessGISLookup, spatialite,
)
from .models import (
City, Country, Feature, MinusOneSRID, NonConcreteModel, PennsylvaniaCity,
State, Track,
)
class GeoModelTest(TestCase):
fixtures = ['initial']
def test_fixtures(self):
"Testing geographic model initialization from fixtures."
# Ensuring that data was loaded from initial data fixtures.
self.assertEqual(2, Country.objects.count())
self.assertEqual(8, City.objects.count())
self.assertEqual(2, State.objects.count())
def test_proxy(self):
"Testing Lazy-Geometry support (using the GeometryProxy)."
# Testing on a Point
pnt = Point(0, 0)
nullcity = City(name='NullCity', point=pnt)
nullcity.save()
# Making sure TypeError is thrown when trying to set with an
# incompatible type.
for bad in [5, 2.0, LineString((0, 0), (1, 1))]:
with self.assertRaisesMessage(TypeError, 'Cannot set'):
nullcity.point = bad
# Now setting with a compatible GEOS Geometry, saving, and ensuring
# the save took, notice no SRID is explicitly set.
new = Point(5, 23)
nullcity.point = new
# Ensuring that the SRID is automatically set to that of the
# field after assignment, but before saving.
self.assertEqual(4326, nullcity.point.srid)
nullcity.save()
# Ensuring the point was saved correctly after saving
self.assertEqual(new, City.objects.get(name='NullCity').point)
# Setting the X and Y of the Point
nullcity.point.x = 23
nullcity.point.y = 5
# Checking assignments pre & post-save.
self.assertNotEqual(Point(23, 5, srid=4326), City.objects.get(name='NullCity').point)
nullcity.save()
self.assertEqual(Point(23, 5, srid=4326), City.objects.get(name='NullCity').point)
nullcity.delete()
# Testing on a Polygon
shell = LinearRing((0, 0), (0, 100), (100, 100), (100, 0), (0, 0))
inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40))
# Creating a State object using a built Polygon
ply = Polygon(shell, inner)
nullstate = State(name='NullState', poly=ply)
self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None
nullstate.save()
ns = State.objects.get(name='NullState')
self.assertEqual(ply, ns.poly)
# Testing the `ogr` and `srs` lazy-geometry properties.
self.assertIsInstance(ns.poly.ogr, gdal.OGRGeometry)
self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb)
self.assertIsInstance(ns.poly.srs, gdal.SpatialReference)
self.assertEqual('WGS 84', ns.poly.srs.name)
# Changing the interior ring on the poly attribute.
new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30))
ns.poly[1] = new_inner
ply[1] = new_inner
self.assertEqual(4326, ns.poly.srid)
ns.save()
self.assertEqual(ply, State.objects.get(name='NullState').poly)
ns.delete()
@skipUnlessDBFeature("supports_transform")
def test_lookup_insert_transform(self):
"Testing automatic transform for lookups and inserts."
# San Antonio in 'WGS84' (SRID 4326)
sa_4326 = 'POINT (-98.493183 29.424170)'
wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84
# San Antonio in 'WGS 84 / Pseudo-Mercator' (SRID 3857)
other_srid_pnt = wgs_pnt.transform(3857, clone=True)
# Constructing & querying with a point from a different SRID. Oracle
# `SDO_OVERLAPBDYINTERSECT` operates differently from
# `ST_Intersects`, so contains is used instead.
if oracle:
tx = Country.objects.get(mpoly__contains=other_srid_pnt)
else:
tx = Country.objects.get(mpoly__intersects=other_srid_pnt)
self.assertEqual('Texas', tx.name)
# Creating San Antonio. Remember the Alamo.
sa = City.objects.create(name='San Antonio', point=other_srid_pnt)
# Now verifying that San Antonio was transformed correctly
sa = City.objects.get(name='San Antonio')
self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6)
self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6)
# If the GeometryField SRID is -1, then we shouldn't perform any
# transformation if the SRID of the input geometry is different.
m1 = MinusOneSRID(geom=Point(17, 23, srid=4326))
m1.save()
self.assertEqual(-1, m1.geom.srid)
def test_createnull(self):
"Testing creating a model instance and the geometry being None"
c = City()
self.assertIsNone(c.point)
def test_geometryfield(self):
"Testing the general GeometryField."
Feature(name='Point', geom=Point(1, 1)).save()
Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save()
Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save()
Feature(name='GeometryCollection',
geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)),
Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save()
f_1 = Feature.objects.get(name='Point')
self.assertIsInstance(f_1.geom, Point)
self.assertEqual((1.0, 1.0), f_1.geom.tuple)
f_2 = Feature.objects.get(name='LineString')
self.assertIsInstance(f_2.geom, LineString)
self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple)
f_3 = Feature.objects.get(name='Polygon')
self.assertIsInstance(f_3.geom, Polygon)
f_4 = Feature.objects.get(name='GeometryCollection')
self.assertIsInstance(f_4.geom, GeometryCollection)
self.assertEqual(f_3.geom, f_4.geom[2])
# TODO: fix on Oracle: ORA-22901: cannot compare nested table or VARRAY or
# LOB attributes of an object type.
@no_oracle
@skipUnlessDBFeature("supports_transform")
def test_inherited_geofields(self):
"Database functions on inherited Geometry fields."
# Creating a Pennsylvanian city.
PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
# All transformation SQL will need to be performed on the
# _parent_ table.
qs = PennsylvaniaCity.objects.annotate(new_point=functions.Transform('point', srid=32128))
self.assertEqual(1, qs.count())
for pc in qs:
self.assertEqual(32128, pc.new_point.srid)
def test_raw_sql_query(self):
"Testing raw SQL query."
cities1 = City.objects.all()
# Only PostGIS would support a 'select *' query because of its recognized
# HEXEWKB format for geometry fields
as_text = 'ST_AsText(%s)' if postgis else connection.ops.select
cities2 = City.objects.raw(
'select id, name, %s from geoapp_city' % as_text % 'point'
)
self.assertEqual(len(cities1), len(list(cities2)))
self.assertIsInstance(cities2[0].point, Point)
def test_dumpdata_loaddata_cycle(self):
"""
Test a dumpdata/loaddata cycle with geographic data.
"""
out = StringIO()
original_data = list(City.objects.all().order_by('name'))
call_command('dumpdata', 'geoapp.City', stdout=out)
result = out.getvalue()
houston = City.objects.get(name='Houston')
self.assertIn('"point": "%s"' % houston.point.ewkt, result)
# Reload now dumped data
with tempfile.NamedTemporaryFile(mode='w', suffix='.json') as tmp:
tmp.write(result)
tmp.seek(0)
call_command('loaddata', tmp.name, verbosity=0)
self.assertEqual(original_data, list(City.objects.all().order_by('name')))
@skipUnlessDBFeature("supports_empty_geometries")
def test_empty_geometries(self):
geometry_classes = [
Point,
LineString,
LinearRing,
Polygon,
MultiPoint,
MultiLineString,
MultiPolygon,
GeometryCollection,
]
for klass in geometry_classes:
g = klass(srid=4326)
feature = Feature.objects.create(name='Empty %s' % klass.__name__, geom=g)
feature.refresh_from_db()
if klass is LinearRing:
# LinearRing isn't representable in WKB, so GEOSGeomtry.wkb
# uses LineString instead.
g = LineString(srid=4326)
self.assertEqual(feature.geom, g)
self.assertEqual(feature.geom.srid, g.srid)
class GeoLookupTest(TestCase):
fixtures = ['initial']
def test_disjoint_lookup(self):
"Testing the `disjoint` lookup type."
ptown = City.objects.get(name='Pueblo')
qs1 = City.objects.filter(point__disjoint=ptown.point)
self.assertEqual(7, qs1.count())
if connection.features.supports_real_shape_operations:
qs2 = State.objects.filter(poly__disjoint=ptown.point)
self.assertEqual(1, qs2.count())
self.assertEqual('Kansas', qs2[0].name)
def test_contains_contained_lookups(self):
"Testing the 'contained', 'contains', and 'bbcontains' lookup types."
# Getting Texas, yes we were a country -- once ;)
texas = Country.objects.get(name='Texas')
# Seeing what cities are in Texas, should get Houston and Dallas,
# and Oklahoma City because 'contained' only checks on the
# _bounding box_ of the Geometries.
if connection.features.supports_contained_lookup:
qs = City.objects.filter(point__contained=texas.mpoly)
self.assertEqual(3, qs.count())
cities = ['Houston', 'Dallas', 'Oklahoma City']
for c in qs:
self.assertIn(c.name, cities)
# Pulling out some cities.
houston = City.objects.get(name='Houston')
wellington = City.objects.get(name='Wellington')
pueblo = City.objects.get(name='Pueblo')
okcity = City.objects.get(name='Oklahoma City')
lawrence = City.objects.get(name='Lawrence')
# Now testing contains on the countries using the points for
# Houston and Wellington.
tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry
nz = Country.objects.get(mpoly__contains=wellington.point.hex) # Query w/EWKBHEX
self.assertEqual('Texas', tx.name)
self.assertEqual('New Zealand', nz.name)
# Testing `contains` on the states using the point for Lawrence.
ks = State.objects.get(poly__contains=lawrence.point)
self.assertEqual('Kansas', ks.name)
# Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas)
# are not contained in Texas or New Zealand.
self.assertEqual(len(Country.objects.filter(mpoly__contains=pueblo.point)), 0) # Query w/GEOSGeometry object
self.assertEqual(len(Country.objects.filter(mpoly__contains=okcity.point.wkt)),
0 if connection.features.supports_real_shape_operations else 1) # Query w/WKT
# OK City is contained w/in bounding box of Texas.
if connection.features.supports_bbcontains_lookup:
qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
self.assertEqual(1, len(qs))
self.assertEqual('Texas', qs[0].name)
@skipUnlessDBFeature("supports_crosses_lookup")
def test_crosses_lookup(self):
Track.objects.create(
name='Line1',
line=LineString([(-95, 29), (-60, 0)])
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 0), (-60, 29)])).count(),
1
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 30), (0, 30)])).count(),
0
)
@skipUnlessDBFeature("supports_isvalid_lookup")
def test_isvalid_lookup(self):
invalid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 1 1, 1 0, 0 0))')
State.objects.create(name='invalid', poly=invalid_geom)
qs = State.objects.all()
if oracle or mysql:
# Kansas has adjacent vertices with distance 6.99244813842e-12
# which is smaller than the default Oracle tolerance.
# It's invalid on MySQL too.
qs = qs.exclude(name='Kansas')
self.assertEqual(State.objects.filter(name='Kansas', poly__isvalid=False).count(), 1)
self.assertEqual(qs.filter(poly__isvalid=False).count(), 1)
self.assertEqual(qs.filter(poly__isvalid=True).count(), qs.count() - 1)
@skipUnlessDBFeature("supports_left_right_lookups")
def test_left_right_lookups(self):
"Testing the 'left' and 'right' lookup types."
# Left: A << B => true if xmax(A) < xmin(B)
# Right: A >> B => true if xmin(A) > xmax(B)
# See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source.
# Getting the borders for Colorado & Kansas
co_border = State.objects.get(name='Colorado').poly
ks_border = State.objects.get(name='Kansas').poly
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
# These cities should be strictly to the right of the CO border.
cities = ['Houston', 'Dallas', 'Oklahoma City',
'Lawrence', 'Chicago', 'Wellington']
qs = City.objects.filter(point__right=co_border)
self.assertEqual(6, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# These cities should be strictly to the right of the KS border.
cities = ['Chicago', 'Wellington']
qs = City.objects.filter(point__right=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
vic = City.objects.get(point__left=co_border)
self.assertEqual('Victoria', vic.name)
cities = ['Pueblo', 'Victoria']
qs = City.objects.filter(point__left=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
@skipUnlessGISLookup("strictly_above", "strictly_below")
def test_strictly_above_below_lookups(self):
dallas = City.objects.get(name='Dallas')
self.assertQuerysetEqual(
City.objects.filter(point__strictly_above=dallas.point).order_by('name'),
['Chicago', 'Lawrence', 'Oklahoma City', 'Pueblo', 'Victoria'],
lambda b: b.name
)
self.assertQuerysetEqual(
City.objects.filter(point__strictly_below=dallas.point).order_by('name'),
['Houston', 'Wellington'],
lambda b: b.name
)
def test_equals_lookups(self):
"Testing the 'same_as' and 'equals' lookup types."
pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326)
c1 = City.objects.get(point=pnt)
c2 = City.objects.get(point__same_as=pnt)
c3 = City.objects.get(point__equals=pnt)
for c in [c1, c2, c3]:
self.assertEqual('Houston', c.name)
@skipUnlessDBFeature("supports_null_geometries")
def test_null_geometries(self):
"Testing NULL geometry support, and the `isnull` lookup type."
# Creating a state with a NULL boundary.
State.objects.create(name='Puerto Rico')
# Querying for both NULL and Non-NULL values.
nullqs = State.objects.filter(poly__isnull=True)
validqs = State.objects.filter(poly__isnull=False)
# Puerto Rico should be NULL (it's a commonwealth unincorporated territory)
self.assertEqual(1, len(nullqs))
self.assertEqual('Puerto Rico', nullqs[0].name)
# The valid states should be Colorado & Kansas
self.assertEqual(2, len(validqs))
state_names = [s.name for s in validqs]
self.assertIn('Colorado', state_names)
self.assertIn('Kansas', state_names)
# Saving another commonwealth w/a NULL geometry.
nmi = State.objects.create(name='Northern Mariana Islands', poly=None)
self.assertIsNone(nmi.poly)
# Assigning a geometry and saving -- then UPDATE back to NULL.
nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))'
nmi.save()
State.objects.filter(name='Northern Mariana Islands').update(poly=None)
self.assertIsNone(State.objects.get(name='Northern Mariana Islands').poly)
@skipUnlessDBFeature("supports_relate_lookup")
def test_relate_lookup(self):
"Testing the 'relate' lookup type."
# To make things more interesting, we will have our Texas reference point in
# different SRIDs.
pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)
# Not passing in a geometry as first param raises a TypeError when
# initializing the QuerySet.
with self.assertRaises(ValueError):
Country.objects.filter(mpoly__relate=(23, 'foo'))
# Making sure the right exception is raised for the given
# bad arguments.
for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
qs = Country.objects.filter(mpoly__relate=bad_args)
with self.assertRaises(e):
qs.count()
# Relate works differently for the different backends.
if postgis or spatialite:
contains_mask = 'T*T***FF*'
within_mask = 'T*F**F***'
intersects_mask = 'T********'
elif oracle:
contains_mask = 'contains'
within_mask = 'inside'
# TODO: This is not quite the same as the PostGIS mask above
intersects_mask = 'overlapbdyintersect'
# Testing contains relation mask.
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name)
# Testing within relation mask.
ks = State.objects.get(name='Kansas')
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name)
# Testing intersection relation mask.
if not oracle:
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name)
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name)
class GeoQuerySetTest(TestCase):
# TODO: GeoQuerySet is removed, organize these test better.
fixtures = ['initial']
@skipUnlessDBFeature("supports_extent_aggr")
def test_extent(self):
"""
Testing the `Extent` aggregate.
"""
# Reference query:
# `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');`
# => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203)
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
qs = City.objects.filter(name__in=('Houston', 'Dallas'))
extent = qs.aggregate(Extent('point'))['point__extent']
for val, exp in zip(extent, expected):
self.assertAlmostEqual(exp, val, 4)
self.assertIsNone(City.objects.filter(name=('Smalltown')).aggregate(Extent('point'))['point__extent'])
@skipUnlessDBFeature("supports_extent_aggr")
def test_extent_with_limit(self):
"""
Testing if extent supports limit.
"""
extent1 = City.objects.all().aggregate(Extent('point'))['point__extent']
extent2 = City.objects.all()[:3].aggregate(Extent('point'))['point__extent']
self.assertNotEqual(extent1, extent2)
def test_make_line(self):
"""
Testing the `MakeLine` aggregate.
"""
if not connection.features.supports_make_line_aggr:
with self.assertRaises(NotImplementedError):
City.objects.all().aggregate(MakeLine('point'))
return
# MakeLine on an inappropriate field returns simply None
self.assertIsNone(State.objects.aggregate(MakeLine('poly'))['poly__makeline'])
# Reference query:
# SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city;
ref_line = GEOSGeometry(
'LINESTRING(-95.363151 29.763374,-96.801611 32.782057,'
'-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,'
'-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)',
srid=4326
)
# We check for equality with a tolerance of 10e-5 which is a lower bound
# of the precisions of ref_line coordinates
line = City.objects.aggregate(MakeLine('point'))['point__makeline']
self.assertTrue(
ref_line.equals_exact(line, tolerance=10e-5),
"%s != %s" % (ref_line, line)
)
@skipUnlessDBFeature('supports_union_aggr')
def test_unionagg(self):
"""
Testing the `Union` aggregate.
"""
tx = Country.objects.get(name='Texas').mpoly
# Houston, Dallas -- Ordering may differ depending on backend or GEOS version.
union = GEOSGeometry('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
qs = City.objects.filter(point__within=tx)
with self.assertRaises(ValueError):
qs.aggregate(Union('name'))
# Using `field_name` keyword argument in one query and specifying an
# order in the other (which should not be used because this is
# an aggregate method on a spatial column)
u1 = qs.aggregate(Union('point'))['point__union']
u2 = qs.order_by('name').aggregate(Union('point'))['point__union']
self.assertTrue(union.equals(u1))
self.assertTrue(union.equals(u2))
qs = City.objects.filter(name='NotACity')
self.assertIsNone(qs.aggregate(Union('point'))['point__union'])
def test_within_subquery(self):
"""
Using a queryset inside a geo lookup is working (using a subquery)
(#14483).
"""
tex_cities = City.objects.filter(
point__within=Country.objects.filter(name='Texas').values('mpoly')).order_by('name')
expected = ['Dallas', 'Houston']
if not connection.features.supports_real_shape_operations:
expected.append('Oklahoma City')
self.assertEqual(
list(tex_cities.values_list('name', flat=True)),
expected
)
def test_non_concrete_field(self):
NonConcreteModel.objects.create(point=Point(0, 0), name='name')
list(NonConcreteModel.objects.all())
def test_values_srid(self):
for c, v in zip(City.objects.all(), City.objects.values()):
self.assertEqual(c.point.srid, v['point'].srid)
| {
"content_hash": "3b31b8d59674f7e6f7e0acd149ac5b21",
"timestamp": "",
"source": "github",
"line_count": 550,
"max_line_length": 117,
"avg_line_length": 43.41818181818182,
"alnum_prop": 0.6214405360134003,
"repo_name": "labcodes/django",
"id": "1ccac6fd1c057c2100122c1e3331152a6cb88ae2",
"size": "23880",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/gis_tests/geoapp/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50223"
},
{
"name": "HTML",
"bytes": "174074"
},
{
"name": "JavaScript",
"bytes": "248667"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11332020"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from tornado.web import RequestHandler
class Handler(RequestHandler):
async def get(self):
self.set_header('Content-Type', 'text/plain')
if hasattr(self.application, 'shutting_down') and self.application.shutting_down == True:
self.write('SHUTTING DOWN')
self.set_status(503)
return
self.set_status(200)
self.write('READY') | {
"content_hash": "a7056ad2a9d2a4ff886dc0f59ee356dc",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 97,
"avg_line_length": 33.083333333333336,
"alnum_prop": 0.6322418136020151,
"repo_name": "thomaserlang/storitch",
"id": "1e997ae2bdf2469889d7e1d75c09bdda2e61a29e",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "storitch/handlers/health.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "723"
},
{
"name": "Python",
"bytes": "27439"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
class DetailsPlugin(BaseAdminPlugin):
show_detail_fields = []
show_all_rel_details = True
def result_item(self, item, obj, field_name, row):
if (self.show_all_rel_details or (field_name in self.show_detail_fields)):
rel_obj = None
if hasattr(item.field, 'rel') and isinstance(item.field.rel, models.ManyToOneRel):
rel_obj = getattr(obj, field_name)
elif field_name in self.show_detail_fields:
rel_obj = obj
if rel_obj:
if rel_obj.__class__ in site._registry:
try:
model_admin = site._registry[rel_obj.__class__]
has_view_perm = model_admin(self.admin_view.request).has_view_permission(rel_obj)
has_change_perm = model_admin(self.admin_view.request).has_change_permission(rel_obj)
except:
has_view_perm = self.admin_view.has_model_perm(rel_obj.__class__, 'view')
has_change_perm = self.has_model_perm(rel_obj.__class__, 'change')
else:
has_view_perm = self.admin_view.has_model_perm(rel_obj.__class__, 'view')
has_change_perm = self.has_model_perm(rel_obj.__class__, 'change')
if rel_obj and has_view_perm:
opts = rel_obj._meta
try:
item_res_uri = reverse(
'%s:%s_%s_detail' % (self.admin_site.app_name,
opts.app_label, opts.model_name),
args=(getattr(rel_obj, opts.pk.attname),))
if item_res_uri:
if has_change_perm:
edit_url = reverse(
'%s:%s_%s_change' % (self.admin_site.app_name, opts.app_label, opts.model_name),
args=(getattr(rel_obj, opts.pk.attname),))
else:
edit_url = ''
item.btns.append('<a data-res-uri="%s" data-edit-uri="%s" class="details-handler" rel="tooltip" title="%s"><i class="fa fa-info-sign"></i></a>'
% (item_res_uri, edit_url, _(u'Details of %s') % str(rel_obj)))
except NoReverseMatch:
pass
return item
# Media
def get_media(self, media):
if self.show_all_rel_details or self.show_detail_fields:
media = media + self.vendor('xadmin.plugin.details.js', 'xadmin.form.css')
return media
site.register_plugin(DetailsPlugin, ListAdminView)
| {
"content_hash": "fb40a676d9040168bafb420163f83407",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 167,
"avg_line_length": 47.85245901639344,
"alnum_prop": 0.5220966084275437,
"repo_name": "pobear/django-xadmin",
"id": "168649da16e49fa309904003be66a9266de330ff",
"size": "2921",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "xadmin/plugins/details.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "23733"
},
{
"name": "HTML",
"bytes": "95259"
},
{
"name": "JavaScript",
"bytes": "65236"
},
{
"name": "Python",
"bytes": "425488"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
} |
from calvin.runtime.south.calvinsys import base_calvinsys_object
class MockInputOutput(base_calvinsys_object.BaseCalvinsysObject):
"""
MockInputOutput - Mocked input output device, printing data to stdout
"""
init_schema = {
"description": "Init object",
"type": "object",
"properties": {
"data": {
"description": "Data to return in read",
"type": "array"
}
}
}
can_read_schema = {
"description": "Returns True if data can be read, otherwise False",
"type": "boolean"
}
read_schema = {
"description": "Get data, verifies that can_read has been called."
}
can_write_schema = {
"description": "Always returns True",
"type": "boolean"
}
write_schema = {
"description": "Compares data to expected data specified in actor test, also verifies that can_write has been called."
}
def init(self, data, **kwargs):
self.read_called = False
self._read_allowed = True
self.write_called = False
self._write_allowed = True
self.data = list(data)
self._expected_data = []
calvinsys = kwargs.get('calvinsys', '')
if 'read' in calvinsys:
self.data = calvinsys['read']
if 'write' in calvinsys:
self._expected_data = calvinsys['write']
def can_read(self):
self._read_allowed = True
return len(self.data) > 0
def read(self):
self.read_called = True
if not self._read_allowed:
raise AssertionError("read() called without preceding can_read()")
self._read_allowed = False
return self.data.pop(0)
def can_write(self):
self._write_allowed = True
return True
def write(self, data):
self.write_called = True
if not self._write_allowed:
raise AssertionError("write() called without preceding can_write()")
self._write_allowed = False
if self._expected_data:
expected = self._expected_data.pop(0)
if expected != data:
raise AssertionError("Expected data '%s' does not match '%s'" % (expected, data))
def close(self):
self.data = []
self._expected_data = []
def start_verifying_calvinsys(self):
self._read_allowed = False
self._write_allowed = False
| {
"content_hash": "4319bf5f82f878cd4553a31127591789",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 126,
"avg_line_length": 28.71764705882353,
"alnum_prop": 0.5702580909463335,
"repo_name": "EricssonResearch/calvin-base",
"id": "ebb0272a1eb0410b5967a326e2cfbaea03165dd8",
"size": "3046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvinextras/calvinsys/mock/MockInputOutput.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "769"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "24571"
},
{
"name": "JavaScript",
"bytes": "78325"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "3291484"
},
{
"name": "Shell",
"bytes": "37140"
}
],
"symlink_target": ""
} |
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.languages.node_mono_repo as node
import os
node.owlbot_main(relative_dir="packages/google-cloud-monitoring",staging_excludes=["README.md", "package.json"])
# --------------------------------------------------------------------------
# Modify test configs
# --------------------------------------------------------------------------
# add shared environment variables to test configs
s.move(
".kokoro/common_env_vars.cfg",
".kokoro/common.cfg",
merge=lambda src, dst, _, : f"{dst}\n{src}",
)
for path, subdirs, files in os.walk(f".kokoro/continuous"):
for name in files:
if name == "common.cfg":
file_path = os.path.join(path, name)
s.move(
".kokoro/common_env_vars.cfg",
file_path,
merge=lambda src, dst, _, : f"{dst}\n{src}",
)
| {
"content_hash": "75bbe04535d3d5206727a5aafc8d582a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 112,
"avg_line_length": 34.07142857142857,
"alnum_prop": 0.519916142557652,
"repo_name": "googleapis/google-cloud-node",
"id": "3289c79cd0ea53c24e8edfbdcea263d61ea3bdb4",
"size": "1529",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "packages/google-cloud-monitoring/owlbot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2789"
},
{
"name": "JavaScript",
"bytes": "10920867"
},
{
"name": "Python",
"bytes": "19983"
},
{
"name": "Shell",
"bytes": "58046"
},
{
"name": "TypeScript",
"bytes": "77312562"
}
],
"symlink_target": ""
} |
"""
Meerkat Hermes Tests
Unit tests for Meerkat Hermes util methods and resource classes.
"""
from boto3.dynamodb.conditions import Key
from unittest import mock
from datetime import datetime
import meerkat_hermes.util as util
import meerkat_hermes
from meerkat_hermes import app
import requests
import json
import unittest
import boto3
import logging
import copy
import time
class MeerkatHermesTestCase(unittest.TestCase):
# Define the test subscriber
subscriber = dict(
first_name='Testy',
last_name='McTestFace',
email='[email protected]',
sms='01234567891',
topics=['Test1', 'Test2', 'Test3'],
country="Test"
)
# Define the test message
message = dict(
subject='Test email',
message='Nosetest Message',
html='Test <b>HTML</b> message',
)
@classmethod
def setup_class(self):
"""Setup for testing"""
app.config.from_object('meerkat_hermes.config.Testing')
self.app = meerkat_hermes.app.test_client()
# Load the database
db = boto3.resource(
'dynamodb',
endpoint_url=app.config['DB_URL'],
region_name='eu-west-1'
)
self.subscribers = db.Table(app.config['SUBSCRIBERS'])
self.subscriptions = db.Table(app.config['SUBSCRIPTIONS'])
self.log = db.Table(app.config['LOG'])
# Only show warning level+ logs from boto3, botocore and nose.
# Too verbose otherwise.
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
@classmethod
def teardown_class(self):
"""
At the end of testing, clean up any database mess created by the
tests and log any activity.
"""
# Ideally nothing should be deleted here
# This teardown checks that the database is clean.
# Keep track of # of deletions to log as a warning so dev can check.
deletedObjects = {
"subscribers": 0,
"messages": 0
}
# Get rid of any undeleted test subscribers.
query_response = self.subscribers.query(
IndexName='email-index',
KeyConditionExpression=Key('email').eq(
'[email protected]'
)
)
with self.subscribers.batch_writer() as batch:
for subscriber in query_response['Items']:
batch.delete_item(
Key={
'id': subscriber['id']
}
)
deletedObjects['subscribers'] = len(query_response['Items'])
# Get rid of any test messages that have been logged and not deleted.
query_response = self.log.query(
IndexName='message-index',
KeyConditionExpression=Key('message').eq(self.message['message'])
)
with self.log.batch_writer() as batch:
for message in query_response['Items']:
batch.delete_item(
Key={
'id': message['id']
}
)
deletedObjects['messages'] = len(query_response['Items'])
# Do the logging only if something has been deleted.
if sum(deletedObjects.values()) != 0:
logged = ("TEARING DOWN UTIL TEST CLASS "
"SHOULD NOT REQUIRE DELETION:\n")
for obj in deletedObjects:
if deletedObjects[obj] != 0:
logged += "Deleted " + \
str(deletedObjects[obj]) + " " + obj + ".\n"
meerkat_hermes.app.logger.warning(logged)
assert False
def test_util_replace_keywords(self):
"""
Test the replace keywords utility function that enables mail merge in
our messages.
"""
for key in self.subscriber:
message = "<<" + key + ">>"
value = str(self.subscriber[key])
if(key == 'topics'):
value = "Test1, Test2 and Test3"
self.assertEquals(value, util.replace_keywords(
message, self.subscriber))
def test_util_id_valid(self):
"""
Test the id_valid utility function that checks whether a message ID
already exists.
"""
# Create test message log.
log = {
'id': 'testID',
'destination': [self.subscriber['email']],
'message': self.message['message'],
'medium': ['email'],
'time': util.get_date()
}
self.log.put_item(Item=log)
# Test the id_valid utility function.
existing_id = log['id']
nonexisting_id = 'FAKETESTID'
self.assertFalse(util.id_valid(existing_id))
self.assertTrue(util.id_valid(nonexisting_id))
# Delete the created log
delete_response = self.app.delete('/log/' + log['id'])
delete_response = json.loads(delete_response.data.decode('UTF-8'))
print(delete_response)
self.assertEquals(delete_response['ResponseMetadata'][
'HTTPStatusCode'], 200)
def test_util_check_date(self):
"""Test the create subscriptions utility function."""
self.assertEquals(
datetime.fromtimestamp(time.time()).strftime('%Y:%m:%dT%H:%M:%S'),
util.get_date()
)
# TODO: Tests for these util functions would be almost doubled later on:
# - log_message()
# - send_sms()
# - send_email()
# - delete_subscriber()
# Util unit tests therefore havn't been considered a priority for
# including here. But it would be nice to write proper unit tests for
# these functions when we have time.
def test_subscribe_resource(self):
"""
Test the Subscribe resource, including the PUT, GET and DELETE methods.
"""
# Create the test subscribers
put_response = self.app.put('/subscribe', data=self.subscriber)
self.assertEquals(put_response.status_code, 200)
# Get the assigned subscriber id.
data = json.loads(put_response.data.decode('UTF-8'))
subscriber_id = data['subscriber_id']
print("Subscriber ID is " + data['subscriber_id'])
# Check that the subscriber exists in the data base.
get_response = self.subscribers.get_item(
Key={
'id': data['subscriber_id']
}
)
self.assertEquals(
self.subscriber['email'], get_response['Item']['email']
)
# Try to delete the subscriber.
delete_response = self.app.delete('/subscribe/badID')
self.assertEquals(delete_response.status_code, 500)
delete_response = json.loads(delete_response.data.decode('UTF-8'))
self.assertEquals(delete_response.get('status'), 'unsuccessful')
delete_response = self.app.delete('/subscribe/' + subscriber_id)
self.assertEquals(delete_response.status_code, 200)
delete_response = json.loads(delete_response.data.decode('UTF-8'))
self.assertEquals(delete_response.get('status'), 'successful')
def test_subscribers_resource(self):
"""
Test the Subscribers resource GET method.
"""
# Create four test subscribers, each with a specified country
countries = ['Madagascar', 'Madagascar', 'Madagascar', 'Jordan']
subscriber_ids = []
for i in range(0, len(countries)):
# Create a variation on the test subscriber
subscriber = self.subscriber.copy()
subscriber['country'] = countries[i]
subscriber['first_name'] += str(i)
# Add the subscriber to the database.
subscribe_response = self.app.put('/subscribe', data=subscriber)
subscriber_ids.append(json.loads(
subscribe_response.data.decode('UTF-8')
)['subscriber_id'])
# Get all the Madagascar subscribers
get_response = self.app.get('/subscribers/Madagascar')
get_response = json.loads(get_response.data.decode('UTF-8'))
logging.warning(get_response)
self.assertEqual(len(get_response), 3)
# Get all the Jordan subscribers
get_response = self.app.get('/subscribers/Jordan')
get_response = json.loads(get_response.data.decode('UTF-8'))
logging.warning(get_response)
self.assertEqual(len(get_response), 1)
# Delete the test subscribers.
for subscriber_id in subscriber_ids:
self.app.delete('/subscribe/' + subscriber_id)
def test_verify_resource(self):
"""
Test the Verify resource, including the GET, POST and PUT methods.
"""
# Create the unverified test subscriber
subscribe_response = self.app.put('/subscribe', data=self.subscriber)
subscriber_id = json.loads(
subscribe_response.data.decode('UTF-8'))['subscriber_id']
# Test PUT method.
put_data = {'subscriber_id': subscriber_id, 'code': '1234'}
put_response = self.app.put('/verify', data=put_data)
self.assertEquals(put_response.status_code, 200)
# Test POST method for wrong and right code.
post_data = {'subscriber_id': subscriber_id, 'code': '1231'}
post_response = self.app.post('/verify', data=post_data)
post_response = json.loads(post_response.data.decode('UTF-8'))
self.assertEquals(post_response['matched'], False)
post_data = {'subscriber_id': subscriber_id, 'code': '1234'}
post_response = self.app.post('/verify', data=put_data)
post_response = json.loads(post_response.data.decode('UTF-8'))
self.assertEquals(post_response['matched'], True)
# Test GET method, for unverified and verified user.
get_response = self.app.get('/verify/' + subscriber_id)
self.assertEquals(get_response.status_code, 200)
get_response = self.app.get('/verify/' + subscriber_id)
self.assertEquals(get_response.status_code, 400)
# Delete the user
self.app.delete('/subscribe/' + subscriber_id)
def test_unsubscribe_resource(self):
"""
Test the Unsubscribe resource, including the GET and POST methods.
"""
# Create the test subscriber
subscribe_response = self.app.put('/subscribe', data=self.subscriber)
subscriber_id = json.loads(
subscribe_response.data.decode('UTF-8')
)['subscriber_id']
# Test GET method
get_response = self.app.get('/unsubscribe/' + subscriber_id)
self.assertIn("sure you want to unsubscribe",
get_response.data.decode('UTF-8'))
# Test POST method
post_response = self.app.post('/unsubscribe/' + subscriber_id)
self.assertIn("successfully unsubscribed",
post_response.data.decode('UTF-8'))
# Delete the user
self.app.delete('/subscribe/' + subscriber_id)
def test_email_resource(self):
"""
Test the Email resource PUT method, using the Amazon SES Mailbox
Simulators.
"""
# Create the test subscriber
subscribe_response = self.app.put('/subscribe', data=self.subscriber)
subscriber_id = json.loads(
subscribe_response.data.decode('UTF-8')
)['subscriber_id']
# Test the PUT method using an email address.
email = {**self.message, **{"email": self.subscriber['email']}}
put_response = self.app.put('/email', data=email)
put_response = json.loads(put_response.data.decode('UTF-8'))
self.assertEquals(
put_response['ResponseMetadata']['HTTPStatusCode'], 200
)
# Check that the message has been logged properly.
log_response = self.log.get_item(
Key={
'id': put_response['log_id']
}
)
self.assertEquals(
log_response['Item']['destination'][0], email['email']
)
# Delete the message from the log
self.app.delete('/log/' + put_response['log_id'])
# Test the PUT method using a subscriber ID.
email = {**self.message, **{"subscriber_id": subscriber_id}}
put_response = self.app.put('/email', data=email)
put_response = json.loads(put_response.data.decode('UTF-8'))
self.assertEquals(put_response['ResponseMetadata'][
'HTTPStatusCode'], 200)
# Check that the message has been logged properly.
log_response = self.log.get_item(
Key={
'id': put_response['log_id']
}
)
self.assertEquals(
log_response['Item']['destination'][0], self.subscriber['email']
)
# Delete the user
self.app.delete('/subscribe/' + subscriber_id)
# Delete the message from the log
self.app.delete('/log/' + put_response['log_id'])
def test_log_resource(self):
"""Test the Log resource GET and Delete methods."""
# Create test message log.
log = {
'id': 'testID',
'destination': [self.subscriber['email']],
'message': self.message['message'],
'medium': ['email'],
'time': util.get_date()
}
self.log.put_item(Item=log)
# Test the GET Method
get_response = self.app.get('/log/' + log['id'])
get_response = json.loads(get_response.data.decode('UTF-8'))
print(get_response)
self.assertEquals(get_response['Item']['destination'][
0], self.subscriber['email'])
self.assertEquals(get_response['Item'][
'message'], self.message['message'])
# Test the DELETE Method
delete_response = self.app.delete('/log/' + log['id'])
delete_response = json.loads(delete_response.data.decode('UTF-8'))
print(delete_response)
self.assertEquals(
delete_response['ResponseMetadata']['HTTPStatusCode'],
200
)
@mock.patch('meerkat_hermes.util.boto3.client')
def test_sms_resource(self, sns_mock):
"""
Test the SMS resource PUT method, using the fake response returned
by util.send_sms().
"""
sms = {
"message": self.message['message'],
"sms": self.subscriber['sms']
}
# Create the mock response.
sns_mock.return_value.publish.return_value = {
"ResponseMetadata": {
"RequestId": "c13d1005-2433-55e0-91bc-4236210aa11c",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "c13d1005-2433-55e0-91bc-4236210aa11c",
"content-type": "text/xml",
"date": "Wed, 13 Sep 2017 10:05:45 GMT",
"content-length": "294"
},
"RetryAttempts": 0
},
"log_id": "G2c820c31a05b4da593c689bd8c534c82",
"MessageId": "edd4bd71-9ecf-5ebc-9d5c-ef429bf6da40"
}
# Test PUT method.
put_response = self.app.put('/sms', data=sms)
put_response = json.loads(put_response.data.decode('UTF-8'))
self.assertTrue(sns_mock.return_value.publish)
sns_mock.return_value.publish.assert_called_with(
Message=sms['message'],
PhoneNumber=sms['sms'],
MessageAttributes={
'AWS.SNS.SMS.SenderID': {
'DataType': 'String',
'StringValue': app.config['FROM']
}
}
)
self.assertEquals(
put_response['ResponseMetadata']['RetryAttempts'],
0
)
self.assertEquals(
put_response['ResponseMetadata']['HTTPStatusCode'],
200
)
# Check that the message has been logged properly.
log_response = self.log.get_item(
Key={
'id': put_response['log_id']
}
)
self.assertEquals(log_response['Item']['destination'][0], sms['sms'])
# Delete the message from the log
self.app.delete('/log/' + put_response['log_id'])
@mock.patch('meerkat_hermes.util.requests.post')
def test_gcm_resource(self, request_mock):
"""
Test the GCM resource PUT method, using the fake response returned
by util.send_gcm().
"""
gcm = {
"message": self.message['message'],
"destination": '/topics/demo'
}
# Create the mock response.
dummyResponseDict = {
"multicast_id":123456,
"success":1,
"failure":0,
"canonical_ids":0,
"results":[{"message_id":"0:abc123"}]
}
dummyResponse = requests.Response()
dummyResponse.status_code = 200
dummyResponse._content = json.dumps(dummyResponseDict).encode()
request_mock.return_value = dummyResponse
# Test PUT method.
put_response = self.app.put('/gcm', data=gcm)
put_response = json.loads(put_response.data.decode('UTF-8'))
call_data = {
"data":
{"message": self.message['message']},
"to": "/topics/demo"}
call_headers={
'Content-Type': 'application/json',
'Authorization': 'key='+app.config['GCM_AUTHENTICATION_KEY']}
self.assertTrue(request_mock.called)
request_mock.assert_called_with('https://gcm-http.googleapis.com/gcm/send',
data=json.dumps(call_data),
headers=call_headers)
self.assertEquals(put_response['success'], 1)
# Check that the message has been logged properly.
log_response = self.log.get_item(
Key={
'id': put_response['log_id']
}
)
print(str(log_response))
self.assertEquals(log_response['Item']['destination'], gcm['destination'])
# Delete the message from the log
self.app.delete('/log/' + put_response['log_id'])
@mock.patch('meerkat_hermes.util.boto3.client')
def test_publish_resource(self, boto_mock):
"""Test the Publish resource PUT method."""
def clones(object, times=None):
# Generator to yield clones of an object, infitnely or <n times.
# Used to generate nexmo response for the mock_sms_response
if times is None:
while True:
yield copy.copy(object)
else:
for i in range(times):
yield copy.copy(object)
# Createfour test subscribers, each with subscriptions to a different
# list of topics.
topic_lists = [
['Test1', 'Test2'],
['Test1'],
['Test2'],
['Test3'],
['Test1']
]
subscriber_ids = []
for i in range(0, len(topic_lists)):
# Create a variation on the test subscriber
subscriber = self.subscriber.copy()
subscriber['topics'] = topic_lists[i]
subscriber['first_name'] += str(i)
# Remove the SMS field from three of the subscribers
if(i % 2 != 0):
del subscriber['sms']
# Create an unverified subscriber
if i is not 4:
subscriber['verified'] = True
# Add the subscriber to the database.
subscribe_response = self.app.put('/subscribe', data=subscriber)
subscriber_ids.append(json.loads(
subscribe_response.data.decode('UTF-8')
)['subscriber_id'])
# Create the mock response.
boto_mock.return_value.publish.side_effect = clones({
"ResponseMetadata": {
"RequestId": "c13d1005-2433-55e0-91bc-4236210aa11c",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "c13d1005-2433-55e0-91bc-4236210aa11c",
"content-type": "text/xml",
"date": "Wed, 13 Sep 2017 10:05:45 GMT",
"content-length": "294"
},
"RetryAttempts": 0
},
"log_id": "G2c820c31a05b4da593c689bd8c534c82",
"MessageId": "edd4bd71-9ecf-5ebc-9d5c-ef429bf6da40"
})
boto_mock.return_value.send_email.side_effect = clones({
"MessageId": "0102015e7afbfec3-cf8df94b-81bc-4c9b5966a4-000000",
"ResponseMetadata": {
"RequestId": "270fa909-9876-11e7-a0db-e3a14f067914",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "270fa909-9876-11e7-a0db-e3a14f067914",
"content-type": "text/xml",
"date": "Wed, 13 Sep 2017 11:24:48 GMT",
"content-length": "326"
},
"RetryAttempts": 0
},
"log_id": "G891694c3d4364f89bb124e31bfb15b58",
})
# Create the message.
message = self.message.copy()
message['html-message'] = message.pop('html')
message['medium'] = ['email', 'sms']
# Keep track of the message IDs so we can delete the logs afterwards.
message_ids = []
# Test the PUT Method with different calls.
# -----------------------------------------
# Publish the test message to topic Test4.
message['topics'] = ['Test4']
message['id'] = "testID1"
message_ids.append(message['id'])
put_response = self.app.put('/publish', data=message)
put_response = json.loads(put_response.data.decode('UTF-8'))
print(put_response)
# No subscribers have subscribed to 'Test4'.
# No messages should be sent
# Check that no messages have been sent and that the sms response has
# not been called.
self.assertEquals(len(put_response), 0)
self.assertFalse(boto_mock.return_value.publish.called)
# Publish the test message to topic Test3.
message['topics'] = ['Test3']
message['id'] = "testID2"
message_ids.append(message['id'])
put_response = self.app.put('/publish', data=message)
put_response = json.loads(put_response.data.decode('UTF-8'))
print("Response to publishing message to topic: " +
str(message['topics']) + "\n" + str(put_response))
# Only subscriber 4 has subscription to 'Test3'.
# Subscriber 4 hasn't given an SMS number, so only one email is sent.
# Check only one email is sent and no sms calls are made.
print(put_response)
self.assertEquals(len(put_response), 1)
self.assertFalse(boto_mock.return_value.publish.called)
self.assertEquals(put_response[0]['Destination'][
0], self.subscriber['email'])
# Publish the test message to topic Test1.
message['topics'] = ['Test1']
message['id'] = "testID3"
message_ids.append(message['id'])
put_response = self.app.put('/publish', data=message)
put_response = json.loads(put_response.data.decode('UTF-8'))
print("Response to publishing message to topic: " +
str(message['topics']) + "\n" + str(put_response))
# Subscriber 1 and 2 have subscriptions to 'Test1'.
# Subscriber 5 is unverified so gets no messages.
# Subscriber 2 hasn't given an SMS number, so 2 emails and 1 sms sent.
# Check three messages sent in total and sms mock called once.
self.assertEquals(len(put_response), 3)
self.assertTrue(boto_mock.return_value.publish.call_count == 1)
# Publish the test message to both topics Test1 and Test2.
message['topics'] = ['Test1', 'Test2']
message['id'] = "testID4"
message_ids.append(message['id'])
put_response = self.app.put('/publish', data=message)
put_response = json.loads(put_response.data.decode('UTF-8'))
print("Response to publishing message to topic: " +
str(message['topics']) + "\n" + str(put_response))
# Sub 1 subscribed to 'Test1' and 'Test2' but only gets 1 sms & email.
# Sub 2 subscribed to 'Test1' but no sms, so gets just one email.
# Sub 3 subscribed to 'Test2' gets 1 email and sms.
# Sub 5 subscriber to 'Test1' but unverified so gets no messages.
# Note that the publish resource removes duplications for subscriber 1.
# This results in 4 messages to sub 1, 1 to sub 2, and 2 to sub 3.
# Check number of messages sent is 7 and that sms mock has been called
# ANOTHER 2 times (i.e. called 1+2=3 times in total)
self.assertEquals(len(put_response), 5)
self.assertTrue(boto_mock.return_value.publish.call_count == 3)
# Delete the logs.
for message_id in message_ids:
self.app.delete('/log/' + message_id)
# Delete the test subscribers.
for subscriber_id in subscriber_ids:
self.app.delete('/subscribe/' + subscriber_id)
# Test whether the publish resources squashes requests when they exceed
# the rate limit. Already called 4 times so set the limit to 4 and
# check that a 5th attempt to publish fails....
app.config['PUBLISH_RATE_LIMIT'] = 4
message['topics'] = ['Test4']
message['id'] = "testID1"
message_ids.append(message['id'])
put_response = self.app.put('/publish', data=message)
put_response_json = json.loads(put_response.data.decode('UTF-8'))
print(put_response_json)
print(put_response)
self.assertEquals(put_response.status_code, 503)
self.assertTrue(put_response_json.get('message', False))
app.config['PUBLISH_RATE_LIMIT'] = 20
# TODO Test Error and Notify Resources
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "4aa66b1c20e7360012364cc5afb5ce81",
"timestamp": "",
"source": "github",
"line_count": 698,
"max_line_length": 83,
"avg_line_length": 37.64756446991404,
"alnum_prop": 0.5708196970850141,
"repo_name": "meerkat-code/meerkat_hermes",
"id": "9e7a809463ebe54feee0c0bef2fa6a1b8e93d9c5",
"size": "26301",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "meerkat_hermes/test/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90243"
},
{
"name": "Shell",
"bytes": "443"
}
],
"symlink_target": ""
} |
__author__ = 'hou'
import subprocess
def run(cmd, capture = False):
out_stream = subprocess.PIPE if capture else None
err_stream = subprocess.PIPE if capture else None
print cmd
p = subprocess.Popen(cmd, shell=True, stdout=out_stream, stderr=err_stream)
(stdout, stderr) = p.communicate()
stdout = stdout.strip() if stdout else ""
stderr = stderr.strip() if stderr else ""
return_code = p.returncode
success = (return_code == 0)
return Result(cmd, stdout, stderr, success, return_code)
class Result(object):
def __init__(self, cmd, stdout, stderr, success, return_code):
self.value = {}
self.value.setdefault('cmd', cmd)
self.value.setdefault('stdout', stdout)
self.value.setdefault('stderr', stderr)
self.value.setdefault('success', success)
self.value.setdefault('return_code', return_code)
def cmd(self):
return self.value.get('cmd', '')
def stdout(self):
return self.value.get('stdout', '')
def stderr(self):
return self.value.get('stderr', '')
def success(self):
return self.value.get('success', False)
def return_code(self):
return self.value.get('return_code', -1)
def __repr__(self):
return self.value.__repr__()
| {
"content_hash": "29652441d1e4fdf928054cd0111c5fed",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 28.217391304347824,
"alnum_prop": 0.6240369799691834,
"repo_name": "begeekmyfriend/ezfm_diarisation",
"id": "d2f249a7e3cd777a5584212453abddbd134e46c3",
"size": "1314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7105"
}
],
"symlink_target": ""
} |
people = 20
cats = 30
dogs = 15
if people < cats:
print "Too many cats! The world is doomed!"
if people > cats:
print "Not many cats! The world is saved!"
if people < dogs:
print "The world is drooled on!"
if people > dogs:
print "The world is dry!"
dogs += 5
if people >= dogs:
print "People are greater than or equal to dogs."
if people <= dogs:
print "People are less than or equal to dogs."
if people == dogs:
print "People are dogs."
| {
"content_hash": "c20883cfe21e46aeed0ee8dc6ad7f5df",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 51,
"avg_line_length": 16.535714285714285,
"alnum_prop": 0.6587473002159827,
"repo_name": "jmcguire/learning",
"id": "c71de51a182fd8ef9ab1d750262f5cd938a199b2",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/learn_python_the_hard_way/ex29.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Io",
"bytes": "5745"
},
{
"name": "Prolog",
"bytes": "1026"
},
{
"name": "Python",
"bytes": "107835"
},
{
"name": "Ruby",
"bytes": "3066"
}
],
"symlink_target": ""
} |
from google.cloud import managedidentities_v1
def sample_validate_trust():
# Create a client
client = managedidentities_v1.ManagedIdentitiesServiceClient()
# Initialize request argument(s)
trust = managedidentities_v1.Trust()
trust.target_domain_name = "target_domain_name_value"
trust.trust_type = "EXTERNAL"
trust.trust_direction = "BIDIRECTIONAL"
trust.target_dns_ip_addresses = ['target_dns_ip_addresses_value1', 'target_dns_ip_addresses_value2']
trust.trust_handshake_secret = "trust_handshake_secret_value"
request = managedidentities_v1.ValidateTrustRequest(
name="name_value",
trust=trust,
)
# Make the request
operation = client.validate_trust(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END managedidentities_v1_generated_ManagedIdentitiesService_ValidateTrust_sync]
| {
"content_hash": "d0dd9f413e249f696083794ceef0d1e2",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 104,
"avg_line_length": 30.967741935483872,
"alnum_prop": 0.7208333333333333,
"repo_name": "googleapis/python-managed-identities",
"id": "b5ec2ea243074a80009b993b1a24d8f80a5009b0",
"size": "2377",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/managedidentities_v1_generated_managed_identities_service_validate_trust_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "410105"
},
{
"name": "Shell",
"bytes": "30696"
}
],
"symlink_target": ""
} |
"""Author: Krzysztof Trzepla
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Brings up a set of onepanel nodes. They can create separate clusters.
"""
import copy
import json
import os
from . import common, docker, dns, gui
def panel_domain(instance, uid):
"""Formats domain for a docker hosting onepanel."""
return common.format_hostname(instance, uid)
def panel_hostname(node_name, instance, uid):
"""Formats hostname for a docker hosting onepanel.
NOTE: Hostnames are also used as docker names!
"""
return common.format_hostname([node_name, instance], uid)
def panel_erl_node_name(node_name, instance, uid):
"""Formats erlang node name for a vm on onepanel docker.
"""
hostname = panel_hostname(node_name, instance, uid)
return common.format_erl_node_name('onepanel', hostname)
def _tweak_config(config, name, onepanel_instance, uid):
cfg = copy.deepcopy(config)
cfg['nodes'] = {'node': cfg['nodes'][name]}
vm_args = cfg['nodes']['node']['vm.args']
vm_args['name'] = panel_erl_node_name(name, onepanel_instance, uid)
return cfg
def _node_up(image, bindir, config, dns_servers, extra_volumes, logdir):
node_name = config['nodes']['node']['vm.args']['name']
(name, sep, hostname) = node_name.partition('@')
command = \
'''set -e
mkdir -p /root/bin/node/log/
echo 'while ((1)); do chown -R {uid}:{gid} /root/bin/node/log; sleep 1; done' > /root/bin/chown_logs.sh
bash /root/bin/chown_logs.sh &
cat <<"EOF" > /tmp/gen_dev_args.json
{gen_dev_args}
EOF
escript bamboos/gen_dev/gen_dev.escript /tmp/gen_dev_args.json
/root/bin/node/bin/onepanel console'''
command = command.format(
uid=os.geteuid(),
gid=os.getegid(),
gen_dev_args=json.dumps({'onepanel': config}))
bindir = os.path.abspath(bindir)
volumes = [(bindir, bindir, 'ro')]
volumes.extend(extra_volumes)
if logdir:
logdir = os.path.join(os.path.abspath(logdir), hostname)
os.makedirs(logdir)
volumes.append((logdir, '/root/bin/node/log', 'rw'))
container = docker.run(
image=image,
name=hostname,
hostname=hostname,
detach=True,
interactive=True,
tty=True,
workdir=bindir,
volumes=volumes,
dns_list=dns_servers,
privileged=True,
command=command)
return (
{
'docker_ids': [container],
'onepanel_nodes': [node_name]
}
)
def _configure_posix_storage(storages):
posix_storages = filter(lambda storage: storage.get('type') == 'posix',
storages)
posix_storage_out = {}
volumes = []
for storage in posix_storages:
name = storage.get('name')
if name:
(host_path, docker_path, mode) = common.volume_for_storage(name)
volumes.append((host_path, docker_path, mode))
posix_storage_out[name] = {
"host_path": host_path, "docker_path": docker_path
}
return volumes, {"storages": {"posix": posix_storage_out}}
def up(image, bindir, dns_server, uid, config_path, storages_dockers=None,
logdir=None):
config = common.parse_json_config_file(config_path)
input_dir = config['dirs_config']['onepanel']['input_dir']
dns_servers, output = dns.maybe_start(dns_server, uid)
for onepanel_instance in config['onepanel_domains']:
instance_config = config['onepanel_domains'][onepanel_instance]
image = instance_config.get('image', image)
os_config_name = instance_config.get('os_config')
storages = config.get('os_configs', {}).get(os_config_name, {}). \
get('storages', [])
extra_volumes, posix_storage_out = _configure_posix_storage(storages)
common.merge(output, posix_storage_out)
# Check if gui override is enabled in env and start it
if 'gui_override' in instance_config and isinstance(
instance_config['gui_override'], dict):
gui_config = instance_config['gui_override']
onepanel_hostname = panel_domain(onepanel_instance, uid)
extra_volumes.extend(
gui.extra_volumes(gui_config, onepanel_hostname))
gui.override_gui(gui_config, onepanel_hostname)
gen_dev_cfg = {
'config': {
'input_dir': input_dir,
'target_dir': '/root/bin'
},
'nodes': instance_config['onepanel']
}
configs = [_tweak_config(gen_dev_cfg, node, onepanel_instance, uid)
for node in gen_dev_cfg['nodes']]
for cfg in configs:
node_out = _node_up(image, bindir, cfg, dns_servers, extra_volumes,
logdir)
common.merge(output, node_out)
# Check if gui livereload is enabled in env and turn it on
if 'gui_override' in instance_config and isinstance(
instance_config['gui_override'], dict):
gui_config = instance_config['gui_override']
livereload_flag = gui_config['livereload']
if livereload_flag:
livereload_dir = gui_config['mount_path']
gui.run_livereload(node_out['docker_ids'][0], livereload_dir)
return output
| {
"content_hash": "50b69874b6a06b8c8142125ea4c014b4",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 103,
"avg_line_length": 33.39506172839506,
"alnum_prop": 0.6046210720887246,
"repo_name": "kliput/onezone-gui",
"id": "f8d08fa398f38c5185e8a73761947dba274c10ac",
"size": "5425",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bamboos/docker/environment/panel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Erlang",
"bytes": "115095"
},
{
"name": "HTML",
"bytes": "1879"
},
{
"name": "JavaScript",
"bytes": "7877"
},
{
"name": "Makefile",
"bytes": "784"
},
{
"name": "Python",
"bytes": "210437"
},
{
"name": "Shell",
"bytes": "614"
}
],
"symlink_target": ""
} |
"""
Generate graph of Double Elo model's error for given K.
"""
__author__ = 'riko'
import matplotlib.pyplot as plt
import numpy as np
import data_tools as dt
import models
elo = models.DoubleEloModel()
train_data = dt.get_main_matches_data()
x = np.array([1.0 * i / 4 for i in range(4, 120)])
y = np.array([elo._train_params([p], train_data, verbose=True) for p in x])
fig = plt.figure()
plt.plot(x, y, '-r')
fig.suptitle('', fontsize=20)
plt.ylabel('Improved model error', fontsize=10)
plt.xlabel('K', fontsize=10)
plt.show() | {
"content_hash": "a662f5755c257e1fae37e7e6d81c1d6b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 18.551724137931036,
"alnum_prop": 0.6747211895910781,
"repo_name": "erix5son/Tennis-Modelling",
"id": "ebd04721897a48389c8f2443de6504756a604b64",
"size": "538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/elo_k_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4573"
},
{
"name": "HTML",
"bytes": "9036"
},
{
"name": "JavaScript",
"bytes": "25666"
},
{
"name": "OpenEdge ABL",
"bytes": "21838"
},
{
"name": "Python",
"bytes": "117304"
}
],
"symlink_target": ""
} |
__author__ = 'Stephanie Federwisch'
"""Tests for statistics continuous computations."""
from core import jobs_registry
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_jobs
from core.platform import models
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
from core.tests import test_utils
import feconf
class ModifiedStatisticsAggregator(stats_jobs.StatisticsAggregator):
"""A modified StatisticsAggregator that does not start a new batch
job when the previous one has finished.
"""
@classmethod
def _get_batch_job_manager_class(cls):
return ModifiedStatisticsMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
pass
class ModifiedStatisticsMRJobManager(stats_jobs.StatisticsMRJobManager):
@classmethod
def _get_continuous_computation_class(cls):
return ModifiedStatisticsAggregator
class StatsAggregatorUnitTests(test_utils.GenericTestBase):
"""Tests for statistics aggregations."""
ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS = [
ModifiedStatisticsAggregator]
def _record_start(self, exp_id, exp_version, state, session_id):
event_services.StartExplorationEventHandler.record(
exp_id, exp_version, state, session_id, {},
feconf.PLAY_TYPE_NORMAL)
def _record_leave(self, exp_id, exp_version, state, session_id):
event_services.MaybeLeaveExplorationEventHandler.record(
exp_id, exp_version, state, session_id, 27, {},
feconf.PLAY_TYPE_NORMAL)
def _record_state_hit(self, exp_id, exp_version, state, session_id):
event_services.StateHitEventHandler.record(
exp_id, exp_version, state, session_id, {},
feconf.PLAY_TYPE_NORMAL)
def _create_state_counter(self, exp_id, state, first_entry_count):
counter = stats_models.StateCounterModel.get_or_create(exp_id, state)
counter.first_entry_count = first_entry_count
counter.put()
def test_state_hit(self):
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS):
exp_id = 'eid'
exp_version = 1
exploration = self.save_new_valid_exploration(exp_id, 'owner')
state = exploration.init_state_name
state2 = 'sid2'
self._record_state_hit(exp_id, exp_version, state, 'session1')
self._record_state_hit(exp_id, exp_version, state, 'session2')
self._create_state_counter(exp_id, state, 18)
self._record_state_hit(exp_id, exp_version, state2, 'session1')
self._create_state_counter(exp_id, state2, 9)
self.process_and_flush_pending_tasks()
ModifiedStatisticsAggregator.start_computation()
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
output_model = stats_jobs.StatisticsAggregator.get_statistics(
exp_id, exp_version)
self.assertEqual(
output_model['state_hit_counts'][state]['first_entry_count'],
2)
self.assertEqual(
output_model['state_hit_counts'][state2]['first_entry_count'],
1)
output_model = stats_jobs.StatisticsAggregator.get_statistics(
exp_id, stats_jobs._NO_SPECIFIED_VERSION_STRING)
self.assertEqual(
output_model['state_hit_counts'][state]['first_entry_count'],
18)
self.assertEqual(
output_model['state_hit_counts'][state2]['first_entry_count'],
9)
output_model = stats_jobs.StatisticsAggregator.get_statistics(
exp_id, stats_jobs._ALL_VERSIONS_STRING)
self.assertEqual(
output_model['state_hit_counts'][state]['first_entry_count'],
20)
self.assertEqual(
output_model['state_hit_counts'][state2]['first_entry_count'],
10)
def test_no_completion(self):
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS):
exp_id = 'eid'
exp_version = 1
exploration = self.save_new_valid_exploration(exp_id, 'owner')
state = exploration.init_state_name
self._record_start(exp_id, exp_version, state, 'session1')
self._record_start(exp_id, exp_version, state, 'session2')
self.process_and_flush_pending_tasks()
ModifiedStatisticsAggregator.start_computation()
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
model_id = '%s:%s' % (exp_id, exp_version)
output_model = stats_models.ExplorationAnnotationsModel.get(
model_id)
self.assertEqual(output_model.num_starts, 2)
self.assertEqual(output_model.num_completions, 0)
def test_all_complete(self):
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS):
exp_id = 'eid'
exp_version = 1
exploration = self.save_new_valid_exploration(exp_id, 'owner')
state = exploration.init_state_name
self._record_start(exp_id, exp_version, state, 'session1')
self._record_leave(
exp_id, exp_version, feconf.END_DEST, 'session1')
self._record_start(exp_id, exp_version, state, 'session2')
self._record_leave(
exp_id, exp_version, feconf.END_DEST, 'session2')
self.process_and_flush_pending_tasks()
ModifiedStatisticsAggregator.start_computation()
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
model_id = '%s:%s' % (exp_id, exp_version)
output_model = stats_models.ExplorationAnnotationsModel.get(
model_id)
self.assertEqual(output_model.num_starts, 2)
self.assertEqual(output_model.num_completions, 2)
def test_multiple_maybe_leaves_same_session(self):
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS):
exp_id = 'eid'
exp_version = 1
exploration = self.save_new_valid_exploration(exp_id, 'owner')
state = exploration.init_state_name
self._record_start(exp_id, exp_version, state, 'session1')
self._record_leave(exp_id, exp_version, state, 'session1')
self._record_leave(exp_id, exp_version, state, 'session1')
self._record_leave(
exp_id, exp_version, feconf.END_DEST, 'session1')
self._record_start(exp_id, exp_version, state, 'session2')
self._record_leave(exp_id, exp_version, state, 'session2')
self._record_leave(exp_id, exp_version, state, 'session2')
self.process_and_flush_pending_tasks()
ModifiedStatisticsAggregator.start_computation()
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
model_id = '%s:%s' % (exp_id, exp_version)
output_model = stats_models.ExplorationAnnotationsModel.get(
model_id)
self.assertEqual(output_model.num_starts, 2)
self.assertEqual(output_model.num_completions, 1)
def test_multiple_explorations(self):
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS):
exp_version = 1
exp_id_1 = 'eid1'
exploration = self.save_new_valid_exploration(exp_id_1, 'owner')
state_1_1 = exploration.init_state_name
exp_id_2 = 'eid2'
exploration = self.save_new_valid_exploration(exp_id_2, 'owner')
state_2_1 = exploration.init_state_name
EMPTY_STATE_HIT_COUNTS_DICT = {
'First State': {
'total_entry_count': 0,
'no_answer_count': 0,
'first_entry_count': 0,
},
}
# Record 2 start events for exp_id_1 and 1 start event for
# exp_id_2.
self._record_start(exp_id_1, exp_version, state_1_1, 'session1')
self._record_start(exp_id_1, exp_version, state_1_1, 'session2')
self._record_start(exp_id_2, exp_version, state_2_1, 'session3')
self.process_and_flush_pending_tasks()
ModifiedStatisticsAggregator.start_computation()
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
results = ModifiedStatisticsAggregator.get_statistics(
exp_id_1, 'all')
self.assertDictContainsSubset({
'start_exploration_count': 2,
'complete_exploration_count': 0,
'state_hit_counts': EMPTY_STATE_HIT_COUNTS_DICT,
}, results)
results = ModifiedStatisticsAggregator.get_statistics(
exp_id_2, 'all')
self.assertDictContainsSubset({
'start_exploration_count': 1,
'complete_exploration_count': 0,
'state_hit_counts': EMPTY_STATE_HIT_COUNTS_DICT,
}, results)
# Record 1 more start event for exp_id_1 and 1 more start event
# for exp_id_2.
self._record_start(exp_id_1, exp_version, state_1_1, 'session2')
self._record_start(exp_id_2, exp_version, state_2_1, 'session3')
self.process_and_flush_pending_tasks()
results = ModifiedStatisticsAggregator.get_statistics(
exp_id_1, 'all')
self.assertDictContainsSubset({
'start_exploration_count': 3,
'complete_exploration_count': 0,
'state_hit_counts': EMPTY_STATE_HIT_COUNTS_DICT,
}, results)
results = ModifiedStatisticsAggregator.get_statistics(
exp_id_2, 'all')
self.assertDictContainsSubset({
'start_exploration_count': 2,
'complete_exploration_count': 0,
'state_hit_counts': EMPTY_STATE_HIT_COUNTS_DICT,
}, results)
class OneOffNullStateHitEventsMigratorTest(test_utils.GenericTestBase):
EXP_ID = 'exp_id'
def setUp(self):
super(OneOffNullStateHitEventsMigratorTest, self).setUp()
self.save_new_valid_exploration(
self.EXP_ID, 'user_id', 'title', 'category')
exploration = exp_services.get_exploration_by_id(self.EXP_ID)
# Create one good and two bad StateHit events.
event_services.StateHitEventHandler.record(
self.EXP_ID, 1, exploration.init_state_name,
'good_session_id', {}, feconf.PLAY_TYPE_NORMAL)
event_services.StateHitEventHandler.record(
self.EXP_ID, 1, None,
'bad_session_id_1', {'a': 'b'}, feconf.PLAY_TYPE_NORMAL)
event_services.StateHitEventHandler.record(
self.EXP_ID, 1, None,
'bad_session_id_2', {}, feconf.PLAY_TYPE_NORMAL)
self.process_and_flush_pending_tasks()
def test_migration_job_works(self):
self.assertEqual(
stats_models.StateHitEventLogEntryModel.query().count(), 3)
self.assertEqual(
stats_models.MaybeLeaveExplorationEventLogEntryModel.query().count(),
0)
# Store a temporary copy of the instance corresponding to
# bad_session_id_1.
source_item = None
for item in stats_models.StateHitEventLogEntryModel.query():
if item.session_id == 'bad_session_id_1':
source_item = item
# Run the job once.
job_id = (stats_jobs.NullStateHitEventsMigrator.create_new())
stats_jobs.NullStateHitEventsMigrator.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
stats_models.StateHitEventLogEntryModel.query().count(), 1)
self.assertEqual(
stats_models.MaybeLeaveExplorationEventLogEntryModel.query().count(),
2)
self.assertEqual(
stats_jobs.NullStateHitEventsMigrator.get_output(job_id),
[['migrated_instances', ['exp_id v1', 'exp_id v1']]])
# Run the job again; nothing new should happen.
new_job_id = (stats_jobs.NullStateHitEventsMigrator.create_new())
stats_jobs.NullStateHitEventsMigrator.enqueue(new_job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
stats_models.StateHitEventLogEntryModel.query().count(), 1)
self.assertEqual(
stats_models.MaybeLeaveExplorationEventLogEntryModel.query().count(),
2)
self.assertEqual(
stats_jobs.NullStateHitEventsMigrator.get_output(new_job_id), [])
target_item = None
for item in stats_models.MaybeLeaveExplorationEventLogEntryModel.query():
if item.session_id == 'bad_session_id_1':
target_item = item
self.assertIsNotNone(target_item)
self.assertNotEqual(source_item, target_item)
self.assertNotEqual(source_item.id, target_item.id)
self.assertEqual(
target_item.event_type, feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION)
self.assertEqual(
source_item.exploration_id, target_item.exploration_id)
self.assertEqual(
source_item.exploration_version, target_item.exploration_version)
self.assertEqual(target_item.state_name, feconf.END_DEST)
self.assertEqual(target_item.client_time_spent_in_secs, 0)
self.assertEqual(source_item.params, target_item.params)
self.assertEqual(source_item.play_type, target_item.play_type)
self.assertEqual(source_item.created_on, target_item.created_on)
# It is not possible to set the last_updated field explicitly.
self.assertLess(source_item.last_updated, target_item.last_updated)
self.assertEqual(source_item.deleted, target_item.deleted)
self.assertEqual(target_item.deleted, False)
| {
"content_hash": "e6c9dbd4a243f4d5ac8ac9ccbee0b1b7",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 81,
"avg_line_length": 42.873198847262245,
"alnum_prop": 0.612018552127445,
"repo_name": "fernandopinhati/oppia",
"id": "577c68b0fed7ab824c6897789e73f011e95a4bed",
"size": "15500",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "core/domain/stats_jobs_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "363"
},
{
"name": "CSS",
"bytes": "44925"
},
{
"name": "HTML",
"bytes": "256657"
},
{
"name": "JavaScript",
"bytes": "1264341"
},
{
"name": "Python",
"bytes": "1422217"
},
{
"name": "Shell",
"bytes": "24808"
}
],
"symlink_target": ""
} |
from muntjac.api import \
(VerticalLayout, TextField, RichTextArea, HorizontalLayout,
NativeSelect, Slider, Button, Alignment)
from muntjac.ui.button import IClickListener
from muntjac.ui.window import Notification
class NotificationCustomExample(VerticalLayout):
_CAPTION_PROPERTY = 'CAPTION'
def __init__(self):
super(NotificationCustomExample, self).__init__()
self.setSpacing(True)
caption = TextField('Caption', 'Message sent')
caption.setDescription(('Main info; a short caption-only '
'notification is often most effective.'))
caption.setWidth('200px')
self.addComponent(caption)
description = RichTextArea()
description.setWidth('100%')
description.setValue('<p>to <i>[email protected]</i></p>')
description.setCaption('Description')
description.setDescription(('Additional information; '
'try to keep it short.'))
self.addComponent(description)
horiz = HorizontalLayout()
horiz.setSpacing(True)
self.addComponent(horiz)
position = NativeSelect('Position')
position.setNullSelectionAllowed(False)
horiz.addComponent(position)
self.initPositionItems(position)
style = NativeSelect('Style')
style.setNullSelectionAllowed(False)
horiz.addComponent(style)
self.initTypeItems(style)
delay = Slider('Delay (msec), -1 means click to hide')
delay.setDescription(('Delay before fading<br/>Pull all the way to '
'the left to get -1, which means forever (click to hide).'))
delay.setWidth('100%') # 'description' will push width
delay.setMin(Notification.DELAY_FOREVER)
delay.setMax(10000)
self.addComponent(delay)
# TODO icon select
l = ShowListener(self, caption, description, style, position, delay)
show = Button('Show notification', l)
self.addComponent(show)
self.setComponentAlignment(show, Alignment.MIDDLE_RIGHT)
def initPositionItems(self, position):
# Helper to fill the position select with the various possibilities
position.addContainerProperty(self._CAPTION_PROPERTY, str, None)
position.setItemCaptionPropertyId(self._CAPTION_PROPERTY)
i = position.addItem(Notification.POSITION_TOP_LEFT)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Top left')
i = position.addItem(Notification.POSITION_CENTERED_TOP)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Top centered')
i = position.addItem(Notification.POSITION_TOP_RIGHT)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Top right')
i = position.addItem(Notification.POSITION_CENTERED)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Centered')
i = position.addItem(Notification.POSITION_BOTTOM_LEFT)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Bottom left')
i = position.addItem(Notification.POSITION_CENTERED_BOTTOM)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Bottom, centered')
i = position.addItem(Notification.POSITION_BOTTOM_RIGHT)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Bottom right')
position.setValue(Notification.POSITION_CENTERED)
def initTypeItems(self, typ):
# Helper to fill the position select with the various possibilities
typ.addContainerProperty(self._CAPTION_PROPERTY, str, None)
typ.setItemCaptionPropertyId(self._CAPTION_PROPERTY)
i = typ.addItem(Notification.TYPE_HUMANIZED_MESSAGE)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Humanized')
i = typ.addItem(Notification.TYPE_WARNING_MESSAGE)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Warning')
i = typ.addItem(Notification.TYPE_ERROR_MESSAGE)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Error')
i = typ.addItem(Notification.TYPE_TRAY_NOTIFICATION)
c = i.getItemProperty(self._CAPTION_PROPERTY)
c.setValue('Tray')
typ.setValue(Notification.TYPE_HUMANIZED_MESSAGE)
class ShowListener(IClickListener):
def __init__(self, c, caption, description, style, position, delay):
self._c = c
self._caption = caption
self._description = description
self._style = style
self._position = position
self._delay = delay
def buttonClick(self, event):
# create Notification instance and customize
n = Notification(self._caption.getValue(),
self._description.getValue(), self._style.getValue())
n.setPosition(self._position.getValue())
d = self._delay.getValue()
n.setDelayMsec( int(d) ) # sec->msec
self._c.getWindow().showNotification(n)
| {
"content_hash": "e1e6f948f2348fa2d1fb864bb6156e94",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 76,
"avg_line_length": 39.714285714285715,
"alnum_prop": 0.6604716227018386,
"repo_name": "rwl/muntjac",
"id": "e2bc2c82a6b4a6bbe00cb80c9212b607bd96a80a",
"size": "5005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "muntjac/demo/sampler/features/notifications/NotificationCustomExample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8602"
},
{
"name": "Java",
"bytes": "2243"
},
{
"name": "JavaScript",
"bytes": "32438"
},
{
"name": "Python",
"bytes": "3212361"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template
app = Flask(__name__)
app.secret_key = 'my_secret_key'
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug = True)
| {
"content_hash": "f6517af6f49624f65796617dd72884c6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 40,
"avg_line_length": 21.9,
"alnum_prop": 0.634703196347032,
"repo_name": "jiobert/python",
"id": "18e109efc8185ffaf119eaa5a9f6bc728a94437e",
"size": "219",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "Apatira_Authman/Assignments/flaskolympics/olympics1/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25381"
},
{
"name": "HTML",
"bytes": "256675"
},
{
"name": "JavaScript",
"bytes": "528"
},
{
"name": "Python",
"bytes": "399336"
}
],
"symlink_target": ""
} |
from .response import Response
class SearchResponse(Response):
"""Defines the top-level object that the response includes when the request
succeeds.
Variables are only populated by the server, and will be ignored when
sending a request.
:param _type: Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar query_context: An object that contains the query string that Bing
used for the request. This object contains the query string as entered by
the user. It may also contain an altered query string that Bing used for
the query if the query string contained a spelling mistake.
:vartype query_context:
~azure.cognitiveservices.search.websearch.models.QueryContext
:ivar web_pages: A list of webpages that are relevant to the search query.
:vartype web_pages:
~azure.cognitiveservices.search.websearch.models.WebWebAnswer
:ivar images: A list of images that are relevant to the search query.
:vartype images: ~azure.cognitiveservices.search.websearch.models.Images
:ivar news: A list of news articles that are relevant to the search query.
:vartype news: ~azure.cognitiveservices.search.websearch.models.News
:ivar related_searches: A list of related queries made by others.
:vartype related_searches:
~azure.cognitiveservices.search.websearch.models.RelatedSearchesRelatedSearchAnswer
:ivar spell_suggestions: The query string that likely represents the
user's intent.
:vartype spell_suggestions:
~azure.cognitiveservices.search.websearch.models.SpellSuggestions
:ivar time_zone: The date and time of one or more geographic locations.
:vartype time_zone:
~azure.cognitiveservices.search.websearch.models.TimeZone
:ivar videos: A list of videos that are relevant to the search query.
:vartype videos: ~azure.cognitiveservices.search.websearch.models.Videos
:ivar computation: The answer to a math expression or units conversion
expression.
:vartype computation:
~azure.cognitiveservices.search.websearch.models.Computation
:ivar ranking_response: The order that Bing suggests that you display the
search results in.
:vartype ranking_response:
~azure.cognitiveservices.search.websearch.models.RankingRankingResponse
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'query_context': {'readonly': True},
'web_pages': {'readonly': True},
'images': {'readonly': True},
'news': {'readonly': True},
'related_searches': {'readonly': True},
'spell_suggestions': {'readonly': True},
'time_zone': {'readonly': True},
'videos': {'readonly': True},
'computation': {'readonly': True},
'ranking_response': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'web_pages': {'key': 'webPages', 'type': 'WebWebAnswer'},
'images': {'key': 'images', 'type': 'Images'},
'news': {'key': 'news', 'type': 'News'},
'related_searches': {'key': 'relatedSearches', 'type': 'RelatedSearchesRelatedSearchAnswer'},
'spell_suggestions': {'key': 'spellSuggestions', 'type': 'SpellSuggestions'},
'time_zone': {'key': 'timeZone', 'type': 'TimeZone'},
'videos': {'key': 'videos', 'type': 'Videos'},
'computation': {'key': 'computation', 'type': 'Computation'},
'ranking_response': {'key': 'rankingResponse', 'type': 'RankingRankingResponse'},
}
def __init__(self):
super(SearchResponse, self).__init__()
self.query_context = None
self.web_pages = None
self.images = None
self.news = None
self.related_searches = None
self.spell_suggestions = None
self.time_zone = None
self.videos = None
self.computation = None
self.ranking_response = None
self._type = 'SearchResponse'
| {
"content_hash": "f37be98d5b72cc5bcf691715aa1b5f67",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 101,
"avg_line_length": 45.3125,
"alnum_prop": 0.6554022988505747,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "10bfdb41e025aaee3df7dc27fa3d6cfaac57b413",
"size": "4824",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-cognitiveservices-search-websearch/azure/cognitiveservices/search/websearch/models/search_response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""
High-level table operations:
- join()
- setdiff()
- hstack()
- vstack()
- dstack()
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import collections
import itertools
from collections import OrderedDict, Counter
from collections.abc import Mapping, Sequence
import numpy as np
from astropy.utils import metadata
from .table import Table, QTable, Row, Column, MaskedColumn
from astropy.units import Quantity
from astropy.utils.compat import NUMPY_LT_1_17
from . import _np_utils
from .np_utils import fix_column_name, TableMergeError
__all__ = ['join', 'setdiff', 'hstack', 'vstack', 'unique']
def _merge_table_meta(out, tables, metadata_conflicts='warn'):
out_meta = deepcopy(tables[0].meta)
for table in tables[1:]:
out_meta = metadata.merge(out_meta, table.meta, metadata_conflicts=metadata_conflicts)
out.meta.update(out_meta)
def _get_list_of_tables(tables):
"""
Check that tables is a Table or sequence of Tables. Returns the
corresponding list of Tables.
"""
# Make sure we have a list of things
if not isinstance(tables, Sequence):
tables = [tables]
# Make sure there is something to stack
if len(tables) == 0:
raise ValueError('no values provided to stack.')
# Convert inputs (Table, Row, or anything column-like) to Tables.
# Special case that Quantity converts to a QTable.
for ii, val in enumerate(tables):
if isinstance(val, Table):
pass
elif isinstance(val, Row):
tables[ii] = Table(val)
elif isinstance(val, Quantity):
tables[ii] = QTable([val])
else:
try:
tables[ii] = Table([val])
except (ValueError, TypeError):
raise TypeError('cannot convert {} to table column.'
.format(val))
return tables
def _get_out_class(objs):
"""
From a list of input objects ``objs`` get merged output object class.
This is just taken as the deepest subclass. This doesn't handle complicated
inheritance schemes.
"""
out_class = objs[0].__class__
for obj in objs[1:]:
if issubclass(obj.__class__, out_class):
out_class = obj.__class__
if any(not issubclass(out_class, obj.__class__) for obj in objs):
raise ValueError('unmergeable object classes {}'
.format([obj.__class__.__name__ for obj in objs]))
return out_class
def join(left, right, keys=None, join_type='inner',
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'], metadata_conflicts='warn'):
"""
Perform a join of the left table with the right table on specified keys.
Parameters
----------
left : Table object or a value that will initialize a Table object
Left side table in the join
right : Table object or a value that will initialize a Table object
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
uniq_col_name : str or None
String generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Try converting inputs to Table as needed
if not isinstance(left, Table):
left = Table(left)
if not isinstance(right, Table):
right = Table(right)
col_name_map = OrderedDict()
out = _join(left, right, keys, join_type,
uniq_col_name, table_names, col_name_map, metadata_conflicts)
# Merge the column and table meta data. Table subclasses might override
# these methods for custom merge behavior.
_merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts)
return out
def setdiff(table1, table2, keys=None):
"""
Take a set difference of table rows.
The row set difference will contain all rows in ``table1`` that are not
present in ``table2``. If the keys parameter is not defined, all columns in
``table1`` will be included in the output table.
Parameters
----------
table1 : `~astropy.table.Table`
``table1`` is on the left side of the set difference.
table2 : `~astropy.table.Table`
``table2`` is on the right side of the set difference.
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns in ``table1``.
Returns
-------
diff_table : `~astropy.table.Table`
New table containing the set difference between tables. If the set
difference is none, an empty table will be returned.
Examples
--------
To get a set difference between two tables::
>>> from astropy.table import setdiff, Table
>>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b'))
>>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 c
4 d
9 f
>>> print(t2)
a b
--- ---
1 c
5 b
9 f
>>> print(setdiff(t1, t2))
a b
--- ---
4 d
>>> print(setdiff(t2, t1))
a b
--- ---
5 b
"""
if keys is None:
keys = table1.colnames
#Check that all keys are in table1 and table2
for tbl, tbl_str in ((table1,'table1'), (table2,'table2')):
diff_keys = np.setdiff1d(keys, tbl.colnames)
if len(diff_keys) != 0:
raise ValueError("The {} columns are missing from {}, cannot take "
"a set difference.".format(diff_keys, tbl_str))
# Make a light internal copy of both tables
t1 = table1.copy(copy_data=False)
t1.meta = {}
t1.keep_columns(keys)
t1['__index1__'] = np.arange(len(table1)) # Keep track of rows indices
# Make a light internal copy to avoid touching table2
t2 = table2.copy(copy_data=False)
t2.meta = {}
t2.keep_columns(keys)
# Dummy column to recover rows after join
t2['__index2__'] = np.zeros(len(t2), dtype=np.uint8) # dummy column
t12 = _join(t1, t2, join_type='left', keys=keys,
metadata_conflicts='silent')
# If t12 index2 is masked then that means some rows were in table1 but not table2.
if hasattr(t12['__index2__'], 'mask'):
# Define bool mask of table1 rows not in table2
diff = t12['__index2__'].mask
# Get the row indices of table1 for those rows
idx = t12['__index1__'][diff]
# Select corresponding table1 rows straight from table1 to ensure
# correct table and column types.
t12_diff = table1[idx]
else:
t12_diff = table1[[]]
return t12_diff
def dstack(tables, join_type='outer', metadata_conflicts='warn'):
"""
Stack columns within tables depth-wise
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : Table or list of Table objects
Table(s) to stack along depth-wise with the current table
Table columns should have same shape and name for depth-wise stacking
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables along rows do::
>>> from astropy.table import vstack, Table
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
a b
--- ---
5 7
6 8
>>> print(dstack([t1, t2]))
a [2] b [2]
------ ------
1 .. 5 3 .. 7
2 .. 6 4 .. 8
"""
tables = _get_list_of_tables(tables)
if len(tables) == 1:
return tables[0] # no point in stacking a single table
n_rows = set(len(table) for table in tables)
if len(n_rows) != 1:
raise ValueError('Table lengths must all match for dstack')
n_row = n_rows.pop()
out = vstack(tables, join_type, metadata_conflicts)
for name, col in out.columns.items():
col = out[name]
# Reshape to so each original column is now in a row.
# If entries are not 0-dim then those additional shape dims
# are just carried along.
# [x x x y y y] => [[x x x],
# [y y y]]
col.shape = (len(tables), n_row) + col.shape[1:]
# Transpose the table and row axes to get to
# [[x, y],
# [x, y]
# [x, y]]
axes = np.arange(len(col.shape))
axes[:2] = [1, 0]
# This temporarily makes `out` be corrupted (columns of different
# length) but it all works out in the end.
out.columns.__setitem__(name, col.transpose(axes), validated=True)
return out
def vstack(tables, join_type='outer', metadata_conflicts='warn'):
"""
Stack tables vertically (along rows)
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : Table or list of Table objects
Table(s) to stack along rows (vertically) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables along rows do::
>>> from astropy.table import vstack, Table
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
a b
--- ---
5 7
6 8
>>> print(vstack([t1, t2]))
a b
--- ---
1 3
2 4
5 7
6 8
"""
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _vstack(tables, join_type, col_name_map, metadata_conflicts)
# Merge table metadata
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def hstack(tables, join_type='outer',
uniq_col_name='{col_name}_{table_name}', table_names=None,
metadata_conflicts='warn'):
"""
Stack tables along columns (horizontally)
A ``join_type`` of 'exact' means that the tables must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' (default)
means the output will have the union of all rows, with table values being
masked where no common values are available.
Parameters
----------
tables : List of Table objects
Tables to stack along columns (horizontally) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
String generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables horizontally (along columns) do::
>>> from astropy.table import Table, hstack
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
c d
--- ---
5 7
6 8
>>> print(hstack([t1, t2]))
a b c d
--- --- --- ---
1 3 5 7
2 4 6 8
"""
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _hstack(tables, join_type, uniq_col_name, table_names,
col_name_map)
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def unique(input_table, keys=None, silent=False, keep='first'):
"""
Returns the unique rows of a table.
Parameters
----------
input_table : `~astropy.table.Table` object or a value that
will initialize a `~astropy.table.Table` object
keys : str or list of str
Name(s) of column(s) used to create unique rows.
Default is to use all columns.
keep : one of 'first', 'last' or 'none'
Whether to keep the first or last row for each set of
duplicates. If 'none', all rows that are duplicate are
removed, leaving only rows that are already unique in
the input.
Default is 'first'.
silent : bool
If `True`, masked value column(s) are silently removed from
``keys``. If `False`, an exception is raised when ``keys``
contains masked value column(s).
Default is `False`.
Returns
-------
unique_table : `~astropy.table.Table` object
New table containing only the unique rows of ``input_table``.
Examples
--------
>>> from astropy.table import unique, Table
>>> import numpy as np
>>> table = Table(data=[[1,2,3,2,3,3],
... [2,3,4,5,4,6],
... [3,4,5,6,7,8]],
... names=['col1', 'col2', 'col3'],
... dtype=[np.int32, np.int32, np.int32])
>>> table
<Table length=6>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
2 5 6
3 4 7
3 6 8
>>> unique(table, keys='col1')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
>>> unique(table, keys=['col1'], keep='last')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 5 6
3 6 8
>>> unique(table, keys=['col1', 'col2'])
<Table length=5>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 4 5
3 6 8
>>> unique(table, keys=['col1', 'col2'], keep='none')
<Table length=4>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 6 8
>>> unique(table, keys=['col1'], keep='none')
<Table length=1>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
"""
if keep not in ('first', 'last', 'none'):
raise ValueError("'keep' should be one of 'first', 'last', 'none'")
if isinstance(keys, str):
keys = [keys]
if keys is None:
keys = input_table.colnames
else:
if len(set(keys)) != len(keys):
raise ValueError("duplicate key names")
# Check for columns with masked values
nkeys = 0
for key in keys[:]:
col = input_table[key]
if hasattr(col, 'mask') and np.any(col.mask):
if not silent:
raise ValueError(
"cannot use columns with masked values as keys; "
"remove column '{}' from keys and rerun "
"unique()".format(key))
del keys[keys.index(key)]
if len(keys) == 0:
raise ValueError("no column remained in ``keys``; "
"unique() cannot work with masked value "
"key columns")
grouped_table = input_table.group_by(keys)
indices = grouped_table.groups.indices
if keep == 'first':
indices = indices[:-1]
elif keep == 'last':
indices = indices[1:] - 1
else:
indices = indices[:-1][np.diff(indices) == 1]
return grouped_table[indices]
def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
table_names=None):
"""
Find the column names mapping when merging the list of tables
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names
will be present, while for the other non-key columns the value will be (col_name_0,
None, ..) or (None, col_name_1, ..) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.colnames:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.colnames for other in others):
out_name = uniq_col_name.format(table_name=table_name, col_name=name)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError('Merging column names resulted in duplicates: {}. '
'Change uniq_col_name or table_names args to fix this.'
.format(repeated_names))
# Convert col_name_map to a regular dict with tuple (immutable) values
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the superset of all dtypes in in_arrays
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{}' columns have incompatible types: {}"
.format(names[0], tme._incompat_types))
# Make sure all input shapes are the same
uniq_shapes = set(col.shape[1:] for col in in_cols)
if len(uniq_shapes) != 1:
raise TableMergeError(f'Key columns {names!r} have different shape')
shape = uniq_shapes.pop()
out_descrs.append((fix_column_name(out_name), dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
try:
return metadata.common_dtype(cols)
except metadata.MergeConflictError as err:
tme = TableMergeError('Columns have incompatible types {}'
.format(err._incompat_types))
tme._incompat_types = err._incompat_types
raise tme
def _get_join_sort_idxs(keys, left, right):
# Go through each of the key columns in order and make columns for
# a new structured array that represents the lexical ordering of those
# key columns. This structured array is then argsort'ed. The trick here
# is that some columns (e.g. Time) may need to be expanded into multiple
# columns for ordering here.
ii = 0 # Index for uniquely naming the sort columns
sort_keys_dtypes = [] # sortable_table dtypes as list of (name, dtype_str, shape) tuples
sort_keys = [] # sortable_table (structured ndarray) column names
sort_left = {} # sortable ndarrays from left table
sort_right = {} # sortable ndarray from right table
for key in keys:
# get_sortable_arrays() returns a list of ndarrays that can be lexically
# sorted to represent the order of the column. In most cases this is just
# a single element of the column itself.
left_sort_cols = left[key].info.get_sortable_arrays()
right_sort_cols = right[key].info.get_sortable_arrays()
if len(left_sort_cols) != len(right_sort_cols):
# Should never happen because cols are screened beforehand for compatibility
raise RuntimeError('mismatch in sort cols lengths')
for left_sort_col, right_sort_col in zip(left_sort_cols, right_sort_cols):
# Check for consistency of shapes. Mismatch should never happen.
shape = left_sort_col.shape[1:]
if shape != right_sort_col.shape[1:]:
raise RuntimeError('mismatch in shape of left vs. right sort array')
if shape != ():
raise ValueError(f'sort key column {key!r} must be 1-d')
sort_key = str(ii)
sort_keys.append(sort_key)
sort_left[sort_key] = left_sort_col
sort_right[sort_key] = right_sort_col
# Build up dtypes for the structured array that gets sorted.
dtype_str = common_dtype([left_sort_col, right_sort_col])
sort_keys_dtypes.append((sort_key, dtype_str))
ii += 1
# Make the empty sortable table and fill it
len_left = len(left)
sortable_table = np.empty(len_left + len(right), dtype=sort_keys_dtypes)
for key in sort_keys:
sortable_table[key][:len_left] = sort_left[key]
sortable_table[key][len_left:] = sort_right[key]
# Finally do the (lexical) argsort and make a new sorted version
idx_sort = sortable_table.argsort(order=sort_keys)
sorted_table = sortable_table[idx_sort]
# Get indexes of unique elements (i.e. the group boundaries)
diffs = np.concatenate(([True], sorted_table[1:] != sorted_table[:-1], [True]))
idxs = np.flatnonzero(diffs)
return idxs, idx_sort
def _join(left, right, keys=None, join_type='inner',
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'],
col_name_map=None, metadata_conflicts='warn'):
"""
Perform a join of the left and right Tables on specified keys.
Parameters
----------
left : Table
Left side table in the join
right : Table
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
uniq_col_name : str or None
String generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
from astropy.time import Time
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Special column name for cartesian join, should never collide with real column
cartesian_index_name = '__table_cartesian_join_temp_index__'
if join_type not in ('inner', 'outer', 'left', 'right', 'cartesian'):
raise ValueError("The 'join_type' argument should be in 'inner', "
"'outer', 'left', 'right', or 'cartesian' "
"(got '{}' instead)".
format(join_type))
if join_type == 'cartesian':
if keys:
raise ValueError('cannot supply keys for a cartesian join')
# Make light copies of left and right, then add temporary index columns
# with all the same value so later an outer join turns into a cartesian join.
left = left.copy(copy_data=False)
right = right.copy(copy_data=False)
left[cartesian_index_name] = np.uint8(0)
right[cartesian_index_name] = np.uint8(0)
keys = (cartesian_index_name, )
# If we have a single key, put it in a tuple
if keys is None:
keys = tuple(name for name in left.colnames if name in right.colnames)
if len(keys) == 0:
raise TableMergeError('No keys in common between left and right tables')
elif isinstance(keys, str):
keys = (keys,)
# Check the key columns
for arr, arr_label in ((left, 'Left'), (right, 'Right')):
for name in keys:
if name not in arr.colnames:
raise TableMergeError('{} table does not have key column {!r}'
.format(arr_label, name))
if hasattr(arr[name], 'mask') and np.any(arr[name].mask):
raise TableMergeError('{} key column {!r} has missing values'
.format(arr_label, name))
len_left, len_right = len(left), len(right)
if len_left == 0 or len_right == 0:
raise ValueError('input tables for join must both have at least one row')
# Joined array dtype as a list of descr (name, type_str, shape) tuples
col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names)
out_descrs = get_descrs([left, right], col_name_map)
try:
idxs, idx_sort = _get_join_sort_idxs(keys, left, right)
except NotImplementedError:
raise TypeError('one or more key columns are not sortable')
# Main inner loop in Cython to compute the cartesian product
# indices for the given join type
int_join_type = {'inner': 0, 'outer': 1, 'left': 2, 'right': 3,
'cartesian': 1}[join_type]
masked, n_out, left_out, left_mask, right_out, right_mask = \
_np_utils.join_inner(idxs, idx_sort, len_left, int_join_type)
out = _get_out_class([left, right])()
for out_name, dtype, shape in out_descrs:
if out_name == cartesian_index_name:
continue
left_name, right_name = col_name_map[out_name]
if left_name and right_name: # this is a key which comes from left and right
cols = [left[left_name], right[right_name]]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('join unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
out[out_name] = col_cls.info.new_like(cols, n_out, metadata_conflicts, out_name)
if not NUMPY_LT_1_17 or issubclass(col_cls, (Column, Time)):
out[out_name][:] = np.where(right_mask,
left[left_name].take(left_out),
right[right_name].take(right_out))
else:
# np.where does not work for mixin columns (e.g. Quantity) so
# use a slower workaround.
non_right_mask = ~right_mask
if np.any(right_mask):
out[out_name][:] = left[left_name].take(left_out)
if np.any(non_right_mask):
out[out_name][non_right_mask] = right[right_name].take(right_out)[non_right_mask]
continue
elif left_name: # out_name came from the left table
name, array, array_out, array_mask = left_name, left, left_out, left_mask
elif right_name:
name, array, array_out, array_mask = right_name, right, right_out, right_mask
else:
raise TableMergeError('Unexpected column names (maybe one is ""?)')
# Select the correct elements from the original table
col = array[name][array_out]
# If the output column is masked then set the output column masking
# accordingly. Check for columns that don't support a mask attribute.
if masked and np.any(array_mask):
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
# array_mask is 1-d corresponding to length of output column. We need
# make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..).
# Mixin columns might not have ndim attribute so use len(col.shape).
array_mask.shape = (col.shape[0],) + (1,) * (len(col.shape) - 1)
# Now broadcast to the correct final shape
array_mask = np.broadcast_to(array_mask, col.shape)
try:
col[array_mask] = col.info.mask_val
except Exception: # Not clear how different classes will fail here
raise NotImplementedError(
"join requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__))
# Set the output table column to the new joined column
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
def _vstack(arrays, join_type='outer', col_name_map=None, metadata_conflicts='warn'):
"""
Stack Tables vertically (by rows)
A ``join_type`` of 'exact' (default) means that the arrays must all
have exactly the same column names (though the order can vary). If
``join_type`` is 'inner' then the intersection of common columns will
be the output. A value of 'outer' means the output will have the union of
all columns, with array values being masked where no common values are
available.
Parameters
----------
arrays : list of Tables
Tables to stack by rows (vertically)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Input validation
if join_type not in ('inner', 'exact', 'outer'):
raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'")
# Trivial case of one input array
if len(arrays) == 1:
return arrays[0]
# Start by assuming an outer match where all names go to output
names = set(itertools.chain(*[arr.colnames for arr in arrays]))
col_name_map = get_col_name_map(arrays, names)
# If require_match is True then the output must have exactly the same
# number of columns as each input array
if join_type == 'exact':
for names in col_name_map.values():
if any(x is None for x in names):
raise TableMergeError('Inconsistent columns in input arrays '
"(use 'inner' or 'outer' join_type to "
"allow non-matching columns)")
join_type = 'outer'
# For an inner join, keep only columns where all input arrays have that column
if join_type == 'inner':
col_name_map = OrderedDict((name, in_names) for name, in_names in col_name_map.items()
if all(x is not None for x in in_names))
if len(col_name_map) == 0:
raise TableMergeError('Input arrays have no columns in common')
lens = [len(arr) for arr in arrays]
n_rows = sum(lens)
out = _get_out_class(arrays)()
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('vstack unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
try:
col = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name)
except metadata.MergeConflictError as err:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{}' columns have incompatible types: {}"
.format(out_name, err._incompat_types))
idx0 = 0
for name, array in zip(in_names, arrays):
idx1 = idx0 + len(array)
if name in array.colnames:
col[idx0:idx1] = array[name]
else:
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
try:
col[idx0:idx1] = col.info.mask_val
except Exception:
raise NotImplementedError(
"vstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__))
idx0 = idx1
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
def _hstack(arrays, join_type='outer', uniq_col_name='{col_name}_{table_name}',
table_names=None, col_name_map=None):
"""
Stack tables horizontally (by columns)
A ``join_type`` of 'exact' (default) means that the arrays must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' means
the output will have the union of all rows, with array values being
masked where no common values are available.
Parameters
----------
arrays : List of tables
Tables to stack by columns (horizontally)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
String generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Input validation
if join_type not in ('inner', 'exact', 'outer'):
raise ValueError("join_type arg must be either 'inner', 'exact' or 'outer'")
if table_names is None:
table_names = ['{}'.format(ii + 1) for ii in range(len(arrays))]
if len(arrays) != len(table_names):
raise ValueError('Number of arrays must match number of table_names')
# Trivial case of one input arrays
if len(arrays) == 1:
return arrays[0]
col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)
# If require_match is True then all input arrays must have the same length
arr_lens = [len(arr) for arr in arrays]
if join_type == 'exact':
if len(set(arr_lens)) > 1:
raise TableMergeError("Inconsistent number of rows in input arrays "
"(use 'inner' or 'outer' join_type to allow "
"non-matching rows)")
join_type = 'outer'
# For an inner join, keep only the common rows
if join_type == 'inner':
min_arr_len = min(arr_lens)
if len(set(arr_lens)) > 1:
arrays = [arr[:min_arr_len] for arr in arrays]
arr_lens = [min_arr_len for arr in arrays]
# If there are any output rows where one or more input arrays are missing
# then the output must be masked. If any input arrays are masked then
# output is masked.
n_rows = max(arr_lens)
out = _get_out_class(arrays)()
for out_name, in_names in col_name_map.items():
for name, array, arr_len in zip(in_names, arrays, arr_lens):
if name is None:
continue
if n_rows > arr_len:
indices = np.arange(n_rows)
indices[arr_len:] = 0
col = array[name][indices]
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
try:
col[arr_len:] = col.info.mask_val
except Exception:
raise NotImplementedError(
"hstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__))
else:
col = array[name][:n_rows]
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
| {
"content_hash": "43a861a3bcb8292cd02415f9f5eae820",
"timestamp": "",
"source": "github",
"line_count": 1148,
"max_line_length": 101,
"avg_line_length": 37.224738675958186,
"alnum_prop": 0.5829316235316142,
"repo_name": "stargaser/astropy",
"id": "baa3e2408065ab50e0afa9d71c8e706b358088eb",
"size": "42734",
"binary": false,
"copies": "1",
"ref": "refs/heads/placeholder",
"path": "astropy/table/operations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9898387"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
import time
from collections import OrderedDict
from userver.database.db0 import db0,ConstDB
from utils.log import logger
class ConstStatis:
dev = 'DEV'
group = 'GROUP'
class Statistician(object):
def __init__(self, eui, type):
if type == 'DEV':
self.eui = ConstDB.dev + eui
elif type == 'GROUP':
self.eui = ConstDB.group + eui
self.push_name = ConstDB.statistics_up + eui + ':' + datetime.now().strftime("%Y-%m-%d")
self.poll_name = ConstDB.statistics_down + eui + ':' + datetime.now().strftime("%Y-%m-%d")
def push_sum_of_one_day(self):
"""
返回上行整一天的和
:return:
"""
res = db0.hgetall(self.push_name)
values = res.values()
sum = 0
for v in values:
sum += int(v)
return sum
def poll_sum_of_one_day(self):
"""
返回下行整一天的和
:return:
"""
res = db0.hgetall(self.poll_name)
values = res.values()
sum = 0
for v in values:
sum += int(v)
return sum
def push_count_in_daily(self):
"""
一天为单位计数
:return:返回30天前的上行数据
"""
res = OrderedDict()
for x in range(0, 30):
date = datetime.now() - timedelta(x)
timestamp = int(date.timestamp())
date_str = date.strftime("%Y-%m-%d")
name = self.push_name.rsplit(':', 1)[0] + ':' + date_str
sum = 0
values = db0.hgetall(name).values()
for v in values:
sum += int(v)
res[timestamp] = sum
return res
def poll_count_in_daily(self):
"""
一天为单位计数
:return:返回30天前的下行数据
"""
res = OrderedDict()
for x in range(0, 30):
date = datetime.now() - timedelta(x)
timestamp = int(date.timestamp())
date_str = date.strftime("%Y-%m-%d")
name = self.poll_name.rsplit(':', 1)[0] + ':' + date_str
sum = 0
values = db0.hgetall(name).values()
for v in values:
sum += int(v)
res[timestamp] = sum
return res
def push_count_in_hour(self):
"""
二十四小时前计数,
:return: 每小时对应的数量
"""
current_hour = datetime.now().hour
today = self.push_name.rsplit(':', 1)[1]
# 昨天距离现在24小时内每小时的数据
yestoday_count = OrderedDict()
if current_hour != 23:
yestoday = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
yestoday_name = self.push_name.rsplit(':', 1)[0] + ':' + yestoday
yestoday_hour = current_hour + 1
for hour in range(yestoday_hour, 24):
key = str(hour)
count_key = yestoday + ' ' + str(hour)
count = db0.hget(yestoday_name, key)
if count:
count = int(count.decode())
else:
count = 0
timestamp = int(datetime.strptime(count_key, '%Y-%m-%d %H').timestamp())
yestoday_count[timestamp] = count
# 今天的每个小时的数据
today_count = OrderedDict()
for hour in range(0, current_hour + 1):
key = str(hour)
count_key = today + ' ' + str(hour)
count = db0.hget(self.push_name, key)
if count:
count = int(count.decode())
else:
count = 0
timestamp = int(datetime.strptime(count_key, '%Y-%m-%d %H').timestamp())
today_count[timestamp] = count
today_count.update(yestoday_count)
return today_count
def poll_count_in_hour(self):
"""
二十四小时前计数,
:return: 每小时对应的数量
"""
current_hour = datetime.now().hour
today = self.poll_name.rsplit(':', 1)[1]
yestoday_count = OrderedDict()
if current_hour != 23:
yestoday = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
yestoday_name = self.poll_name.rsplit(':', 1)[0] + ':' + yestoday
yestoday_hour = current_hour + 1
for hour in range(yestoday_hour, 24):
key = str(hour)
count_key = yestoday + ' ' + str(hour)
count = db0.hget(yestoday_name, key)
if count:
count = int(count.decode())
else:
count = 0
count_key = int(datetime.strptime(count_key, '%Y-%m-%d %H').timestamp())
yestoday_count[count_key] = count
today_count = OrderedDict()
for hour in range(0, current_hour + 1):
key = str(hour)
count_key = today + ' ' + str(hour)
count = db0.hget(self.poll_name, key)
if count:
count = int(count.decode())
else:
count = 0
count_key = int(datetime.strptime(count_key, '%Y-%m-%d %H').timestamp())
today_count[count_key] = count
today_count.update(yestoday_count)
return today_count
def current_timestamp():
"""
当前时间戳,精确到秒
:return: int 时间戳
"""
return int(time.time())
def get_current_hour():
"""
当前小时数
:return: int
"""
return datetime.now().hour
if __name__ == '__main__':
statistician = Statistician('be7a0000000ffffe')
data = statistician.push_count_in_hour()
print(statistician.poll_count_in_hour())
print(statistician.push_sum_of_one_day())
print(statistician.poll_sum_of_one_day())
print(statistician.push_count_in_daily())
print(statistician.poll_count_in_daily()) | {
"content_hash": "04f5f8dba791b9e21af3fa7ddf80654e",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 98,
"avg_line_length": 30.06282722513089,
"alnum_prop": 0.5027864855451062,
"repo_name": "soybean217/lora-python",
"id": "dbff441f92732a2c47fcf70104f369f320d684eb",
"size": "5992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UServer/userver/poll_push_count/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "721"
},
{
"name": "JavaScript",
"bytes": "27647"
},
{
"name": "Python",
"bytes": "808327"
}
],
"symlink_target": ""
} |
class Person(object):
def __init__(self, name):
#create a person with name name
self.name = name
try:
firstBlank = name.rindex(' ')
self.lastName = name[firstBlank+1:]
except:
self.lastName = name
self.age = None
def getLastName(self):
#return self's last name
return self.lastName
def setAge(self, age):
#assumes age is an int greater than 0
#sets self's age to age (in years)
self.age = age
def getAge(self):
#assumes that self's age has been set
#returns self's current age in years
if self.age == None:
raise ValueError
return self.age
def __lt__(self, other):
#return True if self's name is lexicographically less
#than other's name, and False otherwise
if self.lastName == other.lastName:
return self.name < other.name
return self.lastName < other.lastName
def __str__(self):
#return self's name
return self.name
class USResident(Person):
"""
A Person who resides in the US.
"""
def __init__(self, name, status):
"""
Initializes a Person object. A USResident object inherits
from Person and has one additional attribute:
status: a string, one of "citizen", "legal_resident", "illegal_resident"
Raises a ValueError if status is not one of those 3 strings
"""
Person.__init__(self, name)
if status in ["citizen", "legal_resident", "illegal_resident"]:
self.status = status
else:
self.status = None
raise ValueError
def getStatus(self):
"""
Returns the status
"""
if self.status == None:
raise ValueError
return self.status | {
"content_hash": "67ab4368356b3e5c3ca7b2ae8a7c2d37",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 80,
"avg_line_length": 32.258620689655174,
"alnum_prop": 0.5622661678246926,
"repo_name": "spencerpomme/coconuts-on-fire",
"id": "51117c045e237cd19c4917c05c7907e53ebf2d46",
"size": "1930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edX/MITx6.00.1x/final/final_problem5.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "43654"
},
{
"name": "C++",
"bytes": "2027"
},
{
"name": "CSS",
"bytes": "79"
},
{
"name": "HTML",
"bytes": "2955"
},
{
"name": "JavaScript",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "164695"
},
{
"name": "R",
"bytes": "7601"
}
],
"symlink_target": ""
} |
import logging
import time
import tablib
from calendar import Calendar
from collections import defaultdict, Counter, OrderedDict
from datetime import datetime, timedelta
from hashlib import md5
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse, reverse_lazy
from django.db.models import Count, Max, Q, Sum
from django.db.models.functions import Length
from django.http import (
HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseNotFound, JsonResponse
)
from django.shortcuts import get_object_or_404, redirect, render
from django.template.defaultfilters import timesince
from django.template.loader import render_to_string
from django.views.generic import DetailView, FormView, ListView, TemplateView, View
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.utils import timezone
from django.utils.text import slugify
from django_fsm import TransitionNotAllowed
from django_rq import job
from notifications import queue
from redis_metrics import metric
from userprofile.forms import UserForm
from userprofile.models import UserProfile
from utils.db import get_max_order
from utils.forms import EmailForm, SetNewPasswordForm
from utils.dateutils import dates_range
from utils.user_utils import local_day_range, local_now, to_localtime
from . import user_feed
from . email import send_package_cta_email, send_package_enrollment_batch
from . forms import (
ActionForm,
ActionPriorityForm,
AcceptEnrollmentForm,
ActionTriggerForm,
CategoryForm,
ContentAuthorForm,
CTAEmailForm,
DisableTriggerForm,
EnrollmentReminderForm,
GoalForm,
MembersForm,
OrganizationForm,
PackageEnrollmentForm,
ProgramForm,
TitlePrefixForm,
TriggerForm,
UploadImageForm,
)
from . mixins import (
ContentAuthorMixin, ContentEditorMixin, ContentViewerMixin,
PackageManagerMixin, ReviewableUpdateMixin, StateFilterMixin,
StaffRequiredMixin,
)
from . models import (
Action,
Category,
DailyProgress,
Goal,
Organization,
PackageEnrollment,
Program,
Trigger,
UserCompletedAction,
UserGoal,
popular_actions,
popular_goals,
popular_categories,
)
from . permissions import (
ContentPermissions,
is_content_editor,
is_contributor,
permission_required,
staff_required,
superuser_required,
)
from . sequence import get_next_useractions_in_sequence
from . utils import num_user_selections
logger = logging.getLogger(__name__)
class BaseTransferView(FormView):
"""A base view that should be subclassed for models where we want to enable
transferring "ownership". The model must have some FK field to a User, and
be written in a way that assumes that user is the owner.
To use this, you must define the following:
* model: The Model class
* pk_field: The Primary Key field name (typically "pk" or "id")
* owner_field: The name of the FK to User. (e.g. "user" or "created_by")
This class also assumes that existing users, superusers, and staff users
have the ability to transfer owndership.
"""
# Custom attributes
model = None
pk_field = None
owner_field = None
# FormView attributes
form_class = ContentAuthorForm
http_method_names = ['get', 'post']
template_name = "goals/transfer.html"
def get_success_url(self):
return self.object.get_absolute_url()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['object'] = self.object
ctx['owner'] = getattr(self.object, self.owner_field)
return ctx
def get_object(self, kwargs):
if None in [self.model, self.pk_field, self.owner_field]:
raise RuntimeError(
"BaseTransferView subclasses must define the following: "
"model, pk_field, and owner_field."
)
params = {self.pk_field: kwargs.get(self.pk_field, None)}
return get_object_or_404(self.model, **params)
def _can_transfer(self, user):
return any([
getattr(self.object, self.owner_field) == user,
user.is_staff,
user.is_superuser,
])
def _http_method(self, request, *args, **kwargs):
if not self._can_transfer(request.user):
messages.warning(request, "You are not the owner of that object.")
return redirect(self.object.get_absolute_url())
elif request.method == "GET":
return super().get(request, *args, **kwargs)
elif request.method == "POST":
return super().post(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
self.object = self.get_object(kwargs)
return self._http_method(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object(kwargs)
return self._http_method(request, *args, **kwargs)
def form_valid(self, form):
# Set the new owner and carry on.
setattr(self.object, self.owner_field, form.cleaned_data['user'])
self.object.save()
return super().form_valid(form)
class PublishView(View):
"""A Simple Base View for subclasses that need to publish content. This
is overridden by views that specify the model and slug_field for different
types of content.
"""
http_method_names = ['post']
model = None
slug_field = None
def get_object(self, kwargs):
if self.model is None or self.slug_field is None:
raise RuntimeError(
"PublishView subclasses must define a model and slug_field "
"attributes."
)
params = {self.slug_field: kwargs.get(self.slug_field, None)}
return self.model.objects.get(**params)
def post(self, request, *args, **kwargs):
try:
obj = self.get_object(kwargs)
selections = num_user_selections(obj)
confirmed = request.POST.get('confirmed', False) or selections <= 0
is_superuser = request.user.is_superuser
if request.POST.get('publish', False):
obj.publish()
obj.save(updated_by=request.user)
messages.success(request, "{0} has been published".format(obj))
elif request.POST.get('decline', False):
obj.decline()
obj.save(updated_by=request.user)
messages.success(request, "{0} has been declined".format(obj))
elif confirmed and request.POST.get('draft', False):
obj.draft()
obj.save(updated_by=request.user)
messages.success(request, "{0} is now in Draft".format(obj))
elif request.POST.get('draft', False) and selections > 0:
context = {'selections': selections, 'object': obj}
return render(request, 'goals/confirm_state_change.html', context)
elif is_superuser and request.POST.get('publish_children', False):
count = 0 # count *all* children published.
if not obj.state == "published":
obj.publish()
obj.save(updated_by=request.user)
count += 1
# Now, publish all the children
children = obj.publish_children(updated_by=request.user)
count += len(children)
# and the children's children
while len(children) > 0:
children = [
item.publish_children(updated_by=request.user)
for item in children
]
children = [val for sublist in children for val in sublist]
count += len(children)
messages.success(request, "Published {} items".format(count))
return redirect(obj.get_absolute_url())
except self.model.DoesNotExist:
messages.error(
request, "Could not find the specified {0}".format(self.model)
)
except TransitionNotAllowed:
messages.error(request, "Unable to process transition.")
return redirect("goals:index")
class ContentDeleteView(DeleteView):
"""This is a Base DeleteView for our Content models.It doesn't allow for
deletion if users have selected the object (e.g. Content or Goal).
Works with: Category, Goal, Action
"""
def get_num_user_selections(self):
if not hasattr(self, "_num_user_selections"):
obj = self.get_object()
self._num_user_selections = num_user_selections(obj)
return self._num_user_selections
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['num_user_selections'] = self.get_num_user_selections()
return context
def delete(self, request, *args, **kwargs):
if self.get_num_user_selections() > 0:
msg = "You cannot remove objects that have been selected by users"
return HttpResponseForbidden(msg)
return super().delete(request, *args, **kwargs)
class CreatedByView(CreateView):
"""A Subclass of CreateView that tracks who created the object."""
def form_valid(self, form):
result = super(CreatedByView, self).form_valid(form)
self.object.save(created_by=self.request.user)
return result
class IndexView(ContentViewerMixin, TemplateView):
template_name = "goals/index.html"
def get(self, request, *args, **kwargs):
"""Include the following additional info for the goal app's index:
* pending and declined content items for editors
* all "my" information (for authors)
* some stats on the most popular content.
"""
# Only the fields needed for Category, Goal, Action objects
# on this page.
only_fields = [
'id', 'title', 'title_slug', 'updated_on', 'updated_by',
'created_by', 'state',
]
context = self.get_context_data(**kwargs)
is_contributor = request.user.category_contributions.exists()
if is_content_editor(request.user) or is_contributor:
context['is_editor'] = True
# Show content pending review.
mapping = {
'categories': Category.objects.only(*only_fields).filter,
'goals': Goal.objects.only(*only_fields).filter,
'actions': Action.objects.only(*only_fields).filter,
}
for key, func in mapping.items():
qs = func(state='pending-review').order_by("-updated_on")
if is_contributor:
qs = qs.for_contributor(request.user)
context[key] = qs
# List content created/updated by the current user.
conditions = Q(created_by=request.user) | Q(updated_by=request.user)
mapping = {
'my_categories': Category.objects.filter,
'my_goals': Goal.objects.only(*only_fields).filter,
'my_actions': Action.objects.only(*only_fields).filter,
}
for key, func in mapping.items():
context[key] = func(conditions)
# Evaluate to see if the curent user has any content available
total_items = sum([
context['my_categories'].count(),
context['my_goals'].count(),
context['my_actions'].count(),
])
context['has_my_content'] = total_items > 0
context['total_my_content'] = total_items
# IF the result is too big, limit the results...
if total_items > 40:
for key, func in mapping.items():
context[key] = context[key][0:10]
context['num_my_content'] = sum(
len(context[key]) for key in mapping.keys()
)
return self.render_to_response(context)
class MyContentView(ContentViewerMixin, TemplateView):
"""A list of all content 'owned' by the authenticated users.
This information is abbreviated in the IndexView, but if the user has
a lot of content this view lets them see it all at once.
"""
template_name = "goals/my_content.html"
def get(self, request, *args, **kwargs):
"""Includes all content "owned" by the authenticated users into
the context; e.g. the author's "my content"
"""
context = self.get_context_data(**kwargs)
# Only the fields needed for Category, Goal, Action objects
# on this page.
only_fields = [
'id', 'title', 'title_slug', 'updated_on', 'updated_by',
'created_by', 'state',
]
# List content created/updated by the current user.
conditions = Q(created_by=request.user) | Q(updated_by=request.user)
mapping = {
'my_categories': Category.objects.filter,
'my_goals': Goal.objects.only(*only_fields).filter,
'my_actions': Action.objects.only(*only_fields).filter,
}
for key, func in mapping.items():
context[key] = func(conditions)
total_items = sum([
context['my_categories'].count(),
context['my_goals'].count(),
context['my_actions'].count(),
])
context['has_my_content'] = total_items > 0
context['total_my_content'] = total_items
context['num_my_content'] = total_items
return self.render_to_response(context)
class CategoryListView(ContentViewerMixin, StateFilterMixin, ListView):
model = Category
context_object_name = 'categories'
template_name = "goals/category_list.html"
def _filters(self):
kw = {}
selected = self.request.GET.get('selected_by_default', None)
if selected is not None:
kw['selected_by_default'] = bool(selected)
featured = self.request.GET.get('featured', None)
if featured is not None:
kw['grouping__gte'] = 0
packaged = self.request.GET.get('packaged_content', None)
if packaged is not None:
kw['packaged_content'] = bool(packaged)
organizations = self.request.GET.get('organizations', None)
if organizations is not None:
kw['organizations__isnull'] = False
return kw
def get_queryset(self):
queryset = super().get_queryset().filter(**self._filters())
queryset = queryset.annotate(Count('usercategory'))
return queryset.prefetch_related("goal_set", "organizations", "program_set")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(self._filters())
if context.get('grouping__gte', None) == 0:
context['featured'] = True
if context.get('organizations__isnull') is False:
context['organizations'] = True
context['category_list'] = True
return context
class CategoryDetailView(ContentViewerMixin, DetailView):
queryset = Category.objects.all()
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
category = context['object']
result = category.goals.aggregate(Max('sequence_order'))
result = result.get('sequence_order__max') or 0
context['order_values'] = list(range(result + 5))
actions = Action.objects.prefetch_related('goals', 'default_trigger')
actions = actions.filter(goals__categories=category).distinct()
goals = defaultdict(set)
for action in actions.order_by("sequence_order"):
for goal in action.goals.all():
goals[goal].add(action)
# ensure results are sorted by goals' sequence_order, and all actions
# are also sorted by sequence order.
goals = [
(goal, sorted(action_set, key=lambda a: a.sequence_order))
for goal, action_set in goals.items()
]
goals = sorted(goals, key=lambda t: t[0].sequence_order)
context['goals'] = goals
return context
class CategoryCreateView(ContentEditorMixin, CreatedByView):
model = Category
form_class = CategoryForm
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
def get_form_kwargs(self):
"""Includes the current user in the form's kwargs."""
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def get_success_url(self):
url = super().get_success_url()
messages.success(self.request, "Your category has been created.")
return url
def get_initial(self, *args, **kwargs):
"""Pre-populate the value for the initial order. This can't be done
at the class level because we want to query the value each time."""
initial = super(CategoryCreateView, self).get_initial(*args, **kwargs)
if 'order' not in initial:
initial['order'] = get_max_order(Category)
return initial
class CategoryDuplicateView(CategoryCreateView):
"""Initializes the Create form with a copy of data from another object."""
def get_initial(self, *args, **kwargs):
initial = super(CategoryDuplicateView, self).get_initial(*args, **kwargs)
try:
obj = self.get_object()
initial.update({
"title": "Copy of {0}".format(obj.title),
"description": obj.description,
"color": obj.color,
})
except self.model.DoesNotExist:
pass
initial['order'] = get_max_order(Category)
return initial
class CategoryPublishView(ContentEditorMixin, PublishView):
model = Category
slug_field = 'title_slug'
class CategoryTransferView(BaseTransferView):
model = Category
pk_field = "pk"
owner_field = "created_by"
success_url = reverse_lazy('goals:category-list')
class CategoryUpdateView(ContentEditorMixin, ReviewableUpdateMixin, UpdateView):
model = Category
form_class = CategoryForm
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
def get_form_kwargs(self):
"""Includes the current user in the form's kwargs."""
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def get_success_url(self):
url = super().get_success_url()
messages.success(self.request, "Your category has been saved")
return url
class CategoryDeleteView(ContentEditorMixin, ContentDeleteView):
model = Category
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
success_url = reverse_lazy('goals:index')
@user_passes_test(superuser_required, login_url='/goals/')
def reset_default_triggers_in_category(request, pk, title_slug):
"""This is a util view that lets a superuser do one of the following:
1. Reset all default triggers (time of day/ frequency) for all Actions
within a category, OR
2. Reset all Action priorities within the category.
XXX: this is probably too much for one view, but it's convenient to put
both options, here.
"""
category = get_object_or_404(Category, pk=pk, title_slug=title_slug)
if request.method == "POST":
trigger_form = TriggerForm(request.POST, prefix="triggers")
reset_triggers = trigger_form.is_valid()
priority_form = ActionPriorityForm(request.POST, prefix="priority")
reset_priorities = priority_form.is_valid()
if any([reset_triggers, reset_priorities]):
trigger_count = 0
priority_count = 0
for action in category.actions:
# ToD and Frequency are optional, so only do updates if
# they've been selected for changes.
tod = trigger_form.cleaned_data.get('time_of_day')
freq = trigger_form.cleaned_data.get('frequency')
if reset_triggers and (tod or freq):
action.default_trigger.reset()
if tod:
action.default_trigger.time_of_day = tod
if freq:
action.default_trigger.frequency = freq
action.default_trigger.save()
trigger_count += 1
# Priority is also optional.
priority = priority_form.cleaned_data.get('priority')
if reset_priorities and priority:
action.priority = priority
action.save()
priority_count += 1
msg = "Reset {} default triggers and {} priorities.".format(
trigger_count,
priority_count
)
messages.success(request, msg)
return redirect(category.get_absolute_url())
else:
trigger_form = TriggerForm(prefix="triggers")
priority_form = ActionPriorityForm(prefix="priority")
template = "goals/reset_default_triggers_in_category.html"
ctx = {
'trigger_form': trigger_form,
'priority_form': priority_form,
'category': category,
}
return render(request, template, ctx)
class GoalListView(ContentViewerMixin, StateFilterMixin, ListView):
model = Goal
context_object_name = 'goals'
def get_queryset(self):
queryset = super().get_queryset()
queryset = queryset.annotate(Count('usergoal'))
if self.request.GET.get('category', False):
queryset = queryset.filter(categories__pk=self.request.GET['category'])
return queryset.prefetch_related("categories")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
category_id = self.request.GET.get('category', None)
if category_id:
context['category'] = Category.objects.get(pk=category_id)
return context
class GoalDetailView(ContentViewerMixin, DetailView):
queryset = Goal.objects.all()
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
goal = context['object']
# Generate a list of int Ordering values based on existing numbers
# used for this Goal's actions.
max_order = goal.action_set.aggregate(Max('sequence_order'))
max_order = max_order.get('sequence_order__max') or 0
context['order_values'] = list(range(max_order + 5))
return context
class GoalCreateView(ContentAuthorMixin, CreatedByView):
model = Goal
form_class = GoalForm
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
def get_initial(self):
data = self.initial.copy()
data['categories'] = self.request.GET.getlist('category', None)
return data
def get_success_url(self):
url = super().get_success_url()
messages.success(self.request, "Your goal has been created.")
return url
def form_valid(self, form):
"""Upons saving, also check if this was submitted for review."""
result = super().form_valid(form)
if self.request.POST.get('review', False):
msg = ("This goal must have child actions that are either "
"published or in review before it can be reviewed.")
messages.warning(self.request, msg)
# Save the user that created/upadted this object.
goal = self.object
goal.save(
created_by=self.request.user,
updated_by=self.request.user
)
# If we've duplicated a Goal, look up the original's id and
# duplicate all of it's Actions. NOTE: This will be slow and inefficient.
original = form.cleaned_data.get('original_goal', None)
if original:
prefix = md5(timezone.now().strftime("%c").encode('utf8')).hexdigest()[:6]
original_goal = Goal.objects.get(pk=original)
duplicate_actions = []
for action in original_goal.action_set.all():
title = "({}) Copy of {}".format(prefix, action.title)
params = {
"title": title,
"title_slug": slugify(title),
"sequence_order": action.sequence_order,
"description": action.description,
"more_info": action.more_info,
"notification_text": action.notification_text,
"external_resource": action.external_resource,
"external_resource_name": action.external_resource_name,
"priority": action.priority,
"notes": action.notes,
"goals": [goal.id],
}
duplicate_actions.append(Action(**params))
Action.objects.bulk_create(duplicate_actions)
return result
class GoalDuplicateView(GoalCreateView):
"""Initializes the Create form with a copy of data from another object."""
def get_initial(self, *args, **kwargs):
initial = super(GoalDuplicateView, self).get_initial(*args, **kwargs)
try:
obj = self.get_object()
initial.update({
"title": "Copy of {0}".format(obj.title),
'sequence_order': obj.sequence_order,
"categories": obj.categories.values_list("id", flat=True),
"description": obj.description,
"original_goal": obj.id,
})
except self.model.DoesNotExist:
pass
return initial
class GoalPublishView(ContentEditorMixin, PublishView):
model = Goal
slug_field = 'title_slug'
class GoalTransferView(BaseTransferView):
model = Goal
pk_field = "pk"
owner_field = "created_by"
class GoalUpdateView(ContentAuthorMixin, ReviewableUpdateMixin, UpdateView):
model = Goal
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
form_class = GoalForm
def get_context_data(self, **kwargs):
context = super(GoalUpdateView, self).get_context_data(**kwargs)
context['goals'] = Goal.objects.all().prefetch_related("categories")
return context
class GoalDeleteView(ContentEditorMixin, ContentDeleteView):
model = Goal
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
success_url = reverse_lazy('goals:index')
class TriggerListView(ContentViewerMixin, ListView):
model = Trigger
fields = (
'id', 'name', 'time', 'trigger_date', 'recurrences',
'relative_value', 'relative_units',
'time_of_day', 'frequency', 'action_default',
)
queryset = Trigger.objects.default().values(*fields)
context_object_name = 'triggers'
class TriggerDetailView(ContentEditorMixin, DetailView):
queryset = Trigger.objects.default()
slug_field = "name_slug"
slug_url_kwarg = "name_slug"
class ActionListView(ContentViewerMixin, StateFilterMixin, ListView):
model = Action
context_object_name = 'actions'
paginate_by = 100
def _filter_queryset(self, queryset):
search = self.request.GET.get('filter', None)
lookups = {
'empty-source_link': Q(source_link=''),
'empty-source_link': Q(source_link=''),
'empty-source_notes': Q(source_notes=''),
'empty-notes': Q(notes=''),
'empty-more_info': Q(more_info=''),
'empty-description': Q(description=''),
'empty-external_resource': Q(external_resource=''),
'empty-external_resource_name': Q(external_resource_name=''),
'empty-notification_text': Q(notification_text=''),
'contains-source_link': Q(source_link__gt=''),
'contains-source_notes': Q(source_notes__gt=''),
'contains-notes': Q(notes__gt=''),
'contains-more_info': Q(more_info__gt=''),
'contains-description': Q(description__gt=''),
'contains-external_resource': Q(external_resource__gt=''),
'contains-external_resource_name': Q(external_resource_name__gt=''),
'contains-notification_text': Q(notification_text__gt=''),
}
if search in lookups:
return queryset.filter(lookups[search])
return queryset
def get_queryset(self):
queryset = super().get_queryset()
queryset = queryset.annotate(Count('useraction'))
if self.request.GET.get('goal', False):
queryset = queryset.filter(goals__id=self.request.GET['goal'])
if self.request.GET.get('category', False):
queryset = queryset.filter(goals__categories__pk=self.request.GET['category'])
ordering = self.request.GET.get("ordering", "-updated_on")
# Run the result through any of our filters.
queryset = self._filter_queryset(queryset)
return queryset.order_by(ordering).distinct()
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
goal_id = self.request.GET.get('goal', None)
if goal_id:
ctx['goal'] = Goal.objects.get(pk=goal_id)
ctx['action_filter'] = self.request.GET.get('filter', None)
return ctx
class ActionDetailView(ContentViewerMixin, DetailView):
queryset = Action.objects.all()
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
pk_url_kwarg = 'pk'
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['disable_trigger_form'] = DisableTriggerForm()
return ctx
class ActionCreateView(ContentAuthorMixin, CreatedByView):
model = Action
form_class = ActionForm
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
pk_url_kwarg = 'pk'
action_type = Action.SHOWING
action_type_name = 'Showing'
trigger_date = None
def _set_action_type(self, action_type):
"""Ensure the provided action type is valid."""
if action_type in [at[0] for at in Action.ACTION_TYPE_CHOICES]:
self.action_type = action_type
self.action_type_name = [
at[1] for at in Action.ACTION_TYPE_CHOICES
if action_type == at[0]
][0]
def _set_trigger_date(self, date):
if date:
self.trigger_date = datetime.strptime(date, "%Y-%m-%d")
def get_initial(self):
data = self.initial.copy()
data.update(self.form_class.INITIAL[self.action_type])
data['goals'] = self.request.GET.getlist('goal', None)
return data
def get(self, request, *args, **kwargs):
# See if we're creating a specific Action type, and if so,
# prepopulate the form with some initial data.
self._set_action_type(request.GET.get("actiontype", self.action_type))
self._set_trigger_date(request.GET.get("date", None))
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
# Handle dealing with 2 forms.
self.object = None
form = self.get_form()
trigger_form = ActionTriggerForm(request.POST, prefix="trigger")
if form.is_valid() and trigger_form.is_valid():
return self.form_valid(form, trigger_form)
else:
return self.form_invalid(form, trigger_form)
def get_form(self, form_class=None):
"""Include the user as a keyword arg for the form class."""
form_class = form_class or self.get_form_class()
kwargs = self.get_form_kwargs()
kwargs['user'] = self.request.user
return form_class(**kwargs)
def form_valid(self, form, trigger_form):
self.object = form.save()
default_trigger = trigger_form.save(commit=False)
trigger_name = "Default: {0}-{1}".format(self.object, self.object.id)
default_trigger.name = trigger_name[:128]
default_trigger.save()
self.object.default_trigger = default_trigger
# If the POSTed data contains a True 'review' value, the user clicked
# the "Submit for Review" button.
if self.request.POST.get('review', False):
self.object.review() # Transition to the new state
msg = "{0} has been submitted for review".format(self.object)
messages.success(self.request, msg)
else:
messages.success(self.request, "Your notification has been created.")
self.object.save(
created_by=self.request.user,
updated_by=self.request.user
)
return redirect(self.get_success_url())
def form_invalid(self, form, trigger_form):
ctx = self.get_context_data(form=form, trigger_form=trigger_form)
return self.render_to_response(ctx)
def get_context_data(self, **kwargs):
context = super(ActionCreateView, self).get_context_data(**kwargs)
context['Action'] = self.model
context['action_type'] = self.action_type
context['action_type_name'] = self.action_type_name
# We also list all existing actions & link to them.
context['actions'] = Action.objects.all()
# pre-populate some dynamic content displayed to the user
if 'trigger_form' not in context and self.trigger_date:
context['trigger_form'] = ActionTriggerForm(
prefix="trigger",
initial={'trigger_date': self.trigger_date.strftime("%m/%d/%Y")}
)
elif 'trigger_form' not in context:
context['trigger_form'] = ActionTriggerForm(prefix="trigger")
return context
class ActionDuplicateView(ActionCreateView):
"""Initializes the Create form with a copy of data from another object."""
def get_initial(self, *args, **kwargs):
initial = super(ActionDuplicateView, self).get_initial(*args, **kwargs)
try:
obj = self.get_object()
initial.update({
"title": "Copy of {0}".format(obj.title),
'action_type': obj.action_type,
'goals': [g.id for g in obj.goals.all()],
"sequence_order": obj.sequence_order,
'source_link': obj.source_link,
'source_notes': obj.source_notes,
'notes': obj.notes,
"more_info": obj.more_info,
"description": obj.description,
"external_resource": obj.external_resource,
"external_resource_name": obj.external_resource_name,
"notification_text": obj.notification_text,
"priority": obj.priority,
})
except self.model.DoesNotExist:
pass
return initial
class ActionTransferView(BaseTransferView):
model = Action
pk_field = "pk"
owner_field = "created_by"
class ActionPublishView(ContentEditorMixin, PublishView):
model = Action
slug_field = 'title_slug'
pk_url_kwarg = 'pk'
def get_object(self, kwargs):
"""Actions may have have duplicates title_slug values, so we need to
explicitly construct the lookup values."""
params = {
self.slug_field: kwargs.get(self.slug_field, None),
self.pk_url_kwarg: kwargs.get(self.pk_url_kwarg, None),
}
return self.model.objects.get(**params)
class ActionUpdateView(ContentAuthorMixin, ReviewableUpdateMixin, UpdateView):
model = Action
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
pk_url_kwarg = 'pk'
form_class = ActionForm
def get_success_url(self):
return self.object.get_absolute_url()
def post(self, request, *args, **kwargs):
# Handle dealing with 2 forms.
self.object = self.get_object()
form = self.get_form()
trigger_form = ActionTriggerForm(
request.POST,
instance=self.object.default_trigger,
prefix="trigger"
)
if form.is_valid() and trigger_form.is_valid():
return self.form_valid(form, trigger_form)
else:
return self.form_invalid(form, trigger_form)
def get_form(self, form_class=None):
"""Include the user as a keyword arg for the form class."""
form_class = form_class or self.get_form_class()
kwargs = self.get_form_kwargs()
kwargs['user'] = self.request.user
return form_class(**kwargs)
def form_valid(self, form, trigger_form):
self.object = form.save(commit=False)
self.object.default_trigger = trigger_form.save()
# call up to the superclass's method to handle state transitions
# and set updated_by
super().form_valid(form)
messages.success(self.request, "Your notification has been saved")
return redirect(self.get_success_url())
def form_invalid(self, form, trigger_form):
ctx = self.get_context_data(form=form, trigger_form=trigger_form)
return self.render_to_response(ctx)
def get_context_data(self, **kwargs):
context = super(ActionUpdateView, self).get_context_data(**kwargs)
context['Action'] = self.model
# Include a form for the default trigger
if 'trigger_form' not in context:
context['trigger_form'] = ActionTriggerForm(
instance=self.object.default_trigger,
prefix="trigger"
)
# And the ability to disable it.
context['disable_trigger_form'] = DisableTriggerForm()
return context
def disable_trigger(request, pk, title_slug):
"""A Simple view to remove an action's default trigger."""
action = get_object_or_404(Action, pk=pk)
if request.method == "POST":
form = DisableTriggerForm(request.POST)
if form.is_valid():
action.disable_default_trigger()
messages.success(request, "The default trigger has been removed.")
else:
messages.error(request, "Sorry, we could not remove the trigger.")
return redirect(action.get_absolute_url())
class ActionDeleteView(ContentEditorMixin, ContentDeleteView):
model = Action
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
pk_url_kwarg = 'pk'
success_url = reverse_lazy('goals:index')
class OrganizationListView(StaffRequiredMixin, ListView):
model = Organization
context_object_name = 'organizations'
template_name = "goals/organization_list.html"
class OrganizationDetailView(StaffRequiredMixin, DetailView):
queryset = Organization.objects.all()
slug_field = "title_slug"
slug_url_kwarg = "title_slug"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['daily_progresses'] = self.object.daily_progresses()
return context
class OrganizationCreateView(StaffRequiredMixin, CreateView):
model = Organization
form_class = OrganizationForm
slug_field = "name_slug"
slug_url_kwarg = "name_slug"
def get_success_url(self):
url = super().get_success_url()
messages.success(self.request, "Your organization has been created.")
return url
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['organizations'] = Organization.objects.all()
return context
class OrganizationUpdateView(StaffRequiredMixin, UpdateView):
model = Organization
form_class = OrganizationForm
slug_field = "name_slug"
slug_url_kwarg = "name_slug"
def get_success_url(self):
url = super().get_success_url()
messages.success(self.request, "Your organization has been saved")
return url
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['organizations'] = Organization.objects.all()
return context
class OrganizationDeleteView(StaffRequiredMixin, DeleteView):
model = Organization
slug_field = "name_slug"
slug_url_kwarg = "name_slug"
success_url = reverse_lazy('goals:organization-list')
def delete(self, request, *args, **kwargs):
obj = self.get_object()
if obj.members.count() > 0:
msg = "You cannot remove an Organization with members."
return HttpResponseForbidden(msg)
result = super().delete(request, *args, **kwargs)
messages.success(
request,
"Your organization ({}) has been deleted.".format(obj.name)
)
return result
@user_passes_test(staff_required, login_url='/')
def organization_membership_download(request, pk):
"""Allow a staff user to download an Organization's membership report."""
organization = get_object_or_404(Organization, pk=pk)
dataset = tablib.Dataset()
now = timezone.now()
for dp in organization.daily_progresses():
values = [
dp.user.get_full_name(), dp.user.email,
dp.engagement_15_days, dp.engagement_30_days, dp.engagement_60_days,
]
for days in [7, 30, 60, 90, 120, 150]:
since = now - timedelta(days=days)
ucas = dp.user.usercompletedaction_set.filter(created_on__gte=since)
ucas = ucas.filter(state=UserCompletedAction.COMPLETED)
values.append(ucas.count())
dataset.append(values)
dataset.headers = [
'Student', 'Email', '15-Day Engagement', '30-Day Engagement',
'60-Day Engagement', '7-Day Actions', '30-Day Actions', '60-Day Actions',
'90-Day Actions', '120-Day Actions', '150-Day Actions'
]
filename = '{}-member_engagement_report.csv'.format(slugify(organization))
response = HttpResponse(dataset.csv, content_type="text/csv")
response['Content-Disposition'] = 'attachment; filename={}'.format(filename)
return response
@user_passes_test(staff_required, login_url='/')
def organization_add_member(request, organization_id, name_slug=None):
"""Allow a staff user to add a member to an organization."""
organization = get_object_or_404(Organization, pk=organization_id)
if request.method == "POST":
form = MembersForm(request.POST)
if form.is_valid():
for member in form.cleaned_data['members']:
organization.members.add(member)
organization.save()
messages.success(request, "Users have been added")
return redirect(organization.get_absolute_url())
else:
form = MembersForm()
context = {'organization': organization, 'form': form}
return render(request, 'goals/organization_add_member.html', context)
@user_passes_test(staff_required, login_url='/')
def program_add_member(request, program_id):
"""Allow a staff user to add a member to a program."""
program = get_object_or_404(Program, pk=program_id)
if request.method == "POST":
form = MembersForm(request.POST)
if form.is_valid():
for member in form.cleaned_data['members']:
program.members.add(member)
program.save()
messages.success(request, "Users have been added")
return redirect(program.get_absolute_url())
else:
form = MembersForm()
context = {'program': program, 'form': form}
return render(request, 'goals/program_add_member.html', context)
class ProgramListView(StaffRequiredMixin, ListView):
"""A list of all programs (not filtered by Organization)."""
model = Program
context_object_name = 'programs'
template_name = "goals/program_list.html"
class ProgramDetailView(StaffRequiredMixin, DetailView):
queryset = Program.objects.all()
slug_field = "name_slug"
slug_url_kwarg = "name_slug"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['organization'] = self.object.organization
return context
class ProgramCreateView(StaffRequiredMixin, CreateView):
model = Program
form_class = ProgramForm
slug_field = "name_slug"
slug_url_kwarg = "name_slug"
def _get_organization(self, pk):
try:
return Organization.objects.get(pk=pk)
except Organization.DoesNotExist:
return None
def get(self, request, *args, **kwargs):
self.organization = self._get_organization(kwargs['pk'])
if self.organization is None:
return HttpResponseNotFound(render_to_string('404.html'))
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.organization = self._get_organization(kwargs['pk'])
if self.organization is None:
return HttpResponseNotFound(render_to_string('404.html'))
return super().post(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organization'] = self.organization
return kwargs
def get_success_url(self):
messages.success(self.request, "Your program has been created.")
return self.organization.get_absolute_url()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['organization'] = self.organization
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.organization = self.organization
self.object.save()
form.save_m2m() # save the categories & goals
return redirect(self.get_success_url())
class ProgramUpdateView(StaffRequiredMixin, UpdateView):
model = Program
form_class = ProgramForm
slug_field = "name_slug"
slug_url_kwarg = "name_slug"
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organization'] = self.object.organization
return kwargs
def get_success_url(self):
messages.success(self.request, "Your program has been saved")
return self.object.get_absolute_url()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['organization'] = self.object.organization
return context
class ProgramDeleteView(StaffRequiredMixin, DeleteView):
model = Program
slug_field = "name_slug"
slug_url_kwarg = "name_slug"
success_url = reverse_lazy('goals:program-list')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['organization'] = self.object.organization
return context
def delete(self, request, *args, **kwargs):
obj = self.get_object()
if obj.members.exists():
msg = "You cannot remove an Program with members."
return HttpResponseForbidden(msg)
result = super().delete(request, *args, **kwargs)
messages.success(
request,
"Your program ({}) has been deleted.".format(obj.name)
)
return result
class PackageListView(ContentViewerMixin, ListView):
queryset = Category.objects.packages(published=False)
context_object_name = 'categories'
template_name = "goals/package_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
class PackageDetailView(ContentViewerMixin, DetailView):
queryset = Category.objects.packages(published=False)
context_object_name = 'category'
template_name = "goals/package_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
editor = any([
self.request.user.is_staff,
self.request.user.has_perm('goals.publish_category'),
self.request.user in self.object.contributors.all()
])
context['is_editor'] = editor
if editor:
context['enrollments'] = self.object.packageenrollment_set.all()
return context
@permission_required(ContentPermissions.package_managers)
def package_enrollment_user_details(request, package_id, user_id):
User = get_user_model()
user = get_object_or_404(User, pk=user_id)
category = get_object_or_404(Category, pk=package_id)
ctx = {
'is_editor': is_content_editor(request.user),
'package_user': user,
'category': category,
'packages': user.packageenrollment_set.all(),
}
return render(request, "goals/package_enrollment_user_details.html", ctx)
class PackageEnrollmentDeleteView(PackageManagerMixin, DeleteView):
model = PackageEnrollment
success_url = reverse_lazy('goals:package-list')
def get_package_data(self, package):
user = package.user
# UserGoals & UserActions
criteria = {"primary_category": package.category}
user_goals = user.usergoal_set.filter(**criteria)
user_actions = user.useraction_set.filter(**criteria)
# UserCompletedActions
ucas = UserCompletedAction.objects.filter(useraction__in=user_actions)
return {
'user_goals': user_goals,
'user_actions': user_actions,
'ucas': ucas,
}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
package = self.get_object()
data = self.get_package_data(package)
user_goals = data['user_goals']
user_actions = data['user_actions']
ucas = data['ucas']
ucas_count = ucas.count()
ucas_completed = ucas.filter(state=UserCompletedAction.COMPLETED).count()
ucas_dismissed = ucas.filter(state=UserCompletedAction.DISMISSED).count()
ucas_snoozed = ucas.filter(state=UserCompletedAction.SNOOZED).count()
context['category'] = package.category
context['package'] = package
context['package_user'] = package.user
context['user_goals'] = user_goals
context['user_actions'] = user_actions
context['ucas_count'] = ucas_count
context['ucas_completed'] = ucas_completed
context['ucas_dismissed'] = ucas_dismissed
context['ucas_snoozed'] = ucas_snoozed
return context
def delete(self, request, *args, **kwargs):
# Remove the user's selected content that's within the package.
package = self.get_object()
data = self.get_package_data(package)
for obj in data.values():
obj.delete()
messages.success(
request,
"The Package Enrollment for {} was removed".format(package.user)
)
return super().delete(request, *args, **kwargs)
class PackageEnrollmentView(ContentAuthorMixin, FormView):
"""Allow a user with *Author* permissions to automatically enroll users
in a *package* of content. This will do the following:
1. Create user accounts if they don't already exist.
2. Assign users to all of the content in the package (i.e. create the
intermediary UserAction, UserGoal, and UserCategory objects)
as if the user navigated through the app and selected them.
3. Send the user an email letting them know they've been enrolled.
"""
template_name = "goals/package_enroll.html"
form_class = PackageEnrollmentForm
def _can_access(self):
# Determine if a user should be able to access this view.
# REQUIRES self.category.
return any([
self.request.user.is_staff,
self.request.user.has_perm('goals.publish_goal'),
self.request.user in self.category.contributors.all()
])
def get_success_url(self):
return self.category.get_view_enrollment_url()
def get_form(self, form_class=None):
if form_class is None:
form_class = self.get_form_class()
return form_class(self.category, **self.get_form_kwargs())
def get(self, request, *args, **kwargs):
self.category = get_object_or_404(Category, pk=kwargs.get('pk'))
form = self.get_form()
return self.render_to_response(self.get_context_data(form=form))
def post(self, request, *args, **kwargs):
self.category = get_object_or_404(Category, pk=kwargs.pop('pk', None))
if not self._can_access():
return HttpResponseForbidden()
return super().post(request, *args, **kwargs)
def form_valid(self, form):
# create user enrollment objects.
goals = form.cleaned_data['packaged_goals']
emails = form.cleaned_data['email_addresses']
prevent_triggers = form.cleaned_data.get('prevent_custom_triggers', False)
# Create enrollments if necessary.
enrollments = PackageEnrollment.objects.batch_enroll(
emails,
self.category,
goals,
by=self.request.user,
prevent_triggers=prevent_triggers
)
# send a link to the package enrollment not the user.
send_package_enrollment_batch(self.request, enrollments)
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['category'] = self.category
if not self._can_access():
context['form'] = None
return context
@permission_required(ContentPermissions.authors)
def package_calendar(request, pk):
category = get_object_or_404(Category, pk=pk)
start = request.GET.get('d', None)
if start is None:
# Start on the first day of the current month
start = local_now(request.user)
start = to_localtime(datetime(start.year, start.month, start.day), request.user)
elif len(start) == len('yyyy-mm-dd'):
year, month, day = start.split('-')
start = to_localtime(datetime(int(year), int(month), int(day)), request.user)
else:
year, month = start.split('-')
start = to_localtime(datetime(int(year), int(month), 1), request.user)
# Include recurrences for actions that have both a default trigger AND
# where those triggers have a time (otherwise they're essentially invalid)
actions = category.actions.filter(
default_trigger__isnull=False,
default_trigger__time__isnull=False # exclude invalid triggers
)
# note: start calendar on suday (6)
cal = Calendar(firstweekday=6).monthdatescalendar(start.year, start.month)
action_data = []
stop_on_completes = defaultdict(int) # Action.id to number of iterations
contains_relative_reminders = False
for action in actions:
kwargs = {'days': 31} # params for get_occurances
if action.default_trigger.is_relative:
# XXX: Temporarily set the trigger's start date, so this date
# gets used when generating recurrences (which is how this will
# work when a user selects the action). Additionally, we need to
# temporarily assign a user (the logged in user) to make this work.
action.default_trigger.user = request.user
start_on = action.default_trigger.relative_trigger_date(start)
action.default_trigger.trigger_date = start_on
# include some meta-data for the stop-on-complete actions
action.stop_on_complete = action.default_trigger.stop_on_complete
for dt in action.default_trigger.get_occurences(**kwargs):
stop_counter = None # A counter for the stop_on_complete triggers.
if action.stop_on_complete:
stop_on_completes[action.id] += 1
stop_counter = stop_on_completes[action.id]
action_data.append((dt.date(), dt, action, stop_counter))
# include a list of goal-ids in the action
action.goal_ids = list(action.goals.values_list('id', flat=True))
action.is_relative = action.default_trigger.is_relative
if action.default_trigger.is_relative:
contains_relative_reminders = True
action_data = sorted(action_data, key=lambda d: d[1].strftime("%Y%m%d%H%M"))
goals = list(category.goals.values_list('id', 'title'))
ctx = {
'is_editor': is_content_editor(request.user),
'today': local_now(request.user),
'category': category,
'actions': action_data,
'calendar': cal,
'starting_date': start,
'next_date': (cal[-1][-1] + timedelta(days=1)).strftime("%Y-%m"),
'prev_date': (cal[0][0] - timedelta(days=1)).strftime("%Y-%m"),
'goals': goals,
'contains_relative_reminders': contains_relative_reminders,
}
return render(request, "goals/package_calendar.html", ctx)
@permission_required(ContentPermissions.authors)
def enrollment_cta_email(request, pk):
"""Let us send an arbitrary CTA email to users enrolled in a package."""
category = get_object_or_404(Category, pk=pk)
enrollments = category.packageenrollment_set.filter(accepted=True)
if request.method == "POST":
form = CTAEmailForm(request.POST)
if form.is_valid():
params = {
'cta_link': form.cleaned_data['link'],
'cta_text': form.cleaned_data['link_text'],
'message': form.cleaned_data['message'],
'subject': form.cleaned_data['subject'],
}
send_package_cta_email(request, enrollments, **params)
messages.success(request, "Your message has been sent")
return redirect(category.get_view_enrollment_url())
else:
form = CTAEmailForm()
ctx = {'form': form, 'category': category, 'enrollments': enrollments}
return render(request, "goals/package_enrollment_cta_email.html", ctx)
@permission_required(ContentPermissions.authors)
def enrollment_reminder(request, pk):
"""Let us send a reminder email to users that have not accepted the
enrollment."""
category = get_object_or_404(Category, pk=pk)
enrollments = category.packageenrollment_set.filter(accepted=False)
if request.method == "POST":
form = EnrollmentReminderForm(request.POST)
if form.is_valid():
msg = form.cleaned_data['message']
send_package_enrollment_batch(request, enrollments, message=msg)
messages.success(request, "Your message has been sent")
return redirect(category.get_view_enrollment_url())
else:
form = EnrollmentReminderForm()
ctx = {'form': form, 'category': category, 'enrollments': enrollments}
return render(request, "goals/package_enrollment_reminder.html", ctx)
def accept_enrollment(request, pk):
"""This view lets new users "claim" their account, set a password, & agree
to some terms/conditions and a consent form for each Package/Category,
before giving them a link to the app.
Existing users who are being enrolled in a new Package/Category will have
the option to just accept the consent form.
"""
accept_form = None
has_form_errors = False
password_form = None
user_form = None
package = get_object_or_404(PackageEnrollment, pk=pk)
if request.method == "POST" and package.user.is_active:
# An existing user is being enrolled in a new package.
accept_form = AcceptEnrollmentForm(request.POST, prefix="aef")
if accept_form.is_valid():
# Indicate their acceptance of the consent (The form isn't
# valid without doing this)
package.accept()
request.session['user_id'] = package.user.id
request.session['package_ids'] = [package.id]
logger.info("Existing user accepted PackageEnrollment: {}".format(pk))
return redirect(reverse("goals:accept-enrollment-complete"))
else:
has_form_errors = True
elif request.method == "POST":
# This is for a new user
user_form = UserForm(request.POST, instance=package.user, prefix="uf")
password_form = SetNewPasswordForm(request.POST, prefix="pf")
accept_form = AcceptEnrollmentForm(request.POST, prefix="aef")
forms_valid = [
user_form.is_valid(), password_form.is_valid(), accept_form.is_valid()
]
if all(forms_valid):
# Be sure to activate their account.
user = user_form.save()
user.is_active = True
user.set_password(password_form.cleaned_data['password'])
user.save()
# Now, indicate their acceptance of the consent (The form isn't
# valid without doing this)
package.accept()
request.session['user_id'] = package.user.id
request.session['package_ids'] = [package.id]
logger.info("New user accepted PackageEnrollment: {}".format(pk))
return redirect(reverse("goals:accept-enrollment-complete"))
else:
has_form_errors = True
elif package.user.is_active:
# they only need the Accept form, not the user-creation stuff.
accept_form = AcceptEnrollmentForm(prefix="aef", package=package)
else:
user_form = UserForm(instance=package.user, prefix="uf")
password_form = SetNewPasswordForm(prefix="pf")
accept_form = AcceptEnrollmentForm(prefix="aef", package=package)
context = {
'user': package.user,
'user_form': user_form,
'password_form': password_form,
'accept_form': accept_form,
'has_form_errors': has_form_errors,
'package': package,
'category': package.category,
}
return render(request, 'goals/accept_enrollment.html', context)
class AcceptEnrollmentCompleteView(TemplateView):
template_name = "goals/accept_enrollment_complete.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['android_url'] = settings.PLAY_APP_URL
context['ios_url'] = settings.IOS_APP_URL
context['packages'] = PackageEnrollment.objects.filter(
id__in=self.request.session.get("package_ids", [])
)
return context
@user_passes_test(is_contributor, login_url='/goals/')
def package_report(request, pk):
category = get_object_or_404(Category, pk=pk)
enrollees = category.packageenrollment_set.values_list('user', flat=True)
today = timezone.now()
days_ago = int(request.GET.get('days_ago', 30))
from_date = today - timedelta(days=days_ago)
# Are Users Completing Actions?
uca_labels = []
completed = []
snoozed = []
dismissed = []
for day in dates_range(days_ago):
uca_labels.append(day.strftime("%F"))
params = {
'user__in': enrollees,
'updated_on__year': day.year,
'updated_on__month': day.month,
'updated_on__day': day.day,
}
results = UserCompletedAction.objects.filter(**params)
completed.append(results.filter(state=UserCompletedAction.COMPLETED).count())
snoozed.append(results.filter(state=UserCompletedAction.SNOOZED).count())
dismissed.append(results.filter(state=UserCompletedAction.DISMISSED).count())
completed_data = {'label': 'Completed Actions', 'data': completed}
snoozed_data = {'label': 'Snoozed Actions', 'data': snoozed}
dismissed_data = {'label': 'Dismissed Actions', 'data': dismissed}
uca_datasets = [(completed_data, snoozed_data, dismissed_data), ]
# Popular Goals
usergoals = UserGoal.objects.filter(
user__in=enrollees,
goal__categories=category
).values_list("goal__title", flat=True).distinct()
usergoals = Counter(usergoals)
usergoals_datasets = {
'label': 'Selected Goals',
'data': list(usergoals.values())
}
usergoals_labels = list(usergoals.keys())
# How long ago each enrollee's userprofile was updated. This corresponds
# to opening the app (since their timezone is updated every time).
profiles = UserProfile.objects.filter(user__in=enrollees)
accessed = profiles.datetimes('updated_on', 'day')
accessed = Counter([timesince(dt) for dt in accessed])
accessed = sorted(accessed.items())
accessed_labels = [a[0] for a in accessed]
accessed_datasets = {
'label': 'App Last Accessed',
'data': [a[1] for a in accessed]
}
# Count the unkown dates (which may happen because we haven't always tracked
unknown = profiles.filter(updated_on__isnull=True).count()
if unknown:
accessed_labels += ['Unkown']
accessed_datasets['data'] += [unknown]
context = {
'category': category,
'enrollees': enrollees,
'days_ago': days_ago,
'today': today,
'from_date': from_date,
'uca_labels': uca_labels,
'uca_datasets': uca_datasets,
'accessed_labels': accessed_labels,
'accessed_datasets': accessed_datasets,
'usergoals_datasets': usergoals_datasets,
'usergoals_labels': usergoals_labels,
}
return render(request, 'goals/package_report.html', context)
def file_upload(request, object_type, pk):
"""Handler for drag-n-drop file uploads for Goals, and Actions.
NOTE: This only works for the `icon` field at the moment.
See: https://docs.djangoproject.com/en/1.8/topics/http/file-uploads/
"""
# The supported models.
objects = {
'goal': (Goal, UploadImageForm),
'action': (Action, UploadImageForm),
}
try:
model, form_class = objects.get(object_type, (None, None))
obj = get_object_or_404(model, pk=pk)
except ValueError:
logger.error("File upload failed {}.{}".format(object_type, pk))
return HttpResponseBadRequest()
errors = ""
if request.method == "POST":
form = form_class(request.POST, request.FILES)
if form.is_valid():
obj.icon = request.FILES['file']
obj.save()
else:
errors = str(form.errors)
return HttpResponse()
# Assume something went wrong.
logger.error("File upload failed {}".format(errors))
return HttpResponseBadRequest(errors)
def admin_batch_assign_keywords(request):
if request.method == "POST":
goal_ids = request.POST.get('ids', '')
goals = Goal.objects.filter(id__in=goal_ids.split('+'))
keywords = request.POST.get('keywords', '')
keywords = keywords.split(",")
for goal in goals:
goal.keywords = goal.keywords + keywords
goal.save()
msg = "Keywords added to {0} goals.".format(goals.count())
messages.success(request, msg)
return redirect('/admin/goals/goal/')
else:
goal_ids = request.GET.get('ids', '')
goals = Goal.objects.filter(id__in=goal_ids.split('+'))
context = {
'app_label': 'goals',
'title': 'Add Keywords',
'opts': {'app_label': 'goals'},
'original': None,
'adminform': None,
'goal_ids': goal_ids,
'goals': goals,
}
return render(request, 'goals/admin_batch_assign_keywords.html', context)
@job
def _duplicate_category_content(category, prefix=None):
"""Given a category and an optional prefix, duplicate all of it's content;
NOTE: This is an async RQ task, defined here, because otherwise the
view below won't be able to import it (or least I couldn't get it to
work)."""
if prefix:
category.duplicate_content(prefix)
else:
category.duplicate_content()
@user_passes_test(staff_required, login_url='/')
def duplicate_content(request, pk, title_slug):
category = get_object_or_404(Category, pk=pk, title_slug=title_slug)
if request.method == "POST":
form = TitlePrefixForm(request.POST)
if form.is_valid():
prefix = form.cleaned_data['prefix']
_duplicate_category_content.delay(category, prefix)
msg = (
"Your content is being duplicated and should be available in "
"about a minute."
)
messages.success(request, msg)
return redirect("goals:category-list")
else:
form = TitlePrefixForm()
context = {
'category': category,
'form': form,
}
return render(request, 'goals/duplicate_content.html', context)
class DebugToolsView(TemplateView):
template_name = "goals/debug_tools.html"
@user_passes_test(staff_required, login_url='/')
def debug_sequence(request):
"""
List all of the user's selected content ordered by "next_in_sequence"
"""
User = get_user_model()
email = request.GET.get('email_address', None)
useractions = []
if email is None:
form = EmailForm()
else:
form = EmailForm(initial={'email_address': email})
try:
user = User.objects.get(email__icontains=email)
useractions = get_next_useractions_in_sequence(user)
except (User.DoesNotExist, User.MultipleObjectsReturned):
messages.error(request, "Could not find that user")
context = {
'form': form,
'email': email,
'useractions': useractions,
}
return render(request, 'goals/debug_sequence.html', context)
@user_passes_test(staff_required, login_url='/')
def debug_priority_notifications(request):
"""
"""
User = get_user_model()
useractions = None
email = request.GET.get('email_address', None)
devices = None
if email is None:
form = EmailForm()
else:
form = EmailForm(initial={'email_address': email})
try:
user = User.objects.get(email__icontains=email)
# HIGH-priority UserActions
useractions = user.useraction_set.filter(
action__priority=Action.HIGH
).prefetch_related('action', 'custom_trigger').order_by("next_trigger_date")
# Get the user's devices
devices = user.gcmdevice_set.values_list('device_name', 'device_type')
except (User.DoesNotExist, User.MultipleObjectsReturned):
messages.error(request, "Could not find that user")
context = {
'devices': devices,
'form': form,
'email': email,
'useractions': useractions,
}
return render(request, 'goals/debug_priority_notifications.html', context)
@user_passes_test(staff_required, login_url='/')
def debug_notifications(request):
"""A view to allow searching by email addresss, then listing all UserActions
for a day, with all of the sheduled GCMNotifications for that user.
"""
# How many UserActions/CustomActions to display
num_items = int(request.GET.get('n', 25))
User = get_user_model()
customactions = None
useractions = None
next_user_action = None
today = None
next_in_sequence = []
upcoming_useractions = []
upcoming_customactions = []
user_queues = OrderedDict()
devices = None
email = request.GET.get('email_address', None)
if email is None:
form = EmailForm()
else:
form = EmailForm(initial={'email_address': email})
try:
user = User.objects.get(email__icontains=email)
today = local_day_range(user)
# UserActions
useractions = user.useraction_set.all().distinct()[:num_items]
next_in_sequence = get_next_useractions_in_sequence(user)
# Custom Actions
customactions = user.customaction_set.all()
customactions = customactions.order_by("next_trigger_date")
customactions = customactions.distinct()[:num_items]
next_user_action = user_feed.next_user_action(user)
upcoming_useractions = user_feed.todays_actions(user)
upcoming_customactions = user_feed.todays_customactions(user)
for ua in useractions:
ua.upcoming = ua in upcoming_useractions
for ca in customactions:
ca.upcoming = ca in upcoming_customactions
# The user's notification queue
dt = today[0]
days = sorted([dt + timedelta(days=i) for i in range(0, 7)])
for dt in days:
user_queues[dt.strftime("%Y-%m-%d")] = {}
for dt in days:
qdata = queue.UserQueue.get_data(user, date=dt)
# data for a user queue is a dict that looks like this:
# {'uq:1:2016-04-25:count': 0,
# 'uq:1:2016-04-25:high': [],
# 'uq:1:2016-04-25:low': [],
# 'uq:1:2016-04-25:medium': []}
for key, content in qdata.items():
parts = key.split(':')
datestring = parts[2]
key = parts[3]
user_queues[datestring][key] = content
# Get the user's devices
devices = user.gcmdevice_set.values_list('device_name', 'device_type')
except (User.DoesNotExist, User.MultipleObjectsReturned):
messages.error(request, "Could not find that user")
context = {
'devices': devices,
'num_items': num_items,
'form': form,
'email': email,
'useractions': useractions,
'customactions': customactions,
'next_user_action': next_user_action,
'next_in_sequence': next_in_sequence,
'upcoming_useractions': upcoming_useractions,
'upcoming_customactions': upcoming_customactions,
'today': today,
'user_queues': user_queues,
}
return render(request, 'goals/debug_notifications.html', context)
@user_passes_test(staff_required, login_url='/')
def debug_feed(request):
"""Ugh. List the data for the feed, the useractions with a "today"
next_trigger_date, and the created GCMMessages for today side-by-side.
"""
User = get_user_model()
feed_data = {}
today = None
notifs = None
ucas = []
email = request.GET.get('email_address', None)
feed_data_time = None
if email is None:
form = EmailForm()
else:
form = EmailForm(initial={'email_address': email})
try:
user = User.objects.get(email__icontains=email)
today = local_day_range(user)
# Feed data
# feed_useractions = user_feed.todays_actions(user)
# progress = user_feed.todays_actions_progress(user)
fd_start = time.time()
feed_data = user_feed.feed_data(user)
fd_end = time.time()
feed_data_time = fd_end - fd_start
# Additional info / UserCompletedActions + GCMMessages
ucas = user.usercompletedaction_set.filter(updated_on__range=today)
ucas = ucas.order_by("updated_on")
notifs = user.gcmmessage_set.filter(deliver_on__range=today)
notifs = notifs.order_by('deliver_on')
except (User.DoesNotExist, User.MultipleObjectsReturned):
messages.error(request, "Could not find that user")
context = {
'form': form,
'email': email,
'today': today,
'feed_data': feed_data,
'feed_data_time': feed_data_time,
'ucas': ucas,
'notifs': notifs,
}
return render(request, 'goals/debug_feed.html', context)
@user_passes_test(staff_required, login_url='/')
def debug_progress(request):
"""A view to allow searching by email addresss then view and
analyze their DailyProgress info.
"""
# -------------------------------------------------------------------------
# Figure out how to compile data for user "streaks", ie. days in a
# row in which a user has interacted with the app.
# -------------------------------------------------------------------------
# UserCompletedAction --> state=completed, updated_on dates
# DailyProgress has numbers aggregated for completed, snoozed, dismissed
# -------------------------------------------------------------------------
User = get_user_model()
email = request.GET.get('email_address', None)
user = None
form = EmailForm(initial={'email_address': email})
next_goals = []
next_actions = []
streaks = []
try:
user = User.objects.get(email__icontains=email)
except (User.DoesNotExist, User.MultipleObjectsReturned):
messages.error(request, "Could not find that user")
return redirect(reverse('goals:debug_progress'))
except ValueError:
user = None
today = timezone.now()
since = int(request.GET.get('since', 30))
from_date = today - timedelta(days=since)
daily_progresses = DailyProgress.objects.filter(
user=user,
updated_on__gte=from_date
)
# Completed Actions/Goals.
completed = {'actions': [], 'goals': []}
if user:
ucas = user.usercompletedaction_set.all()
completed['actions'] = ucas.filter(updated_on__gte=from_date)
goals = user.usergoal_set.filter(completed=True)
completed['goals'] = goals.filter(completed_on__gte=from_date)
# NEXT in sequence objeccts.
next_goals = user.usergoal_set.next_in_sequence(published=True)
next_actions = get_next_useractions_in_sequence(user)
next_actions = next_actions.order_by("next_trigger_date")
# Streaks.
# ---------------------------------------------------------------------
# Days in a row in which the user said "got it" (completed) or
# dismissed/snoozed an action.
def _fill_streaks(input_values, since, default_tup=()):
"""fills in data for missing dates"""
dates = sorted([dt.date() for dt in dates_range(since)])
index = 0 # index of the last, non-generated item
for dt in dates:
if index < len(input_values) and input_values[index][0] == dt:
yield input_values[index]
index += 1
else:
data = (dt, ) + default_tup
yield data
# Pulling from DailyProgress
streaks = daily_progresses.values_list(
'actions_completed', 'actions_snoozed', 'actions_dismissed',
'updated_on'
).order_by('updated_on')
streaks = [
(
updated.date(),
True if comp > 0 else (snoozed > 0 or dismissed > 0),
comp,
snoozed,
dismissed
)
for comp, snoozed, dismissed, updated in streaks
]
streaks = list(_fill_streaks(streaks, since, (False, 0, 0, 0)))
# ---------------------------------------------------------------------
context = {
'streaks': streaks,
'streaks_dates': [t[0].strftime("%Y-%m-%d") for t in streaks],
'email': email,
'searched_user': user,
'since': since,
'form': form,
'today': today,
'from_date': from_date,
'daily_progresses': daily_progresses,
'completed': completed,
'next_goals': next_goals,
'next_actions': next_actions,
}
return render(request, 'goals/debug_progress.html', context)
class ReportsView(ContentViewerMixin, TemplateView):
"""This view simply renders a template that lists the available reports
with a short description of each."""
template_name = "goals/reports.html"
class ReportPopularView(ContentViewerMixin, TemplateView):
template_name = "goals/report_popular.html"
def get(self, request, *args, **kwargs):
"""Include the most popular content in the conext prior to rendering"""
context = self.get_context_data(**kwargs)
context['popular_categories'] = popular_categories()
context['popular_goals'] = popular_goals()
context['popular_actions'] = popular_actions()
return self.render_to_response(context)
@user_passes_test(staff_required, login_url='/')
def report_triggers(request):
triggers = Trigger.objects.all()
total_trigger_count = triggers.count()
custom_trigger_count = triggers.filter(user__isnull=False).count()
with_recurrences = triggers.filter(recurrences__isnull=False).count()
time_and_date_only = triggers.filter(
trigger_date__isnull=False,
time__isnull=False,
recurrences__isnull=True
).count()
time_only = triggers.filter(
time__isnull=False,
trigger_date__isnull=True,
recurrences__isnull=True
).count()
date_only = triggers.filter(
trigger_date__isnull=False,
time__isnull=True,
recurrences__isnull=True
).count()
# Count all the recurrence options
custom_triggers = triggers.filter(
user__isnull=False,
recurrences__isnull=False
)
custom_recurrences = []
for t in custom_triggers:
custom_recurrences.append(t.recurrences_as_text())
custom_recurrences = Counter(custom_recurrences)
# Counts for time of day / frequency
tods = Trigger.objects.filter(time_of_day__gt='')
tod_counter = Counter(tods.values_list("time_of_day", flat=True))
freqs = Trigger.objects.filter(frequency__gt='')
freq_counter = Counter(freqs.values_list("frequency", flat=True))
context = {
'total_trigger_count': total_trigger_count,
'custom_trigger_count': custom_trigger_count,
'default_trigger_count': total_trigger_count - custom_trigger_count,
'with_recurrences': with_recurrences,
'time_and_date_only': time_and_date_only,
'time_only': time_only,
'date_only': date_only,
'custom_recurrences': custom_recurrences.most_common(20),
'tod_counter': dict(tod_counter),
'freq_counter': dict(freq_counter),
}
return render(request, 'goals/report_triggers.html', context)
@user_passes_test(staff_required, login_url='/')
def report_authors(request):
author_criteria = None
author = request.GET.get('email', None)
if author is not None:
author_criteria = Q(created_by__email__istartswith=author.strip())
# Count of who's got how many items in what state.
states = ['draft', 'published', 'pending-review', 'declined']
goals = {}
actions = {}
for state in states:
items = Goal.objects.filter(state=state)
if author_criteria:
items = items.filter(author_criteria)
items = items.values_list('created_by__email', flat=True)
goals[state] = dict(Counter(items))
items = Action.objects.filter(state=state)
if author_criteria:
items = items.filter(author_criteria)
items = items.values_list('created_by__email', flat=True)
actions[state] = dict(Counter(items))
context = {
'goals': goals,
'actions': actions,
}
return render(request, 'goals/report_authors.html', context)
@user_passes_test(staff_required, login_url='/')
def report_actions(request):
"""Information about our Action content.
This view contains code for several "sub reports". They are:
- notif: List actions with "long" notifictation_text
- desc: List actions with "long" description text
- links: List actions whose more_info/descriptions contain links
- triggers: Filter actions based on their default_triggers (whether they're
dynamic, contain advanced options, or some combination of both).
"""
limit = 100
subreport = request.GET.get('sub', None)
max_len = int(request.GET.get('len', 200))
valid_options = [200, 300, 400, 600, 800, 1000]
# Notifications will be shorter, so use a different set of valid lengths
if subreport == "notif":
valid_options = [90, 100, 150]
if max_len not in valid_options:
max_len = valid_options[0]
actions = Action.objects.all()
context = {
'limit': limit,
'actions': None,
'total': actions.count(),
'max_len': max_len,
'len_options': valid_options,
'subreport': subreport,
}
for state in ['draft', 'published', 'pending-review', 'declined']:
key = "{}_count".format(state.replace('-', ''))
data = {key: actions.filter(state=state).count()}
context.update(data)
if subreport == "desc": # Long descriptions
actions = actions.annotate(text_len=Length('description'))
actions = actions.filter(text_len__gt=max_len)
context['actions'] = actions.order_by('-text_len')[:limit]
context['subreport_title'] = "Long Descriptions"
elif subreport == "notif": # Long notifiation text
actions = actions.annotate(text_len=Length('notification_text'))
actions = actions.filter(text_len__gt=max_len)
context['actions'] = actions.order_by('-text_len')[:limit]
context['subreport_title'] = "Long Notification Text"
elif subreport == "links": # description / more_info contains URLs
actions = actions.filter(
Q(description__icontains='http') |
Q(more_info__icontains='http')
)
actions = actions.annotate(text_len=Length('description'))
context['actions'] = actions[:limit]
context['subreport_title'] = "Containing Links"
context['len_options'] = []
elif subreport == "triggers" and request.GET.get('trigger') == 'dynamic':
# List dynamic triggers
actions = actions.filter(
default_trigger__time_of_day__isnull=False,
default_trigger__frequency__isnull=False,
)
context['actions'] = actions[:limit]
context['subreport_title'] = "Trigger options"
context['len_options'] = []
context['trigger'] = 'dynamic'
elif subreport == "triggers" and request.GET.get('trigger') == 'advanced':
actions = actions.filter(
default_trigger__time_of_day__isnull=True,
default_trigger__frequency__isnull=True,
)
context['actions'] = actions[:limit]
context['subreport_title'] = "Trigger options"
context['len_options'] = []
context['trigger'] = 'advanced'
elif subreport == "triggers" and request.GET.get('trigger') == 'time':
actions = actions.filter(
default_trigger__time_of_day__isnull=False,
default_trigger__frequency__isnull=True,
)
context['actions'] = actions[:limit]
context['subreport_title'] = "Trigger options"
context['len_options'] = []
context['trigger'] = 'time'
elif subreport == "triggers" and request.GET.get('trigger') == 'freq':
actions = actions.filter(
default_trigger__time_of_day__isnull=True,
default_trigger__frequency__isnull=False,
)
context['actions'] = actions[:limit]
context['subreport_title'] = "Trigger options"
context['len_options'] = []
context['trigger'] = 'freq'
elif subreport == "triggers" and request.GET.get('trigger') == 'none':
actions = actions.filter(default_trigger__isnull=True)
context['actions'] = actions[:limit]
context['subreport_title'] = "Trigger options"
context['len_options'] = []
context['trigger'] = 'none'
elif subreport == "emptyfields":
context['subreport_title'] = "Empty Action Fields"
total_actions = Action.objects.count()
source_link = Action.objects.filter(source_link='').count()
source_notes = Action.objects.filter(source_notes='').count()
notes = Action.objects.filter(notes='').count()
more_info = Action.objects.filter(more_info='').count()
description = Action.objects.filter(description='').count()
external_resource = Action.objects.filter(external_resource='').count()
external_resource_name = Action.objects.filter(external_resource_name='').count()
notification_text = Action.objects.filter(notification_text='').count()
# report is a list of (fieldname, empty_count, difference)
context['report'] = [
('source_link', source_link, total_actions - source_link),
('source_notes', source_notes, total_actions - source_notes),
('notes', notes, total_actions - notes),
('more_info', more_info, total_actions - more_info),
('description', description, total_actions - description),
('external_resource', external_resource,
total_actions - external_resource),
('external_resource_name', external_resource_name,
total_actions - external_resource_name),
('notification_text', notification_text,
total_actions - notification_text),
]
context['total_actions'] = total_actions
return render(request, 'goals/report_actions.html', context)
@user_passes_test(staff_required, login_url='/')
def report_engagement(request):
"""A report on User-engagement in the app.
Questions:
- are they checking in?
- are they doing anything with notifications?
- should we show aggregate data...
- or make it searchable by user.
"""
since = timezone.now() - timedelta(days=30)
dps = DailyProgress.objects.filter(created_on__gte=since, actions_total__gt=0)
aggregates = dps.aggregate(
Sum('actions_completed'),
Sum('actions_snoozed'),
Sum('actions_dismissed'),
)
# Do we have notification engagement data?
has_engagement = all(aggregates.values())
ca_aggregates = dps.aggregate(
Sum('customactions_completed'),
Sum('customactions_snoozed'),
Sum('customactions_dismissed'),
)
# Do we have custom notification engagement data?
has_ca_engagement = all(ca_aggregates.values())
engagement = defaultdict(Counter)
if has_engagement:
# WANT: count daily values for each interaction, e.g.
# d[2016-05-14] = {'completed': 25, 'snooozed': 10, 'dismissed': 5}
# d[2016-05-15] = {'completed': 25, 'snooozed': 10, 'dismissed': 5}
for dp in dps:
dt = dp.created_on.strftime("%Y-%m-%d")
engagement[dt]['snoozed'] += dp.actions_snoozed
engagement[dt]['completed'] += dp.actions_completed
engagement[dt]['dismissed'] += dp.actions_dismissed
# now convert to a sorted list of tuples
# (2016-01-02, snoozed, dismissed, completed)
engagement = sorted([
(t, data['snoozed'], data['dismissed'], data['completed'])
for t, data in engagement.items()
])
# Same as engagement, but for custom actions
ca_engagement = defaultdict(Counter)
if has_ca_engagement:
for dp in dps:
dt = dp.created_on.strftime("%Y-%m-%d")
ca_engagement[dt]['snoozed'] += dp.customactions_snoozed
ca_engagement[dt]['completed'] += dp.customactions_completed
ca_engagement[dt]['dismissed'] += dp.customactions_dismissed
ca_engagement = sorted([
(t, data['snoozed'], data['dismissed'], data['completed'])
for t, data in ca_engagement.items()
])
context = {
'since': since,
'progresses': dps,
'aggregates': aggregates,
'ca_aggregates': ca_aggregates,
'has_engagement': has_engagement,
'engagement': engagement,
'has_ca_engagement': has_ca_engagement,
'ca_engagement': ca_engagement,
}
return render(request, 'goals/report_engagement.html', context)
@user_passes_test(staff_required, login_url='/')
def report_organization(request, pk=None):
"""A report on Organization member's selected content.
If a `pk` is provided, we'll display info for that organization. Otherwise,
we'll list the organizations.
"""
N = 10 # The top N goals.
try:
org = Organization.objects.get(pk=pk)
member_ids = org.members.values_list('pk', flat=True)
org_cats = org.program_set.values_list('categories', flat=True)
# All of the Goals selected by org members that aren't org-related goals.
org_goals = UserGoal.objects.filter(
user__in=member_ids,
goal__categories__in=org_cats
).values_list("goal__title", flat=True)
org_goals = Counter(org_goals).most_common(N)
org_goals = sorted(org_goals, key=lambda t: t[1], reverse=True)
nonorg_goals = UserGoal.objects.filter(user__in=member_ids)
# Exclude the organization's program goals
nonorg_goals = nonorg_goals.exclude(goal__categories__in=org_cats)
# Exclude any goals in which users are globally auto-enrolled.
nonorg_goals = nonorg_goals.exclude(
goal__categories__selected_by_default=True
)
# User IDs for all of the org members who've selected content outside
# of the organization's programs.
public_users = set(nonorg_goals.values_list("user", flat=True))
nonorg_goals = nonorg_goals.values_list("goal__title", flat=True)
nonorg_goals = Counter(nonorg_goals).most_common(N)
nonorg_goals = sorted(nonorg_goals, key=lambda t: t[1], reverse=True)
# What % of org members select public goals.
percentage = (len(public_users) / len(set(member_ids))) * 100
except Organization.DoesNotExist:
org = None
org_goals = []
nonorg_goals = []
percentage = None
context = {
'organization': org,
'organizations': Organization.objects.values_list("id", "name"),
'organization_goals': org_goals,
'non_organization_goals': nonorg_goals,
'percentage': percentage,
}
return render(request, 'goals/report_organization.html', context)
def fake_api(request, option=None):
"""Return a 'fake' api response. This is a view that returns fake/dummy
data for api endpoints that we may have removed; This will prevent an
older version of the app from crashing when it tries to hit an endpoint.
"""
metric("fake-api-{}".format(option), category="Tombstones")
if option in ['goalprogress', 'behaviorprogress', 'userbehavior', 'behavior']:
# /api/users/behaviors/progress/
# /api/users/behaviors/
# /api/behaviors/
# /api/users/goals/progress/
return JsonResponse({
"count": 0,
"next": None,
"previous": None,
"results": []
})
elif option == 'goalprogressaverage':
# /api/api/users/goals/progress/average/
return JsonResponse({
"text": "Great job! You're doing well!",
"daily_checkin_avg": 0,
"weekly_checkin_avg": 0,
"better": True
})
return HttpResponseNotFound()
| {
"content_hash": "6134eb6107b07b96c2eabc543fc80051",
"timestamp": "",
"source": "github",
"line_count": 2558,
"max_line_length": 90,
"avg_line_length": 37.1790461297889,
"alnum_prop": 0.6164514636608345,
"repo_name": "tndatacommons/tndata_backend",
"id": "cf0f147719e57c35281bfa5875636b0d28276b8e",
"size": "95104",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tndata_backend/goals/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29078"
},
{
"name": "HTML",
"bytes": "680433"
},
{
"name": "JavaScript",
"bytes": "186991"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "2023392"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
import murraylab_tools.echo as mt_echo
@pytest.mark.skip(reason="tests not yet implmented")
class TestEchoFunctions():
def test_implement_me(self):
assert 0
| {
"content_hash": "e83e32fb849f957bdb1c632ec0b8a9f9",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 52,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.7401960784313726,
"repo_name": "sclamons/murraylab_tools",
"id": "519444b5062f7c39bcaabcdc74981a0fb3d1180b",
"size": "204",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "murraylab_tools/tests/echo_tests/test_misc_echo_functions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "244760"
},
{
"name": "Shell",
"bytes": "307"
}
],
"symlink_target": ""
} |
from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.ping() #记录用户最近登录时间
if not current_user.confirmed \
and request.endpoint[:5] != 'auth.' \
and request.endpoint != 'static':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash(u'有户名或密码错误')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash(u'已登出')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'auth/email/confirm', user=user, token=token)
flash(u'验证邮件发你账户了,注意接收')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash(u'验证通过 感谢加入:)')
else:
flash(u'咋回事 验证无效 重来一遍吧^ ^')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, u'账户验证',
'auth/email/confirm', user=current_user, token=token)
flash(u'新邮件发你账户了,注意接收呦~')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash(u'密码已修改')
return redirect(url_for('main.index'))
else:
flash(u'密码无效')
return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash(u'邮件验证后密码后更改生效')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash(u'密码重置成功')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
send_email(new_email, 'Confirm your email address',
'auth/email/change_email',
user=current_user, token=token)
flash(u'邮件已发送至新邮箱')
return redirect(url_for('main.index'))
else:
flash(u'邮箱或密码无效')
return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
if current_user.change_email(token):
flash(u'邮箱已更改')
else:
flash(u'邮箱更改失败')
return redirect(url_for('main.index'))
| {
"content_hash": "5fc64be8cfb09aaee4df812e682727c2",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 78,
"avg_line_length": 34.26993865030675,
"alnum_prop": 0.6308628714643753,
"repo_name": "mvbn6789/flask-blog",
"id": "6178eace98de9ca1b7190ee8a7b229b80b41fba4",
"size": "5870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/auth/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "231555"
},
{
"name": "HTML",
"bytes": "243128"
},
{
"name": "JavaScript",
"bytes": "13826"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "66155"
}
],
"symlink_target": ""
} |
"""
Test suite for the docx.parts.document module
"""
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from mock import Mock
from docx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
from docx.opc.package import PartFactory
from docx.opc.packuri import PackURI
from docx.oxml.parts.document import CT_Body, CT_Document
from docx.oxml.text import CT_R
from docx.package import ImageParts, Package
from docx.parts.document import _Body, DocumentPart, InlineShapes
from docx.parts.image import ImagePart
from docx.shape import InlineShape
from docx.table import Table
from docx.text import Paragraph
from ..oxml.unitdata.dml import a_drawing, an_inline
from ..oxml.parts.unitdata.document import a_body, a_document
from ..oxml.unitdata.table import (
a_gridCol, a_tbl, a_tblGrid, a_tblPr, a_tc, a_tr
)
from ..oxml.unitdata.text import a_p, a_sectPr, an_r
from ..unitutil import (
function_mock, class_mock, initializer_mock, instance_mock, loose_mock,
method_mock, property_mock
)
class DescribeDocumentPart(object):
def it_is_used_by_PartFactory_to_construct_main_document_part(
self, part_load_fixture):
# fixture ----------------------
document_part_load_, partname_, blob_, package_, document_part_ = (
part_load_fixture
)
content_type = CT.WML_DOCUMENT_MAIN
reltype = RT.OFFICE_DOCUMENT
# exercise ---------------------
part = PartFactory(partname_, content_type, reltype, blob_, package_)
# verify -----------------------
document_part_load_.assert_called_once_with(
partname_, content_type, blob_, package_
)
assert part is document_part_
def it_can_be_constructed_by_opc_part_factory(
self, oxml_fromstring_, init):
# mockery ----------------------
partname, content_type, blob, document_elm, package = (
Mock(name='partname'), Mock(name='content_type'),
Mock(name='blob'), Mock(name='document_elm'),
Mock(name='package')
)
oxml_fromstring_.return_value = document_elm
# exercise ---------------------
doc = DocumentPart.load(partname, content_type, blob, package)
# verify -----------------------
oxml_fromstring_.assert_called_once_with(blob)
init.assert_called_once_with(
partname, content_type, document_elm, package
)
assert isinstance(doc, DocumentPart)
def it_can_add_a_paragraph(self, add_paragraph_fixture):
document_part, body_, p_ = add_paragraph_fixture
p = document_part.add_paragraph()
body_.add_paragraph.assert_called_once_with()
assert p is p_
def it_can_add_a_table(self, add_table_fixture):
document_part, rows, cols, body_, table_ = add_table_fixture
table = document_part.add_table(rows, cols)
body_.add_table.assert_called_once_with(rows, cols)
assert table is table_
def it_can_add_an_image_part_to_the_document(
self, get_or_add_image_fixture):
(document, image_descriptor_, image_parts_, relate_to_, image_part_,
rId_) = get_or_add_image_fixture
image_part, rId = document.get_or_add_image_part(image_descriptor_)
image_parts_.get_or_add_image_part.assert_called_once_with(
image_descriptor_
)
relate_to_.assert_called_once_with(image_part_, RT.IMAGE)
assert image_part is image_part_
assert rId == rId_
def it_has_a_body(self, document_body_fixture):
document, _Body_, body_elm = document_body_fixture
_body = document.body
_Body_.assert_called_once_with(body_elm)
assert _body is _Body_.return_value
def it_can_serialize_to_xml(self, document_blob_fixture):
document_part, document_elm, serialize_part_xml_ = (
document_blob_fixture
)
blob = document_part.blob
serialize_part_xml_.assert_called_once_with(document_elm)
assert blob is serialize_part_xml_.return_value
def it_provides_access_to_the_document_paragraphs(
self, paragraphs_fixture):
document_part, paragraphs_ = paragraphs_fixture
paragraphs = document_part.paragraphs
assert paragraphs is paragraphs_
def it_provides_access_to_the_document_tables(self, tables_fixture):
document_part, tables_ = tables_fixture
tables = document_part.tables
assert tables is tables_
def it_provides_access_to_the_inline_shapes_in_the_document(
self, inline_shapes_fixture):
document, InlineShapes_, body_elm = inline_shapes_fixture
inline_shapes = document.inline_shapes
InlineShapes_.assert_called_once_with(body_elm, document)
assert inline_shapes is InlineShapes_.return_value
def it_knows_it_is_the_part_its_child_objects_belong_to(self, document):
assert document.part is document
def it_knows_the_next_available_xml_id(self, next_id_fixture):
document, expected_id = next_id_fixture
assert document.next_id == expected_id
# fixtures -------------------------------------------------------
@pytest.fixture
def add_paragraph_fixture(self, document_part_body_, body_, p_):
document_part = DocumentPart(None, None, None, None)
return document_part, body_, p_
@pytest.fixture
def add_table_fixture(self, document_part_body_, body_, table_):
document_part = DocumentPart(None, None, None, None)
rows, cols = 2, 4
return document_part, rows, cols, body_, table_
@pytest.fixture
def _Body_(self, request):
return class_mock(request, 'docx.parts.document._Body')
@pytest.fixture
def body_(self, request, p_, table_):
body_ = instance_mock(request, _Body)
body_.add_paragraph.return_value = p_
body_.add_table.return_value = table_
return body_
@pytest.fixture
def blob_(self, request):
return instance_mock(request, str)
@pytest.fixture
def content_type_(self, request):
return instance_mock(request, str)
@pytest.fixture
def document(self):
return DocumentPart(None, None, None, None)
@pytest.fixture
def document_blob_fixture(self, request, serialize_part_xml_):
document_elm = instance_mock(request, CT_Document)
document_part = DocumentPart(None, None, document_elm, None)
return document_part, document_elm, serialize_part_xml_
@pytest.fixture
def document_body_fixture(self, request, _Body_):
document_elm = (
a_document().with_nsdecls().with_child(
a_body())
).element
body_elm = document_elm[0]
document = DocumentPart(None, None, document_elm, None)
return document, _Body_, body_elm
@pytest.fixture
def document_part_(self, request):
return instance_mock(request, DocumentPart)
@pytest.fixture
def document_part_body_(self, request, body_):
return property_mock(
request, DocumentPart, 'body', return_value=body_
)
@pytest.fixture
def document_part_load_(self, request):
return method_mock(request, DocumentPart, 'load')
@pytest.fixture
def get_or_add_image_fixture(
self, request, package_, image_descriptor_, image_parts_,
relate_to_, image_part_, rId_):
package_.image_parts = image_parts_
document = DocumentPart(None, None, None, package_)
return (
document, image_descriptor_, image_parts_, relate_to_,
image_part_, rId_
)
@pytest.fixture
def image_descriptor_(self, request):
return instance_mock(request, str)
@pytest.fixture
def image_part_(self, request):
return instance_mock(request, ImagePart)
@pytest.fixture
def image_parts_(self, request, image_part_):
image_parts_ = instance_mock(request, ImageParts)
image_parts_.get_or_add_image_part.return_value = image_part_
return image_parts_
@pytest.fixture
def init(self, request):
return initializer_mock(request, DocumentPart)
@pytest.fixture
def InlineShapes_(self, request):
return class_mock(request, 'docx.parts.document.InlineShapes')
@pytest.fixture
def inline_shapes_fixture(self, request, InlineShapes_):
document_elm = (
a_document().with_nsdecls().with_child(
a_body())
).element
body_elm = document_elm[0]
document = DocumentPart(None, None, document_elm, None)
return document, InlineShapes_, body_elm
@pytest.fixture(params=[
((), 1), ((1,), 2), ((2,), 1), ((1, 2, 3), 4), ((1, 2, 4), 3),
((0, 0), 1), ((0, 0, 1, 3), 2), (('foo', 1, 2), 3), ((1, 'bar'), 2)
])
def next_id_fixture(self, request):
existing_ids, expected_id = request.param
document_elm = a_document().with_nsdecls().element
for n in existing_ids:
p = a_p().with_nsdecls().element
p.set('id', str(n))
document_elm.append(p)
document = DocumentPart(None, None, document_elm, None)
return document, expected_id
@pytest.fixture
def oxml_fromstring_(self, request):
return function_mock(request, 'docx.parts.document.oxml_fromstring')
@pytest.fixture
def p_(self, request):
return instance_mock(request, Paragraph)
@pytest.fixture
def package_(self, request):
return instance_mock(request, Package)
@pytest.fixture
def paragraphs_(self, request):
return instance_mock(request, list)
@pytest.fixture
def paragraphs_fixture(self, document_part_body_, body_, paragraphs_):
document_part = DocumentPart(None, None, None, None)
body_.paragraphs = paragraphs_
return document_part, paragraphs_
@pytest.fixture
def part_load_fixture(
self, document_part_load_, partname_, blob_, package_,
document_part_):
document_part_load_.return_value = document_part_
return (
document_part_load_, partname_, blob_, package_, document_part_
)
@pytest.fixture
def partname_(self, request):
return instance_mock(request, PackURI)
@pytest.fixture
def relate_to_(self, request, rId_):
relate_to_ = method_mock(request, DocumentPart, 'relate_to')
relate_to_.return_value = rId_
return relate_to_
@pytest.fixture
def rId_(self, request):
return instance_mock(request, str)
@pytest.fixture
def serialize_part_xml_(self, request):
return function_mock(
request, 'docx.parts.document.serialize_part_xml'
)
@pytest.fixture
def table_(self, request):
return instance_mock(request, Table)
@pytest.fixture
def tables_(self, request):
return instance_mock(request, list)
@pytest.fixture
def tables_fixture(self, document_part_body_, body_, tables_):
document_part = DocumentPart(None, None, None, None)
body_.tables = tables_
return document_part, tables_
class Describe_Body(object):
def it_can_add_a_paragraph(self, add_paragraph_fixture):
body, expected_xml = add_paragraph_fixture
p = body.add_paragraph()
assert body._body.xml == expected_xml
assert isinstance(p, Paragraph)
def it_can_add_a_table(self, add_table_fixture):
body, expected_xml = add_table_fixture
table = body.add_table(rows=1, cols=1)
assert body._body.xml == expected_xml
assert isinstance(table, Table)
def it_can_clear_itself_of_all_content_it_holds(
self, clear_content_fixture):
body, expected_xml = clear_content_fixture
_body = body.clear_content()
assert body._body.xml == expected_xml
assert _body is body
def it_provides_access_to_the_paragraphs_it_contains(
self, body_with_paragraphs):
body = body_with_paragraphs
paragraphs = body.paragraphs
assert len(paragraphs) == 2
for p in paragraphs:
assert isinstance(p, Paragraph)
def it_provides_access_to_the_tables_it_contains(
self, body_with_tables):
body = body_with_tables
tables = body.tables
assert len(tables) == 2
for table in tables:
assert isinstance(table, Table)
# fixtures -------------------------------------------------------
@pytest.fixture(params=[
(0, False), (1, False), (0, True), (1, True)
])
def add_paragraph_fixture(self, request):
p_count, has_sectPr = request.param
# body element -----------------
body_bldr = self._body_bldr(p_count=p_count, sectPr=has_sectPr)
body_elm = body_bldr.element
body = _Body(body_elm)
# expected XML -----------------
p_count += 1
body_bldr = self._body_bldr(p_count=p_count, sectPr=has_sectPr)
expected_xml = body_bldr.xml()
return body, expected_xml
@pytest.fixture(params=[(0, False), (0, True), (1, False), (1, True)])
def add_table_fixture(self, request):
p_count, has_sectPr = request.param
body_bldr = self._body_bldr(p_count=p_count, sectPr=has_sectPr)
body = _Body(body_bldr.element)
tbl_bldr = self._tbl_bldr()
body_bldr = self._body_bldr(
p_count=p_count, tbl_bldr=tbl_bldr, sectPr=has_sectPr
)
expected_xml = body_bldr.xml()
return body, expected_xml
@pytest.fixture
def body_with_paragraphs(self):
body_elm = (
a_body().with_nsdecls()
.with_child(a_p())
.with_child(a_p())
.element
)
return _Body(body_elm)
@pytest.fixture
def body_with_tables(self):
body_elm = (
a_body().with_nsdecls()
.with_child(a_tbl())
.with_child(a_tbl())
.element
)
return _Body(body_elm)
@pytest.fixture(params=[False, True])
def clear_content_fixture(self, request):
has_sectPr = request.param
# body element -----------------
body_bldr = a_body().with_nsdecls()
body_bldr.with_child(a_p())
if has_sectPr:
body_bldr.with_child(a_sectPr())
body_elm = body_bldr.element
body = _Body(body_elm)
# expected XML -----------------
body_bldr = a_body().with_nsdecls()
if has_sectPr:
body_bldr.with_child(a_sectPr())
expected_xml = body_bldr.xml()
return body, expected_xml
def _body_bldr(self, p_count=0, tbl_bldr=None, sectPr=False):
body_bldr = a_body().with_nsdecls()
for i in range(p_count):
body_bldr.with_child(a_p())
if tbl_bldr is not None:
body_bldr.with_child(tbl_bldr)
if sectPr:
body_bldr.with_child(a_sectPr())
return body_bldr
def _tbl_bldr(self, rows=1, cols=1):
tblPr_bldr = a_tblPr()
tblGrid_bldr = a_tblGrid()
for i in range(cols):
tblGrid_bldr.with_child(a_gridCol())
tbl_bldr = a_tbl()
tbl_bldr.with_child(tblPr_bldr)
tbl_bldr.with_child(tblGrid_bldr)
for i in range(rows):
tr_bldr = self._tr_bldr(cols)
tbl_bldr.with_child(tr_bldr)
return tbl_bldr
def _tc_bldr(self):
return a_tc().with_child(a_p())
def _tr_bldr(self, cols):
tr_bldr = a_tr()
for i in range(cols):
tc_bldr = self._tc_bldr()
tr_bldr.with_child(tc_bldr)
return tr_bldr
class DescribeInlineShapes(object):
def it_knows_how_many_inline_shapes_it_contains(
self, inline_shapes_fixture):
inline_shapes, inline_shape_count = inline_shapes_fixture
assert len(inline_shapes) == inline_shape_count
def it_can_iterate_over_its_InlineShape_instances(
self, inline_shapes_fixture):
inline_shapes, inline_shape_count = inline_shapes_fixture
actual_count = 0
for inline_shape in inline_shapes:
assert isinstance(inline_shape, InlineShape)
actual_count += 1
assert actual_count == inline_shape_count
def it_provides_indexed_access_to_inline_shapes(
self, inline_shapes_fixture):
inline_shapes, inline_shape_count = inline_shapes_fixture
for idx in range(-inline_shape_count, inline_shape_count):
inline_shape = inline_shapes[idx]
assert isinstance(inline_shape, InlineShape)
def it_raises_on_indexed_access_out_of_range(
self, inline_shapes_fixture):
inline_shapes, inline_shape_count = inline_shapes_fixture
with pytest.raises(IndexError):
too_low = -1 - inline_shape_count
inline_shapes[too_low]
with pytest.raises(IndexError):
too_high = inline_shape_count
inline_shapes[too_high]
def it_can_add_an_inline_picture_to_the_document(
self, add_picture_fixture):
# fixture ----------------------
(inline_shapes, image_descriptor_, document_, InlineShape_,
r_, image_part_, rId_, shape_id_, new_picture_shape_
) = add_picture_fixture
# exercise ---------------------
picture_shape = inline_shapes.add_picture(image_descriptor_)
# verify -----------------------
document_.get_or_add_image_part.assert_called_once_with(
image_descriptor_
)
InlineShape_.new_picture.assert_called_once_with(
r_, image_part_, rId_, shape_id_
)
assert picture_shape is new_picture_shape_
def it_knows_the_part_it_belongs_to(self, inline_shapes_with_parent_):
inline_shapes, parent_ = inline_shapes_with_parent_
part = inline_shapes.part
assert part is parent_.part
# fixtures -------------------------------------------------------
@pytest.fixture
def add_picture_fixture(
self, request, body_, document_, image_descriptor_, InlineShape_,
r_, image_part_, rId_, shape_id_, new_picture_shape_):
inline_shapes = InlineShapes(body_, None)
property_mock(request, InlineShapes, 'part', return_value=document_)
return (
inline_shapes, image_descriptor_, document_, InlineShape_, r_,
image_part_, rId_, shape_id_, new_picture_shape_
)
@pytest.fixture
def body_(self, request, r_):
body_ = instance_mock(request, CT_Body)
body_.add_p.return_value.add_r.return_value = r_
return body_
@pytest.fixture
def document_(self, request, rId_, image_part_, shape_id_):
document_ = instance_mock(request, DocumentPart, name='document_')
document_.get_or_add_image_part.return_value = image_part_, rId_
document_.next_id = shape_id_
return document_
@pytest.fixture
def image_part_(self, request):
return instance_mock(request, ImagePart)
@pytest.fixture
def image_descriptor_(self, request):
return instance_mock(request, str)
@pytest.fixture
def InlineShape_(self, request, new_picture_shape_):
InlineShape_ = class_mock(request, 'docx.parts.document.InlineShape')
InlineShape_.new_picture.return_value = new_picture_shape_
return InlineShape_
@pytest.fixture
def inline_shapes_fixture(self):
inline_shape_count = 2
body = (
a_body().with_nsdecls('w', 'wp').with_child(
a_p().with_child(
an_r().with_child(
a_drawing().with_child(
an_inline()))).with_child(
an_r().with_child(
a_drawing().with_child(
an_inline())
)
)
)
).element
inline_shapes = InlineShapes(body, None)
return inline_shapes, inline_shape_count
@pytest.fixture
def inline_shapes_with_parent_(self, request):
parent_ = loose_mock(request, name='parent_')
inline_shapes = InlineShapes(None, parent_)
return inline_shapes, parent_
@pytest.fixture
def new_picture_shape_(self, request):
return instance_mock(request, InlineShape)
@pytest.fixture
def r_(self, request):
return instance_mock(request, CT_R)
@pytest.fixture
def rId_(self, request):
return instance_mock(request, str)
@pytest.fixture
def shape_id_(self, request):
return instance_mock(request, int)
| {
"content_hash": "e49b640f447a089170db536b8adfa08c",
"timestamp": "",
"source": "github",
"line_count": 599,
"max_line_length": 77,
"avg_line_length": 34.90651085141903,
"alnum_prop": 0.5966808551341528,
"repo_name": "ludoo/python-docx",
"id": "c5cf5a36baad262bc820ff8bc8d80729e595c4ea",
"size": "20928",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/parts/test_document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "575327"
}
],
"symlink_target": ""
} |
"""Tools for analyzing a Python codebase as a whole. Analysis happens
before indexing in order to compute data that requires knowledge of the
entire codebase.
"""
import ast
import os
from collections import defaultdict
from warnings import warn
from dxr.build import unicode_contents
from dxr.plugins.python.utils import (ClassFunctionVisitorMixin,
convert_node_to_fullname, package_for_module,
path_to_module, ast_parse)
class TreeAnalysis(object):
"""Performs post-build analysis and stores the results."""
def __init__(self, python_path, source_folder, paths):
"""Analyze the given paths.
:arg python_path: Absolute path to the root folder where Python
modules for the tree are stored.
:arg source_folder: Absolute path to the root folder storing all the
files in the tree. Used to generate relative paths when emitting
warnings.
:arg paths: Iterable containing tuples of the form (path, encoding)
for each file that should be analyzed.
"""
self.python_path = python_path
self.source_folder = source_folder
self.base_classes = defaultdict(list)
self.derived_classes = defaultdict(list)
self.class_functions = defaultdict(list)
self.overridden_functions = defaultdict(list)
self.overriding_functions = defaultdict(list)
self.names = {}
self.ignore_paths = set()
for path, encoding in paths:
self._analyze_file(path, encoding)
self._finish_analysis()
def _analyze_file(self, path, encoding):
"""Analyze an individual file. If the file isn't valid Python, add
it to the ignore_paths list on the analysis.
"""
try:
contents = unicode_contents(path, encoding)
if contents is None:
# Then we could not decode the file, nothing we can do here.
return
syntax_tree = ast_parse(contents)
except (IOError, SyntaxError, TypeError, UnicodeDecodeError) as error:
rel_path = os.path.relpath(path, self.source_folder)
warn('Failed to analyze {filename} due to error "{error}".'.format(
filename=rel_path, error=error))
self.ignore_paths.add(rel_path)
return
abs_module_name = path_to_module(self.python_path, path) # e.g. package.sub.current_file
visitor = AnalyzingNodeVisitor(abs_module_name, self)
visitor.visit(syntax_tree)
def _finish_analysis(self):
"""Finishes the analysis by computing some relations that
depend on the entire tree having been analyzed, such as method
overrides (which we can't compute until we've analyzed every
class method).
"""
# Compute derived classes from base class relations.
for class_name, bases in self.base_classes.iteritems():
for base_name in bases:
base_name = self.normalize_name(base_name)
self.derived_classes[base_name].append(class_name)
# Compute which functions override other functions.
for class_name, functions in self.class_functions.iteritems():
functions = set(functions)
base_classes = self.get_base_classes(class_name, set([class_name]))
# For each base class, find the union of functions within
# the current class and functions in the base; those are
# overridden methods!
for base_class in base_classes:
# Use get here to avoid modifying class_functions while
# looping over it.
base_class_functions = self.class_functions.get(base_class, set())
matches = functions.intersection(base_class_functions)
for match in matches:
function_qualname = class_name + '.' + match
overridden_qualname = base_class + '.' + match
self.overriding_functions[function_qualname].append(overridden_qualname)
# Compute which functions are overridden by which (the reverse
# of what we just computed above).
for function, overridden_functions in self.overriding_functions.iteritems():
for overridden_function in overridden_functions:
self.overridden_functions[overridden_function].append(function)
def get_base_classes(self, absolute_class_name, seen):
"""Return a list of all the classes that the given class
inherits from in their canonical form.
:arg seen: The set of normalized base class names already known. Python
doesn't permit actual inheritance cycles, but we don't currently
distinguish between a locally defined name and a name from the
built-in namespace, so something like
'class DeprecationWarning(DeprecationWarning)' (with no import
needed for the built-in DeprecationWarning) would lead to a cycle.
"""
for base in self.base_classes[absolute_class_name]:
base = self.normalize_name(base)
if base not in seen:
seen.add(base)
yield base
for base_parent in self.get_base_classes(base, seen):
yield base_parent
def get_derived_classes(self, absolute_class_name, seen):
"""Return a list of all the classes that derive from the given
class in their canonical form.
:arg seen: The set of normalized base class names already known. Python
doesn't permit actual inheritance cycles, but we don't currently
distinguish between a locally defined name and a name from the
built-in namespace, so something like
'class DeprecationWarning(DeprecationWarning)' (with no import
needed for the built-in DeprecationWarning) would lead to a cycle.
"""
for derived in self.derived_classes[absolute_class_name]:
if derived not in seen:
seen.add(derived)
yield derived
for derived_child in self.get_derived_classes(derived, seen):
yield derived_child
def normalize_name(self, absolute_local_name):
"""Given a local name, figure out the actual module that the
thing that name points to lives and return that name.
For example, if you have `from os import path` in a module
called `foo.bar`, then the name `foo.bar.path` would return
`os.path`.
"""
while absolute_local_name in self.names:
absolute_local_name = self.names[absolute_local_name]
mod, var = absolute_local_name
if mod is None: # Assuming `var` contains an absolute module name
return var
# When you refer to `imported_module.foo`, we need to normalize the
# `imported_module` prefix in case it's not the canonical name of
# that module.
if '.' in var:
prefix, local_name = var.rsplit('.', 1)
return self.normalize_name((mod, prefix)) + '.' + local_name
else:
return mod + "." + var
class AnalyzingNodeVisitor(ast.NodeVisitor, ClassFunctionVisitorMixin):
"""Node visitor that analyzes code for data we need prior to
indexing, including:
- A graph of imported names and the files that they were originally
defined in.
- A mapping of class names to the classes they inherit from.
"""
def __init__(self, abs_module_name, tree_analysis):
super(AnalyzingNodeVisitor, self).__init__()
self.abs_module_name = abs_module_name # name of the module we're walking
self.tree_analysis = tree_analysis
def visit_ClassDef(self, node):
super(AnalyzingNodeVisitor, self).visit_ClassDef(node)
# Save the base classes of any class we find.
class_path = self.abs_module_name + '.' + node.name
bases = []
for base in node.bases:
base_name = convert_node_to_fullname(base)
if base_name:
bases.append((self.abs_module_name, base_name))
self.tree_analysis.base_classes[class_path] = bases
def visit_ClassFunction(self, class_node, function_node):
"""Save any member functions we find on a class."""
class_path = self.abs_module_name + '.' + class_node.name
self.tree_analysis.class_functions[class_path].append(function_node.name)
def visit_Import(self, node):
self.analyze_import(node)
self.generic_visit(node)
def visit_ImportFrom(self, node):
self.analyze_import(node)
self.generic_visit(node)
def analyze_import(self, node):
"""Whenever we import something, remember the local name of what
was imported and where it actually lives.
"""
# We're processing statements like the following in the file
# corresponding to <self.abs_module_name>:
# [from <node.module>] import <alias.name> as <alias.asname>
#
# Try to find `abs_import_name`, so that the above is equivalent to
# import <abs_import_name> as <local_name>
# ...and store the mapping
# (abs_module_name, local_name) -> (abs_import_name)
for alias in node.names:
local_name = alias.asname or alias.name
absolute_local_name = self.abs_module_name, local_name
# TODO: we're assuming this is an absolute name, but it could also
# be relative to the current package or a var
abs_import_name = None, alias.name
if isinstance(node, ast.ImportFrom):
# `from . import x` means node.module is None.
if node.module:
abs_import_name = node.module, alias.name
else:
package_path = package_for_module(self.abs_module_name)
if package_path:
abs_import_name = package_path, alias.name
if absolute_local_name != abs_import_name:
self.tree_analysis.names[absolute_local_name] = abs_import_name
| {
"content_hash": "13ea4121467197e9283fc7e9b4b508c8",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 97,
"avg_line_length": 42.415637860082306,
"alnum_prop": 0.6203550984767634,
"repo_name": "pelmers/dxr",
"id": "faf1e3e922c2a4cd01b7d3ea8830b2c07d293680",
"size": "10307",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dxr/plugins/python/analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1974"
},
{
"name": "C++",
"bytes": "81663"
},
{
"name": "CSS",
"bytes": "24071"
},
{
"name": "HTML",
"bytes": "44076"
},
{
"name": "IDL",
"bytes": "8448"
},
{
"name": "JavaScript",
"bytes": "82076"
},
{
"name": "Makefile",
"bytes": "10011"
},
{
"name": "Python",
"bytes": "736868"
},
{
"name": "Rust",
"bytes": "11710"
},
{
"name": "Shell",
"bytes": "2524"
}
],
"symlink_target": ""
} |
import unittest
class TestTransaction(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.datastore.transaction import Transaction
return Transaction
def _make_one(self, client, **kw):
return self._get_target_class()(client, **kw)
def test_ctor_defaults(self):
from google.cloud.datastore._generated import datastore_pb2
_PROJECT = 'PROJECT'
connection = _Connection()
client = _Client(_PROJECT, connection)
xact = self._make_one(client)
self.assertEqual(xact.project, _PROJECT)
self.assertIs(xact._client, client)
self.assertIsNone(xact.id)
self.assertEqual(xact._status, self._get_target_class()._INITIAL)
self.assertIsInstance(xact._commit_request,
datastore_pb2.CommitRequest)
self.assertIs(xact.mutations, xact._commit_request.mutations)
self.assertEqual(len(xact._partial_key_entities), 0)
def test_current(self):
_PROJECT = 'PROJECT'
connection = _Connection()
client = _Client(_PROJECT, connection)
xact1 = self._make_one(client)
xact2 = self._make_one(client)
self.assertIsNone(xact1.current())
self.assertIsNone(xact2.current())
with xact1:
self.assertIs(xact1.current(), xact1)
self.assertIs(xact2.current(), xact1)
with _NoCommitBatch(client):
self.assertIsNone(xact1.current())
self.assertIsNone(xact2.current())
with xact2:
self.assertIs(xact1.current(), xact2)
self.assertIs(xact2.current(), xact2)
with _NoCommitBatch(client):
self.assertIsNone(xact1.current())
self.assertIsNone(xact2.current())
self.assertIs(xact1.current(), xact1)
self.assertIs(xact2.current(), xact1)
self.assertIsNone(xact1.current())
self.assertIsNone(xact2.current())
def test_begin(self):
_PROJECT = 'PROJECT'
connection = _Connection(234)
client = _Client(_PROJECT, connection)
xact = self._make_one(client)
xact.begin()
self.assertEqual(xact.id, 234)
self.assertEqual(connection._begun, _PROJECT)
def test_begin_tombstoned(self):
_PROJECT = 'PROJECT'
connection = _Connection(234)
client = _Client(_PROJECT, connection)
xact = self._make_one(client)
xact.begin()
self.assertEqual(xact.id, 234)
self.assertEqual(connection._begun, _PROJECT)
xact.rollback()
self.assertIsNone(xact.id)
self.assertRaises(ValueError, xact.begin)
def test_begin_w_begin_transaction_failure(self):
_PROJECT = 'PROJECT'
connection = _Connection(234)
client = _Client(_PROJECT, connection)
xact = self._make_one(client)
connection._side_effect = RuntimeError
with self.assertRaises(RuntimeError):
xact.begin()
self.assertIsNone(xact.id)
self.assertEqual(connection._begun, _PROJECT)
def test_rollback(self):
_PROJECT = 'PROJECT'
connection = _Connection(234)
client = _Client(_PROJECT, connection)
xact = self._make_one(client)
xact.begin()
xact.rollback()
self.assertIsNone(xact.id)
self.assertEqual(connection._rolled_back, (_PROJECT, 234))
def test_commit_no_partial_keys(self):
_PROJECT = 'PROJECT'
connection = _Connection(234)
client = _Client(_PROJECT, connection)
xact = self._make_one(client)
xact._commit_request = commit_request = object()
xact.begin()
xact.commit()
self.assertEqual(connection._committed,
(_PROJECT, commit_request, 234))
self.assertIsNone(xact.id)
def test_commit_w_partial_keys(self):
_PROJECT = 'PROJECT'
_KIND = 'KIND'
_ID = 123
connection = _Connection(234)
connection._completed_keys = [_make_key(_KIND, _ID, _PROJECT)]
client = _Client(_PROJECT, connection)
xact = self._make_one(client)
xact.begin()
entity = _Entity()
xact.put(entity)
xact._commit_request = commit_request = object()
xact.commit()
self.assertEqual(connection._committed,
(_PROJECT, commit_request, 234))
self.assertIsNone(xact.id)
self.assertEqual(entity.key.path, [{'kind': _KIND, 'id': _ID}])
def test_context_manager_no_raise(self):
_PROJECT = 'PROJECT'
connection = _Connection(234)
client = _Client(_PROJECT, connection)
xact = self._make_one(client)
xact._commit_request = commit_request = object()
with xact:
self.assertEqual(xact.id, 234)
self.assertEqual(connection._begun, _PROJECT)
self.assertEqual(connection._committed,
(_PROJECT, commit_request, 234))
self.assertIsNone(xact.id)
def test_context_manager_w_raise(self):
class Foo(Exception):
pass
_PROJECT = 'PROJECT'
connection = _Connection(234)
client = _Client(_PROJECT, connection)
xact = self._make_one(client)
xact._mutation = object()
try:
with xact:
self.assertEqual(xact.id, 234)
self.assertEqual(connection._begun, _PROJECT)
raise Foo()
except Foo:
self.assertIsNone(xact.id)
self.assertEqual(connection._rolled_back, (_PROJECT, 234))
self.assertIsNone(connection._committed)
self.assertIsNone(xact.id)
def _make_key(kind, id_, project):
from google.cloud.datastore._generated import entity_pb2
key = entity_pb2.Key()
key.partition_id.project_id = project
elem = key.path.add()
elem.kind = kind
elem.id = id_
return key
class _Connection(object):
_marker = object()
_begun = None
_rolled_back = None
_committed = None
_side_effect = None
def __init__(self, xact_id=123):
self._xact_id = xact_id
self._completed_keys = []
self._index_updates = 0
def begin_transaction(self, project):
self._begun = project
if self._side_effect is None:
return self._xact_id
else:
raise self._side_effect
def rollback(self, project, transaction_id):
self._rolled_back = project, transaction_id
def commit(self, project, commit_request, transaction_id):
self._committed = (project, commit_request, transaction_id)
return self._index_updates, self._completed_keys
class _Entity(dict):
def __init__(self):
super(_Entity, self).__init__()
from google.cloud.datastore.key import Key
self.key = Key('KIND', project='PROJECT')
class _Client(object):
def __init__(self, project, connection, namespace=None):
self.project = project
self._connection = connection
self.namespace = namespace
self._batches = []
def _push_batch(self, batch):
self._batches.insert(0, batch)
def _pop_batch(self):
return self._batches.pop(0)
@property
def current_batch(self):
return self._batches and self._batches[0] or None
class _NoCommitBatch(object):
def __init__(self, client):
from google.cloud.datastore.batch import Batch
self._client = client
self._batch = Batch(client)
def __enter__(self):
self._client._push_batch(self._batch)
return self._batch
def __exit__(self, *args):
self._client._pop_batch()
| {
"content_hash": "a7e2abed327b189790cb97829a77c382",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 73,
"avg_line_length": 32.03292181069959,
"alnum_prop": 0.593268242548818,
"repo_name": "jgeewax/gcloud-python",
"id": "c09304df6f5b5c5922982a5416a9e9e8efd2d190",
"size": "8360",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "datastore/unit_tests/test_transaction.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "158375"
},
{
"name": "Python",
"bytes": "2750680"
},
{
"name": "Shell",
"bytes": "3120"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TexttemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="texttemplate", parent_name="scatter", **kwargs):
super(TexttemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
| {
"content_hash": "d46e4ee33ae111372a3cfb7de1da4cb5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 84,
"avg_line_length": 38.583333333333336,
"alnum_prop": 0.6241900647948164,
"repo_name": "plotly/plotly.py",
"id": "f95e1320db750e43946cbddeb77b00b23fafbf63",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatter/_texttemplate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Synchronization primitives backed by the datastore.
The `Mutex` class's interface matches the Python standard library's
`threading.Lock` class.
"""
import datetime
import random
import time
from google.appengine.ext import db
from google.appengine.api import datastore
class Mutex(db.Model):
"""A mutex backed by the datastore."""
LIFETIME = datetime.timedelta(milliseconds=20*1000)
_held = db.BooleanProperty(required=True, default=False)
_last_acquired = db.DateTimeProperty(required=True, auto_now=True)
def acquire(self, blocking=True, retry_delay_ms=100):
"""Acquire this mutex. Fails if it's already held.
Args:
blocking: boolean. If `True`, blocks until the mutex can be acquired. If
`False`, returns immediately.
retry_delay_ms: integer. The desired delay between acquisition retries.
Each actually delay is a random amount between half and twice this.
Returns:
`True` if the mutex was acquired, `False` otherwise.
"""
if not self.is_saved():
self.put()
while True:
mutex = db.run_in_transaction(self._acquire_or_fail)
if mutex is not None:
self._held = True
self._last_acquired = mutex._last_acquired
return True
elif not blocking:
return False
else:
retry_delay_s = float(retry_delay_ms) / 1000
time.sleep(random.uniform(retry_delay_s / 2, retry_delay_s * 2))
def release(self):
"""Releases the mutex. Raises `db.BadRequestError` if not currently held."""
if not self.is_saved():
raise db.BadRequestError("Can't release mutex that's not saved.")
db.run_in_transaction(self._release_or_fail)
self._held = False
def is_held(self):
"""Returns `True` if the mutex is held, `False` otherwise.
This takes expiration into account.
"""
elapsed = datetime.datetime.now() - self._last_acquired
return self._held and elapsed < Mutex.LIFETIME
def _acquire_or_fail(self):
"""Acquires the given mutex.
Intended to be run in a transaction.
Returns:
The mutex if it was acquired, otherwise `None`.
"""
mutex = Mutex.get(self.key())
if not mutex.is_held():
mutex._held = True
mutex.put()
return mutex
else:
return None
def _release_or_fail(self):
"""Releases the given mutex.
Raises `db.BadRequestError` if it's held.
Intended to be run in a transaction.
Returns:
Mutex
"""
mutex = Mutex.get(self.key())
if mutex.is_held():
mutex._held = False
mutex.put()
return mutex
else:
raise db.BadRequestError("Can't release mutex that's not held.")
def acquire(mutexes, blocking=True):
"""Acquires the given mutexes.
To prevent deadlocks, mutexes are acquired in order of the partial ordering
defined by their keys.
Args:
mutexes: Sequence of Mutex objects or keys
blocking: Boolean. If `True`, blocks until all mutexes can be acquired. If
`False`, returns immediately.
Returns:
`True` if all mutexes were acquired, `False` otherwise.
"""
mutexes = _to_mutexes(mutexes)
mutexes.sort(key=lambda m: m.key())
acquired = []
try:
for mutex in mutexes:
if mutex.acquire(blocking=blocking):
acquired.append(mutex)
else:
return False
finally:
if len(acquired) < len(mutexes):
release(acquired)
return True
def release(mutexes):
"""Releases the given mutexes.
Mutexes are released according to the reverse of the partial ordering
defined by their keys. This isn't strictly necessary to prevent deadlocks,
but it's still a nice practice.
Args:
mutexes: sequence of Mutex objects or keys
"""
mutexes = _to_mutexes(mutexes)
mutexes.sort(key=lambda m: m.key(), reverse=True)
for mutex in mutexes:
mutex.release()
def _to_mutexes(mutexes_or_keys):
"""Normalizes and type checks the given sequence.
Args:
mutexes_or_keys: sequence of Mutex objects or keys
Returns:
list of Mutexes
"""
mutexes = []
for arg in mutexes_or_keys:
if isinstance(arg, Mutex):
mutexes.append(arg)
elif isinstance(arg, db.Key):
mutexes.append(Mutex.get(arg))
else:
raise db.BadArgumentError(
'Expected a Mutex instance or key; received %s (a %s).' %
(arg, datastore.typename(arg)))
return mutexes
| {
"content_hash": "f5cfd0a439bc6cdbd644f7ab33cb2834",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 80,
"avg_line_length": 23.994535519125684,
"alnum_prop": 0.6606695513550445,
"repo_name": "GoogleCloudPlatform/appengine-python-standard",
"id": "05f40640a18c129e4ea86bb51dd1ec2a9dcbdd1a",
"size": "4993",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/google/appengine/ext/db/sync.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3778254"
}
],
"symlink_target": ""
} |
"""
Bagelbot script for checking for attendance for an upcoming bagelbot meeting.
"""
import logging
import sys
import time
from datetime import datetime, timedelta
from config import ATTENDANCE_TIME_LIMIT
from utils import YES, NO, initialize, nostdout, download_shelve_from_s3, upload_shelve_to_s3
def check_attendance(store, sc, users=None):
"""Pings all slack users with the email address stored in config.py.
It asks if they are available for today's meeting, and waits for a pre-determined amount of time.
If all users respond, or if the time limit is reached, the script exits
and writes today's upcoming meeting to the store.
Args:
store (instance): A persistent, dictionary-like object used to keep information about past/future meetings
sc (SlackClient): An instance of SlackClient
users (list): A list of users to ping for role call (overrides store['everyone'])
"""
start = datetime.now()
todays_meeting = {"date": start.date(), "available": [], "out": []}
if not users:
users = store["everyone"]
user_len = len(users)
messages_sent = {}
if sc.rtm_connect():
for user in users:
logging.info("Pinging %s...", user)
message = sc.api_call(
"chat.postMessage",
channel="@" + user,
as_user=True,
text="Will you be available for today's ({:%m/%d/%Y}) :coffee: and :bagel: meeting? (yes/no)".format(
todays_meeting["date"]
),
)
message["user"] = user
messages_sent[message["channel"]] = message
logging.info("Waiting for responses...")
while True:
try:
events = sc.rtm_read()
for event in events:
logging.debug(event)
if (
event["type"] == "message"
and event["channel"] in messages_sent
and float(event["ts"]) > float(messages_sent[event["channel"]]["ts"])
):
lower_txt = event["text"].lower().strip()
user = messages_sent[event["channel"]]["user"]
logging.info(
"%s responded with '%s'", user, event["text"].encode("ascii", "ignore")
)
user_responded = False
if lower_txt in YES:
user_responded = True
todays_meeting["available"].append(user)
sc.api_call(
"chat.postMessage",
channel=event["channel"],
as_user=True,
text="Your presence has been acknowledged! Thank you! :tada:",
)
elif lower_txt in NO:
user_responded = True
todays_meeting["out"].append(user)
sc.api_call(
"chat.postMessage",
channel=event["channel"],
as_user=True,
text="Your absence has been acknowledged! You will be missed! :cry:",
)
if user_responded:
# User has responded to bagelbot, don't listen to this channel anymore.
messages_sent.pop(event["channel"])
except:
logging.exception("Something went wrong reading Slack RTM Events.")
all_accounted_for = (
len(todays_meeting["available"]) + len(todays_meeting["out"]) == user_len
)
if (
datetime.now() > (start + timedelta(seconds=ATTENDANCE_TIME_LIMIT))
or all_accounted_for
):
if not all_accounted_for:
# Move any remaining users over to 'out' at the end of the time limit - assuming they aren't available
todays_meeting["out"] += [
u
for u in users
if u not in todays_meeting["available"] and u not in todays_meeting["out"]
]
logging.info(
"Finished! These people aren't available today: %s",
", ".join(todays_meeting["out"]),
)
# Store this upcoming meeting under a separate key for use by generate_meeting.py upon actual meeting generation.
store["upcoming"] = todays_meeting
break
else:
time.sleep(1)
else:
logging.info("Connection Failed, invalid token?")
def main(args):
"""
Initialize the shelf, possibly sync to s3, then check attendance, close
the shelf and maybe sync the shelf again.
Args:
args (ArgumentParser args): Parsed arguments that impact how the check_attandance runs
"""
if args.s3_sync:
download_shelve_from_s3()
if args.debug:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(message)s")
else:
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
store, sc = initialize(update_everyone=True)
try:
check_attendance(store, sc, users=args.users)
finally:
store.close()
if args.s3_sync:
upload_shelve_to_s3()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Check to see if any Slack members will be missing today's meeting."
)
parser.add_argument(
"--users",
"-u",
dest="users",
metavar="P",
nargs="+",
required=False,
default=[],
help="list of people to check in with (usernames only)",
)
parser.add_argument(
"--from-cron", "-c", action="store_true", help="Silence all logging statements (stdout)."
)
parser.add_argument(
"--debug", "-d", action="store_true", help="Log all events bagelbot can see."
)
parser.add_argument(
"--s3-sync",
"-s",
action="store_true",
help="Synchronize SHELVE_FILE with AWS S3 before and after checking attendance.",
)
parsed_args = parser.parse_args()
if parsed_args.from_cron:
with nostdout():
main(parsed_args)
else:
main(parsed_args)
| {
"content_hash": "34878665c3f3b4a33079d054c63ce092",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 129,
"avg_line_length": 37.51123595505618,
"alnum_prop": 0.5149019020518196,
"repo_name": "statmuse/bagelbot",
"id": "8fbc35f0f26ac198a2e2de47a58e4109e404a438",
"size": "6699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check_attendance.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "192"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "Python",
"bytes": "27076"
}
],
"symlink_target": ""
} |
'''
Created on 2014年2月22日
@author: lan (www.9miao.com)
'''
import sys
from gtwisted.core import reactor
from gtwisted.core.rpc import PBServerProtocl,PBServerFactory
from gtwisted.utils import log
class MyPBServerProtocl(PBServerProtocl):
def remote_getResult(self,a,b):
print a,b
dd = self.getRootObject()
ss = dd.callRemoteForResult('getResult',9,9)
print 'clinet result',ss
print "11111111111"
return a+b
class MyPBServerFactory(PBServerFactory):
protocol = MyPBServerProtocl
reactor.listenTCP(1000, MyPBServerFactory())
log.startLogging(sys.stdout)
reactor.run()
| {
"content_hash": "f3c8ed3ee6edaa0986b97d5a8ef89668",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 61,
"avg_line_length": 24.25,
"alnum_prop": 0.6701030927835051,
"repo_name": "viphxin/gtwisted",
"id": "dcdfddd4a379cd594839ca5207414de55106599a",
"size": "699",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gtwisted/build/lib.linux-x86_64-2.7/gtwisted/test/pbserver_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "221820"
}
],
"symlink_target": ""
} |
"""
...
"""
from __future__ import absolute_import, division, print_function
import logging
from .base_stat_swfilter import BaseStatSWFilter
class AlphaBetaSWFilter(BaseStatSWFilter):
"""
https://en.wikipedia.org/wiki/Alpha_beta_filter
"""
__logger = logging.getLogger(__name__)
def aggregate_windows(self,
window_seq,
alpha=0.85,
beta=0.005,
return_error=False,
**kwargs):
"""
:param window_seq:
:param alpha:
:param beta:
:param return_error:
:param kwargs:
:return:
"""
estimation = 0
velocity = 0
for window in window_seq:
for item in window:
position = estimation + velocity
residual_error = item - position
position += alpha * residual_error
velocity += beta * residual_error
estimation = position
if return_error:
yield residual_error
else:
yield estimation
| {
"content_hash": "c9e3b4d578da6ed74d8bc6cb95a6d273",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 64,
"avg_line_length": 24.6875,
"alnum_prop": 0.480168776371308,
"repo_name": "w495/python-video-shot-detector",
"id": "3136ea4a9ddc562d4f41a877d2df54263becf7a0",
"size": "1209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shot_detector/filters/sliding_window/alpha_beta_swfilter.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Makefile",
"bytes": "1751"
},
{
"name": "Python",
"bytes": "599048"
},
{
"name": "Shell",
"bytes": "89"
}
],
"symlink_target": ""
} |
"""
docker_registry.drivers.ufile
~~~~~~~~~~~~~~~~~~~~~~~~~~
UFile is a distributed key/value storage provided by UCloud.
See http://docs.ucloud.cn/ufile/index.html for additional info.
"""
import functools
import logging
import os
import time
import requests
from docker_registry.core import driver
from docker_registry.core import exceptions as de
from docker_registry.core import lru
from ucloudauth import UFileAuth
logger = logging.getLogger(__name__)
GET = "get"
POST = "post"
PUT = "put"
HEAD = "head"
DELETE = "delete"
def add_slash(path):
if path == "" or path.endswith("/"):
return path
else:
return path + "/"
def remove_slash(path):
while path.endswith("/"):
path = path[:-1]
return path
def add_to_index(func):
@functools.wraps(func)
def add_key_to_index(obj, key, *args, **kwargs):
key = remove_slash(key)
dirname, basename = os.path.split(key)
obj._mkdir(dirname)
func(obj, key, *args, **kwargs)
obj._update_index(dirname, adds=[basename])
return add_key_to_index
class Storage(driver.Base):
root_index = "ufile-root-index"
index_header = {"content-type": "application/ufile-index"}
default_retries = 3
default_retry_interval = 1
default_timeout = 60
chunk_size = 10 * 1024 * 1024 # 10MB
def __init__(self, path=None, config=None):
# turn on streaming support
self.supports_bytes_range = True
# config check
for key in ("ufile_baseurl", "ufile_public_key", "ufile_private_key"):
val = getattr(config, key)
if not val:
raise ValueError("storage config {0}={1} is not valid".format(
key, val
))
session = requests.session()
session.auth = UFileAuth(
config.ufile_public_key,
config.ufile_private_key
)
self._session = session
self._baseurl = config.ufile_baseurl
self._retries = config.ufile_retries or self.default_retries
self._interval = (
config.ufile_retry_interval or self.default_retry_interval
)
self._timeout = (config.ufile_timeout or self.default_timeout)
def exists(self, path):
"""check if key exists
:param path: key path
"""
logger.info("check for <{0}>".format(path))
dirname, basename = os.path.split(path)
dirname = dirname or self.root_index
if not basename:
# key is dir
# "HEAD" the index file so we don't download the whole conetnt
try:
self._request(HEAD, dirname)
return True
except de.FileNotFoundError:
logger.error("dir not found: <{0}>".format(dirname))
return False
# key is file
# download the index file first
try:
res = self._request(GET, dirname)
except de.FileNotFoundError:
logger.error("index not found: <{0}>".format(dirname))
return False
# encode key name to bytes, so we can compare with response directly
s_basename = basename.encode("utf8")
for line in res.iter_lines():
# looking for key in index file
if line == s_basename:
return True
logger.debug("<{0}> not found".format(basename))
return False
def get_size(self, path):
"""check filesize
return key's filesize in bytes
:param path: key path
"""
res = self._request(HEAD, path)
return int(res.headers["content-length"])
@lru.get
def get_content(self, path):
"""get content directly
:param path: key path
"""
res = self._request(GET, path)
return res.content
@lru.set
@add_to_index
def put_content(self, path, content):
"""save content to ufile
use `add_to_index` decorator to add path to its dir index
:param path: key path
:param content: content of key
"""
key = remove_slash(path)
logger.info("simple upload <{0}>".format(key))
self._request(PUT, key, data=content)
return path
@lru.remove
def remove(self, path):
"""remove anything, dir or file
:param path: key/dir path
"""
if self._is_dir(path):
self._rmtree(path)
# delete dir in parent-dir index
parent, dir_path = os.path.split(remove_slash(path))
self._update_index(parent, deletes=[dir_path])
else:
self._rm(path)
index, to_delete = os.path.split(path)
self._update_index(index, deletes=[to_delete])
def stream_read(self, path, bytes_range=None):
"""streaming content for output, support bytes-range
:param path: key path
:param bytes_range: bytes range tuple (beg, end)
"""
headers = dict()
if bytes_range:
header_range = "bytes={0}-{1}".format(*bytes_range)
logger.info("bytes range = {0}".format(header_range))
headers["range"] = header_range
res = self._request(GET, path, headers=headers, stream=True)
for data in res.iter_content(self.chunk_size):
yield data
@add_to_index
def stream_write(self, path, fp):
"""ufile multipart upload
:param key: upload file path
:param fp: file-object like data
"""
key = remove_slash(path)
logger.info("multipart upload begin <{0}>".format(key))
upload_id, block_size = self._init_multipart_upload(key)
data = fp.read(block_size)
etags = []
try:
while data:
etag = self._multipart_upload(key, upload_id, len(etags), data)
etags.append(etag)
data = fp.read(block_size)
except BaseException:
self._abort_multipart_upload(key, upload_id)
else:
self._finish_multipart_upload(key, upload_id, ",".join(etags))
def list_directory(self, path=None):
"""list_directory
returns a list of file names (including dir names, with heading slash)
:param dir_path: dir path to ls
"""
for fname in self._lsdir(path):
yield remove_slash(fname)
def _lsdir(self, path):
"""list all content in the target dir
returns a list of filenames
:param path: dir path
"""
dir_path = remove_slash(path or "")
logger.info("list dir for {0}".format(dir_path))
res = self._request(GET, dir_path)
for key, val in self.index_header.items():
if res.headers.get(key) != val:
raise de.FileNotFoundError("{0} is not there".format(dir_path))
return [
os.path.join(dir_path, fname)
for fname in res.text.splitlines()
if fname
]
def _request(self, method, key, **options):
"""send an http request to ufile
do some basic check
if it's ok, return `response` object like normal requests do
if it's 404, raise FileNotFound
otherwise, raise http error
:param method: http verb (lowercase)
:param **options: options will direct pass to requests
"""
if key.startswith("/"):
key = key[1:]
logger.info("request {0} {1}".format(method, key))
req = getattr(self._session, method)
url = "{0}/{1}".format(self._baseurl, key)
options.setdefault("timeout", self._timeout)
for __ in range(self._retries):
try:
res = req(url, **options)
except BaseException as exc:
logger.info(
"http {0} {1} error {2}".format(method, url, exc),
exc_info=True
)
time.sleep(self._interval)
continue
if res.ok:
# everything ok, break retry loop
return res
elif res.status_code == 404:
# not found, break retry loop
raise de.FileNotFoundError("{0} is not there".format(key))
logger.info("http {0} {1} failed: {2}\n{3}\nretry in {4}s".format(
method, url, res, res.text, self._interval
))
time.sleep(self._interval)
else:
# something went wrong we tried our best, raise error
res.raise_for_status()
def _is_dir(self, path):
"""check if dir is a valid path(endswith('/'))"""
is_dir = path.endswith("/")
logger.info("test dir <{0}> {1}".format(path, is_dir))
return is_dir
def _rmtree(self, dir_path):
"""remove dir recursively
:param dir_path: dir path to remove
"""
dir_path = remove_slash(dir_path)
# remove all file in the index
logger.info("remove dir {0}".format(dir_path))
for key in self._lsdir(dir_path):
if self._is_dir(key):
key = remove_slash(key)
logger.info("going to delete dir {0}".format(key))
self._rmtree(key)
else:
logger.info("going to delete key {0}".format(key))
self._rm(key)
# remove index
logger.info("going to delete index {0}".format(dir_path))
self._rm(dir_path)
def _rm(self, key):
"""remove file
:param key: file to delete
"""
logger.info("remove {0}".format(key))
self._request(DELETE, key)
def _mkdir(self, dir_path):
"""mkdir and update dir index recursively
:param dir_path: dir path
"""
logger.info("mkdir {0}".format(dir_path))
dir_path = remove_slash(dir_path)
if not dir_path:
logger.info("skip mkdir the root dir")
return
parent, dirname = os.path.split(dir_path)
dirname = add_slash(dirname)
self._update_index(parent, adds=[dirname])
if parent:
# not root, make parent dir
logger.info(
"continue to make <{0}>'s parent dir <{1}>".format(
dir_path, parent
)
)
return self._mkdir(parent)
def _init_multipart_upload(self, key):
"""initiative a multipart upload
returns a tuple (upload_id, block_size)
:param key: upload file path
"""
logger.info("init multipart upload for <{0}>".format(key))
res = self._request(POST, "{0}?uploads".format(key))
upload_id, block_size = res.json()["UploadId"], res.json()["BlkSize"]
logger.info(
"<{0}> multipart upload inited: block={1}, upload_id={2}".format(
key, block_size, block_size
)
)
return upload_id, block_size
def _multipart_upload(self, key, upload_id, part_number, data):
"""multipart upload, upload part of file
:param key: upload file path
:param upload_id: multipart upload id
:param part_number: part number of the whole file, 0 based
:param data: part of file to upload
"""
logger.info(
"multipart upload part {0} for <{1})>, upload_id={2}".format(
part_number, key, upload_id
)
)
res = self._request(
PUT, key,
params=dict(uploadId=upload_id, partNumber=part_number),
data=data
)
etag = res.headers["etag"]
logger.debug(
"{0} part {1}'s etag is {2}".format(key, part_number, etag)
)
return etag
def _abort_multipart_upload(self, key, upload_id):
"""abort multipart upload procedure
:param key: upload file path
:param upload_id: multipart upload_id
"""
logger.info(
"abort multipart upload for <{0})>, upload_id={1}".format(
key, upload_id,
)
)
self._request(DELETE, key, params=dict(uploadId=upload_id))
def _finish_multipart_upload(self, key, upload_id, etags):
"""finish multipart upload
:param key: upload file path
:param upload_id: multipart upload_id
:param etags: parts etags joined with ","
"""
logger.info(
"finsish multipart upload for <{0})>, upload_id={1}".format(
key, upload_id,
)
)
self._request(POST, key, params=dict(uploadId=upload_id), data=etags)
def _update_index(self, index, adds=None, deletes=None):
"""add/delete filename form index
:param index: index path
:param adds: list containing filenames to add to index
:param deletes: list containing filenames to delete form index
"""
if index == "":
index = self.root_index
logger.info(
"index: {0} has changed, add: {1}, delete: {2}".format(
index, adds, deletes
)
)
try:
res = self._request(GET, index)
current = set(res.text.splitlines())
except de.FileNotFoundError:
# dir not exists
current = set()
current.update(adds or {})
current -= set(deletes or {})
self._request(
PUT, index,
data="\n".join(current) + "\n",
headers=self.index_header
)
| {
"content_hash": "5cfc54c1f2ea0156b499f0a037441924",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 79,
"avg_line_length": 31.327188940092167,
"alnum_prop": 0.5458958517210944,
"repo_name": "SkyLothar/docker-registry-driver-ufile",
"id": "70584d0f1d784c2032899e454a65ba33523be3bc",
"size": "13620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker_registry/drivers/ufile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19889"
}
],
"symlink_target": ""
} |
import sys
import os
from l20n.compiler.js import Compiler
from l20n.format.lol.parser import Parser
from pyjs.serializer import Serializer
def read_file(path):
with file(path) as f:
return f.read()
def get_lol(path):
s = read_file(path)
parser = Parser()
lol = parser.parse(s)
return lol
def compile(path, output=None):
lol = get_lol(path)
compiler = Compiler()
js = compiler.compile(lol)
string = Serializer.dump_program(js)
if output == 'console':
print(string)
return
if output is None:
output = os.path.splitext(path)[0]
output = '%s.%s' % (output, 'j20n')
f = open(output, mode='w')
f.write(string)
f.close()
return
if __name__ == '__main__':
if len(sys.argv) > 2:
compile(sys.argv[1], sys.argv[2])
else:
compile(sys.argv[1])
| {
"content_hash": "a4db3aa616df03a90afc73af4c8c6a4c",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 43,
"avg_line_length": 21.170731707317074,
"alnum_prop": 0.5956221198156681,
"repo_name": "stasm/python-l20n",
"id": "2c70d14f353326ebe0ff78540af6df7e179f9db2",
"size": "868",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/compiler.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "90061"
}
],
"symlink_target": ""
} |
import sys, dia, string
def bbox_cb (data, flags) :
layer = data.active_layer
dest = data.add_layer ("BBox of '%s' (%s)" % (layer.name, sys.platform), -1)
box_type = dia.get_object_type ("Standard - Box")
for o in layer.objects :
bb = o.bounding_box
b, h1, h2 = box_type.create (bb.left, bb.top)
b.move_handle (b.handles[7], (bb.right, bb.bottom), 0, 0)
b.properties["show_background"] = 0
b.properties["line_width"] = 0
b.properties["line_colour"] = 'red'
dest.add_object (b)
def annotate_cb (data, flags) :
layer = data.active_layer
dest = data.add_layer ("Annotated '%s' (%s)" % (layer.name, sys.platform), -1)
ann_type = dia.get_object_type ("Standard - Text")
for o in layer.objects :
bb = o.bounding_box
a, h1, h2 = ann_type.create (bb.right, bb.top)
a.properties["text"] = "h: %g w: %g" % (bb.bottom - bb.top, bb.right - bb.left)
dest.add_object (a)
dia.register_action ("DrawBoundingbox", "Draw BoundingBox",
"/DisplayMenu/Debug/DebugExtensionStart",
bbox_cb)
dia.register_action ("AnnotateMeasurements", "Annotate",
"/DisplayMenu/Debug/DebugExtensionStart",
annotate_cb)
| {
"content_hash": "a255bd9863988608b1f562814976756a",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 81,
"avg_line_length": 31.842105263157894,
"alnum_prop": 0.6132231404958678,
"repo_name": "krattai/monoflow",
"id": "3839ed0496c338a1354c6c9d7af1babe8d0d6b95",
"size": "2098",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "interfaces/dia/plug-ins/python/bbox.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6211635"
},
{
"name": "C#",
"bytes": "265955"
},
{
"name": "C++",
"bytes": "1067898"
},
{
"name": "CSS",
"bytes": "8774"
},
{
"name": "D",
"bytes": "70"
},
{
"name": "Go",
"bytes": "1075651"
},
{
"name": "JavaScript",
"bytes": "22694"
},
{
"name": "Objective-C",
"bytes": "235362"
},
{
"name": "Objective-C++",
"bytes": "12629"
},
{
"name": "Python",
"bytes": "163871"
},
{
"name": "Shell",
"bytes": "271955"
},
{
"name": "XSLT",
"bytes": "52311"
}
],
"symlink_target": ""
} |
import collections
import os
import os.path
import re
import sys
import docutils.nodes
from typing import Dict, List, Set
DIRECTIVE_PAT = re.compile(r'\s*\.\. (\S+)::\s*([^\n]*)\n?$')
REF_DEF_PAT = re.compile(r'\s*\.\. _([^:\s]+):')
ROLE_PAT = re.compile(r'(?::(\S+):)`(?:[^<`]+\s*<)?([^\s>]+)>?`', re.M)
RefDef = collections.namedtuple('Link', ['title', 'href', 'path', 'intersphinx'])
class LinkAnalyzerVisitor:
def __init__(self, path: str, document) -> None:
self.path = os.path.normpath(path)
self.document = document
self.pending_ref_defs = [] # type: List[RefDef]
self.ref_defs = [] # type: List[RefDef]
# Anything that needs to link to this document directly needs to know
# its title
self.title = ''
doc_ref = os.path.normpath(self.path)
doc_ref = '/' + os.path.splitext(doc_ref)[0]
self.ref_defs.append(RefDef(self.title, doc_ref, self.path, False))
def dispatch_visit(self, node):
if isinstance(node, docutils.nodes.system_message):
return
if isinstance(node, docutils.nodes.target):
if not node.hasattr('ids'):
return
if node.hasattr('refuri'):
# This is a link, not a reference definition
return
ids = [RefDef(node_id, node_id, self.path, False) for node_id in node['ids']]
if node.hasattr('names'):
ids.extend([RefDef(name, name, self.path, False) for name in node['names']])
if ids:
self.pending_ref_defs.extend(ids)
return
if isinstance(node, docutils.nodes.title):
title = node.astext()
if not self.title:
self.title = title
self.ref_defs.extend([ref_def._replace(title=title) for ref_def in self.pending_ref_defs])
self.pending_ref_defs.clear()
return
if isinstance(node, docutils.nodes.section):
return
self.ref_defs.extend(self.pending_ref_defs)
self.pending_ref_defs.clear()
def dispatch_departure(self, node):
# At the end of the document, finalize our title and pending ref_defs
if isinstance(node, docutils.nodes.document):
self.ref_defs[0] = self.ref_defs[0]._replace(title=self.title)
self.ref_defs.extend(self.pending_ref_defs)
self.pending_ref_defs.clear()
class LinkCache:
def __init__(self, root: str) -> None:
self.root = root
# Used for quick link resolution
self.paths = {} # type: Dict[str, List[RefDef]]
self.node_ids = {} # type: Dict[str, RefDef]
# Used for incremental builds
self.dependencies = {} # type: Dict[str, List[str]]
def update(self, env):
for root, _, files in os.walk(self.root):
for filename in files:
filename = os.path.join(root, filename)
if not filename.endswith('.txt'):
continue
rel_filename = filename.replace(self.root, '')
document = env.get_document(filename)
visitor = LinkAnalyzerVisitor(filename, document)
document.walkabout(visitor)
for ref_def in visitor.ref_defs:
self.add(ref_def)
# for ref_id in self.node_ids:
# print(ref_id)
def add(self, ref_def: RefDef):
if ref_def.path not in self.paths:
self.paths[ref_def.path] = []
self.paths[ref_def.path].append(ref_def)
self.node_ids[ref_def.href] = ref_def
def __getitem__(self, node_id: str) -> RefDef:
node_id = self.__normalize_node_id(node_id)
# Unfortunately, docutils throws away case information for target nodes,
# requiring us to check both the "proper" case (for case-sensitive nodes
# that we construct), and the lower case.
try:
return self.node_ids[node_id]
except KeyError:
return self.node_ids[node_id.lower()]
def get_dependencies(self, path: str):
if path not in self.paths:
return []
closed_list = set() # type: Set[str]
open_list = self.paths[path][:]
while open_list:
cur = open_list.pop().path
if cur in closed_list:
continue
open_list.extend(self.paths[cur])
return closed_list
def __parse(self, path):
pass
def __normalize_node_id(self, node_id: str) -> str:
"""Sanity-process a node id; handle newlines, etc."""
return node_id.replace('\n', ' ')
| {
"content_hash": "108b170258d63fea9c25b97a5d1897e7",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 102,
"avg_line_length": 32.296551724137935,
"alnum_prop": 0.5620328849028401,
"repo_name": "jeff-allen-mongo/mut",
"id": "5d00369272e63a852302b5ff47eab55472accb27",
"size": "4707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mut/tuft/linkcache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "246658"
},
{
"name": "Shell",
"bytes": "5139"
}
],
"symlink_target": ""
} |
import re
import math
try:
import numpy
except ImportError:
pass
from nltk.tokenize.api import TokenizerI
BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = 0, 1
LC, HC = 0, 1
DEFAULT_SMOOTHING = [0]
class TextTilingTokenizer(TokenizerI):
"""Tokenize a document into topical sections using the TextTiling algorithm.
This algorithm detects subtopic shifts based on the analysis of lexical
co-occurrence patterns.
The process starts by tokenizing the text into pseudosentences of
a fixed size w. Then, depending on the method used, similarity
scores are assigned at sentence gaps. The algorithm proceeds by
detecting the peak differences between these scores and marking
them as boundaries. The boundaries are normalized to the closest
paragraph break and the segmented text is returned.
:param w: Pseudosentence size
:type w: int
:param k: Size (in sentences) of the block used in the block comparison method
:type k: int
:param similarity_method: The method used for determining similarity scores:
`BLOCK_COMPARISON` (default) or `VOCABULARY_INTRODUCTION`.
:type similarity_method: constant
:param stopwords: A list of stopwords that are filtered out (defaults to NLTK's stopwords corpus)
:type stopwords: list(str)
:param smoothing_method: The method used for smoothing the score plot:
`DEFAULT_SMOOTHING` (default)
:type smoothing_method: constant
:param smoothing_width: The width of the window used by the smoothing method
:type smoothing_width: int
:param smoothing_rounds: The number of smoothing passes
:type smoothing_rounds: int
:param cutoff_policy: The policy used to determine the number of boundaries:
`HC` (default) or `LC`
:type cutoff_policy: constant
>>> from nltk.corpus import brown
>>> tt = TextTilingTokenizer(demo_mode=True)
>>> text = brown.raw()[:10000]
>>> s, ss, d, b = tt.tokenize(text)
>>> b
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]
"""
def __init__(self,
w=20,
k=10,
similarity_method=BLOCK_COMPARISON,
stopwords=None,
smoothing_method=DEFAULT_SMOOTHING,
smoothing_width=2,
smoothing_rounds=1,
cutoff_policy=HC,
demo_mode=False):
if stopwords is None:
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
self.__dict__.update(locals())
del self.__dict__['self']
def tokenize(self, text):
"""Return a tokenized copy of *text*, where each "token" represents
a separate topic."""
lowercase_text = text.lower()
paragraph_breaks = self._mark_paragraph_breaks(text)
text_length = len(lowercase_text)
# Tokenization step starts here
# Remove punctuation
nopunct_text = ''.join(c for c in lowercase_text
if re.match("[a-z\-\' \n\t]", c))
nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text)
tokseqs = self._divide_to_tokensequences(nopunct_text)
# The morphological stemming step mentioned in the TextTile
# paper is not implemented. A comment in the original C
# implementation states that it offers no benefit to the
# process. It might be interesting to test the existing
# stemmers though.
#words = _stem_words(words)
# Filter stopwords
for ts in tokseqs:
ts.wrdindex_list = [wi for wi in ts.wrdindex_list
if wi[0] not in self.stopwords]
token_table = self._create_token_table(tokseqs, nopunct_par_breaks)
# End of the Tokenization step
# Lexical score determination
if self.similarity_method == BLOCK_COMPARISON:
gap_scores = self._block_comparison(tokseqs, token_table)
elif self.similarity_method == VOCABULARY_INTRODUCTION:
raise NotImplementedError("Vocabulary introduction not implemented")
if self.smoothing_method == DEFAULT_SMOOTHING:
smooth_scores = self._smooth_scores(gap_scores)
# End of Lexical score Determination
# Boundary identification
depth_scores = self._depth_scores(smooth_scores)
segment_boundaries = self._identify_boundaries(depth_scores)
normalized_boundaries = self._normalize_boundaries(text,
segment_boundaries,
paragraph_breaks)
# End of Boundary Identification
segmented_text = []
prevb = 0
for b in normalized_boundaries:
if b == 0:
continue
segmented_text.append(text[prevb:b])
prevb = b
if prevb < text_length: # append any text that may be remaining
segmented_text.append(text[prevb:])
if not segmented_text:
segmented_text = [text]
if self.demo_mode:
return gap_scores, smooth_scores, depth_scores, segment_boundaries
return segmented_text
def _block_comparison(self, tokseqs, token_table):
"Implements the block comparison method"
def blk_frq(tok, block):
ts_occs = filter(lambda o: o[0] in block,
token_table[tok].ts_occurences)
freq = sum([tsocc[1] for tsocc in ts_occs])
return freq
gap_scores = []
numgaps = len(tokseqs)-1
for curr_gap in range(numgaps):
score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0
score = 0.0
#adjust window size for boundary conditions
if curr_gap < self.k-1:
window_size = curr_gap + 1
elif curr_gap > numgaps-self.k:
window_size = numgaps - curr_gap
else:
window_size = self.k
b1 = [ts.index
for ts in tokseqs[curr_gap-window_size+1 : curr_gap+1]]
b2 = [ts.index
for ts in tokseqs[curr_gap+1 : curr_gap+window_size+1]]
for t in token_table:
score_dividend += blk_frq(t, b1)*blk_frq(t, b2)
score_divisor_b1 += blk_frq(t, b1)**2
score_divisor_b2 += blk_frq(t, b2)**2
try:
score = score_dividend/math.sqrt(score_divisor_b1*
score_divisor_b2)
except ZeroDivisionError:
pass # score += 0.0
gap_scores.append(score)
return gap_scores
def _smooth_scores(self, gap_scores):
"Wraps the smooth function from the SciPy Cookbook"
return list(smooth(numpy.array(gap_scores[:]),
window_len = self.smoothing_width+1))
def _mark_paragraph_breaks(self, text):
"""Identifies indented text or line breaks as the beginning of
paragraphs"""
MIN_PARAGRAPH = 100
pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*")
matches = pattern.finditer(text)
last_break = 0
pbreaks = [0]
for pb in matches:
if pb.start()-last_break < MIN_PARAGRAPH:
continue
else:
pbreaks.append(pb.start())
last_break = pb.start()
return pbreaks
def _divide_to_tokensequences(self, text):
"Divides the text into pseudosentences of fixed size"
w = self.w
wrdindex_list = []
matches = re.finditer("\w+", text)
for match in matches:
wrdindex_list.append((match.group(), match.start()))
return [TokenSequence(i/w, wrdindex_list[i:i+w])
for i in range(0, len(wrdindex_list), w)]
def _create_token_table(self, token_sequences, par_breaks):
"Creates a table of TokenTableFields"
token_table = {}
current_par = 0
current_tok_seq = 0
pb_iter = par_breaks.__iter__()
current_par_break = next(pb_iter)
if current_par_break == 0:
try:
current_par_break = next(pb_iter) #skip break at 0
except StopIteration:
raise ValueError(
"No paragraph breaks were found(text too short perhaps?)"
)
for ts in token_sequences:
for word, index in ts.wrdindex_list:
try:
while index > current_par_break:
current_par_break = next(pb_iter)
current_par += 1
except StopIteration:
#hit bottom
pass
if word in token_table:
token_table[word].total_count += 1
if token_table[word].last_par != current_par:
token_table[word].last_par = current_par
token_table[word].par_count += 1
if token_table[word].last_tok_seq != current_tok_seq:
token_table[word].last_tok_seq = current_tok_seq
token_table[word]\
.ts_occurences.append([current_tok_seq,1])
else:
token_table[word].ts_occurences[-1][1] += 1
else: #new word
token_table[word] = TokenTableField(first_pos=index,
ts_occurences= \
[[current_tok_seq,1]],
total_count=1,
par_count=1,
last_par=current_par,
last_tok_seq= \
current_tok_seq)
current_tok_seq += 1
return token_table
def _identify_boundaries(self, depth_scores):
"""Identifies boundaries at the peaks of similarity score
differences"""
boundaries = [0 for x in depth_scores]
avg = sum(depth_scores)/len(depth_scores)
stdev = numpy.std(depth_scores)
#SB: what is the purpose of this conditional?
if self.cutoff_policy == LC:
cutoff = avg-stdev/2.0
else:
cutoff = avg-stdev/2.0
depth_tuples = sorted(zip(depth_scores, range(len(depth_scores))))
depth_tuples.reverse()
hp = list(filter(lambda x:x[0]>cutoff, depth_tuples))
for dt in hp:
boundaries[dt[1]] = 1
for dt2 in hp: #undo if there is a boundary close already
if dt[1] != dt2[1] and abs(dt2[1]-dt[1]) < 4 \
and boundaries[dt2[1]] == 1:
boundaries[dt[1]] = 0
return boundaries
def _depth_scores(self, scores):
"""Calculates the depth of each gap, i.e. the average difference
between the left and right peaks and the gap's score"""
depth_scores = [0 for x in scores]
#clip boundaries: this holds on the rule of thumb(my thumb)
#that a section shouldn't be smaller than at least 2
#pseudosentences for small texts and around 5 for larger ones.
clip = min(max(len(scores)/10, 2), 5)
index = clip
for gapscore in scores[clip:-clip]:
lpeak = gapscore
for score in scores[index::-1]:
if score >= lpeak:
lpeak = score
else:
break
rpeak = gapscore
for score in scores[index:]:
if score >= rpeak:
rpeak = score
else:
break
depth_scores[index] = lpeak + rpeak - 2 * gapscore
index += 1
return depth_scores
def _normalize_boundaries(self, text, boundaries, paragraph_breaks):
"""Normalize the boundaries identified to the original text's
paragraph breaks"""
norm_boundaries = []
char_count, word_count, gaps_seen = 0, 0, 0
seen_word = False
for char in text:
char_count += 1
if char in " \t\n" and seen_word:
seen_word = False
word_count += 1
if char not in " \t\n" and not seen_word:
seen_word=True
if gaps_seen < len(boundaries) and word_count > \
(max(gaps_seen*self.w, self.w)):
if boundaries[gaps_seen] == 1:
#find closest paragraph break
best_fit = len(text)
for br in paragraph_breaks:
if best_fit > abs(br-char_count):
best_fit = abs(br-char_count)
bestbr = br
else:
break
if bestbr not in norm_boundaries: #avoid duplicates
norm_boundaries.append(bestbr)
gaps_seen += 1
return norm_boundaries
class TokenTableField(object):
"""A field in the token table holding parameters for each token,
used later in the process"""
def __init__(self,
first_pos,
ts_occurences,
total_count=1,
par_count=1,
last_par=0,
last_tok_seq=None):
self.__dict__.update(locals())
del self.__dict__['self']
class TokenSequence(object):
"A token list with its original length and its index"
def __init__(self,
index,
wrdindex_list,
original_length=None):
original_length=original_length or len(wrdindex_list)
self.__dict__.update(locals())
del self.__dict__['self']
#Pasted from the SciPy cookbook: http://www.scipy.org/Cookbook/SignalSmooth
def smooth(x,window_len=11,window='flat'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the beginning and end part of the output signal.
:param x: the input signal
:param window_len: the dimension of the smoothing window; should be an odd integer
:param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
:return: the smoothed signal
example::
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
:see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve,
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w = numpy.ones(window_len,'d')
else:
w = eval('numpy.' + window + '(window_len)')
y = numpy.convolve(w/w.sum(), s, mode='same')
return y[window_len-1:-window_len+1]
def demo(text=None):
from nltk.corpus import brown
from matplotlib import pylab
tt = TextTilingTokenizer(demo_mode=True)
if text is None: text = brown.raw()[:10000]
s, ss, d, b = tt.tokenize(text)
pylab.xlabel("Sentence Gap index")
pylab.ylabel("Gap Scores")
pylab.plot(range(len(s)), s, label="Gap Scores")
pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
pylab.plot(range(len(d)), d, label="Depth scores")
pylab.stem(range(len(b)), b)
pylab.legend()
pylab.show()
| {
"content_hash": "858cf12662f10305d3bccd62945b39b0",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 101,
"avg_line_length": 36.8783185840708,
"alnum_prop": 0.5488631591577179,
"repo_name": "arju88nair/projectCulminate",
"id": "65ab241cac0f33980109e4ab7c0fb619a98372d5",
"size": "16850",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/nltk/tokenize/texttiling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "365921"
},
{
"name": "C++",
"bytes": "237910"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Makefile",
"bytes": "90112"
},
{
"name": "Python",
"bytes": "15199371"
},
{
"name": "Shell",
"bytes": "17795"
}
],
"symlink_target": ""
} |
import os
import sys
if sys.platform == 'win32':
pybabel = 'flask\\Scripts\\pybabel'
else:
pybabel = 'flask/bin/pybabel'
if len(sys.argv) != 2:
print "usage: tr_init <language-code>"
sys.exit(1)
os.system(pybabel + ' extract -F babel.cfg -k lazy_gettext -o messages.pot app')
os.system(pybabel + ' init -i messages.pot -d app/translations -l ' + sys.argv[1])
os.unlink('messages.pot') | {
"content_hash": "4e643decc28c33a007367cfbdfd9dc9c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 82,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.6725,
"repo_name": "vbshah1992/microblog",
"id": "c80525c45825bad11f90a87b2947a17b707d8332",
"size": "419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tr_init.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "11367"
},
{
"name": "JavaScript",
"bytes": "22141"
},
{
"name": "Python",
"bytes": "9352069"
},
{
"name": "Shell",
"bytes": "3667"
},
{
"name": "TeX",
"bytes": "3026"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import itertools
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import models
class Command(BaseCommand):
help = "Prints generic configuration for any models which use a standard SphinxSearch manager."
option_list = BaseCommand.option_list + (
make_option('--all', action='store_true', default=False, dest='find_all', help='generate config for all models in all INSTALLED_APPS'),
)
output_transaction = True
def handle(self, *args, **options):
from djangosphinx.utils.config import generate_config_for_model, generate_sphinx_config
output = []
# warn the user to remove SPHINX_API_VERSION, because we no longer pull from bundled apis
if getattr(settings, 'SPHINX_API_VERSION', None) is not None:
raise CommandError("SPHINX_API_VERSION is deprecated, please use pip for installing the appropriate Sphinx API.")
model_classes = []
if options['find_all']:
model_classes = itertools.chain(*(models.get_models(app) for app in models.get_apps()))
elif len(args):
app_list = [models.get_app(app_label) for app_label in args]
for app in app_list:
model_classes.extend([getattr(app, n) for n in dir(app) if hasattr(getattr(app, n), '_meta')])
else:
raise CommandError("You must specify an app name or use --all")
found = 0
for model in model_classes:
if getattr(model._meta, 'proxy', False) or getattr(model._meta, 'abstract', False):
continue
indexes = getattr(model, '__sphinx_indexes__', [])
for index in indexes:
found += 1
output.append(generate_config_for_model(model, index))
if found == 0:
raise CommandError("Unable to find any models in application which use standard SphinxSearch configuration.")
output.append(generate_sphinx_config())
print ('\n'.join(output))
| {
"content_hash": "25c45ac882d28ec042a302632fb6b7c1",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 143,
"avg_line_length": 40.075471698113205,
"alnum_prop": 0.6459510357815442,
"repo_name": "NewVadim/django-sphinx",
"id": "1659d1b0fc4512d24270b1a0d35df4b642c5e169",
"size": "2124",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djangosphinx/management/commands/generate_sphinx_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "939"
},
{
"name": "Python",
"bytes": "139934"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_photodb_category'),
]
operations = [
migrations.RemoveField(
model_name='photoext',
name='location',
),
migrations.AddField(
model_name='photoext',
name='longitude',
field=models.FloatField(blank=True, default=None),
preserve_default=False,
),
migrations.AlterField(
model_name='photodb',
name='aspect',
field=models.FloatField(blank=True),
),
migrations.AlterField(
model_name='photoext',
name='height',
field=models.FloatField(blank=True),
),
migrations.AlterField(
model_name='photoext',
name='latitude',
field=models.FloatField(blank=True),
),
migrations.AlterField(
model_name='photoext',
name='width',
field=models.FloatField(blank=True),
),
]
| {
"content_hash": "58931d95ad4fa6dc21612878dbee1b80",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 62,
"avg_line_length": 26.58139534883721,
"alnum_prop": 0.5336832895888014,
"repo_name": "BOOLRon/lovelypic",
"id": "06839a42aef1c044e2c5ba85a13050cc1875a17d",
"size": "1214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/migrations/0005_auto_20170112_0917.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "102402"
},
{
"name": "HTML",
"bytes": "7992"
},
{
"name": "JavaScript",
"bytes": "32737"
},
{
"name": "Python",
"bytes": "32220"
},
{
"name": "TypeScript",
"bytes": "3782"
}
],
"symlink_target": ""
} |
import unittest
import datetime
from django.conf import settings
from django.db import connection
from models import CustomPKModel, UniqueTogetherModel, UniqueFieldsModel, UniqueForDateModel, ModelToValidate
class GetUniqueCheckTests(unittest.TestCase):
def test_unique_fields_get_collected(self):
m = UniqueFieldsModel()
self.assertEqual(
([(UniqueFieldsModel, ('id',)),
(UniqueFieldsModel, ('unique_charfield',)),
(UniqueFieldsModel, ('unique_integerfield',))],
[]),
m._get_unique_checks()
)
def test_unique_together_gets_picked_up_and_converted_to_tuple(self):
m = UniqueTogetherModel()
self.assertEqual(
([(UniqueTogetherModel, ('ifield', 'cfield',)),
(UniqueTogetherModel, ('ifield', 'efield')),
(UniqueTogetherModel, ('id',)), ],
[]),
m._get_unique_checks()
)
def test_primary_key_is_considered_unique(self):
m = CustomPKModel()
self.assertEqual(([(CustomPKModel, ('my_pk_field',))], []), m._get_unique_checks())
def test_unique_for_date_gets_picked_up(self):
m = UniqueForDateModel()
self.assertEqual((
[(UniqueForDateModel, ('id',))],
[(UniqueForDateModel, 'date', 'count', 'start_date'),
(UniqueForDateModel, 'year', 'count', 'end_date'),
(UniqueForDateModel, 'month', 'order', 'end_date')]
), m._get_unique_checks()
)
class PerformUniqueChecksTest(unittest.TestCase):
def setUp(self):
# Set debug to True to gain access to connection.queries.
self._old_debug, settings.DEBUG = settings.DEBUG, True
super(PerformUniqueChecksTest, self).setUp()
def tearDown(self):
# Restore old debug value.
settings.DEBUG = self._old_debug
super(PerformUniqueChecksTest, self).tearDown()
def test_primary_key_unique_check_not_performed_when_adding_and_pk_not_specified(self):
# Regression test for #12560
query_count = len(connection.queries)
mtv = ModelToValidate(number=10, name='Some Name')
setattr(mtv, '_adding', True)
mtv.full_clean()
self.assertEqual(query_count, len(connection.queries))
def test_primary_key_unique_check_performed_when_adding_and_pk_specified(self):
# Regression test for #12560
query_count = len(connection.queries)
mtv = ModelToValidate(number=10, name='Some Name', id=123)
setattr(mtv, '_adding', True)
mtv.full_clean()
self.assertEqual(query_count + 1, len(connection.queries))
def test_primary_key_unique_check_not_performed_when_not_adding(self):
# Regression test for #12132
query_count= len(connection.queries)
mtv = ModelToValidate(number=10, name='Some Name')
mtv.full_clean()
self.assertEqual(query_count, len(connection.queries))
| {
"content_hash": "fee757db29588c038daf0d09ea24a37c",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 109,
"avg_line_length": 39.10526315789474,
"alnum_prop": 0.6255047106325706,
"repo_name": "bfirsh/django-old",
"id": "1b966390c4402c35bc889658546924dcd5eace57",
"size": "2972",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/modeltests/validation/test_unique.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "170739"
},
{
"name": "Python",
"bytes": "5699000"
},
{
"name": "Shell",
"bytes": "3531"
}
],
"symlink_target": ""
} |
import contextlib
import ftplib
import os
import uuid
import paramiko
from nova import exception as nova_exception
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.powervm import constants
from nova.virt.powervm import exception
LOG = logging.getLogger(__name__)
class Connection(object):
def __init__(self, host, username, password, port=22, keyfile=None):
self.host = host
self.username = username
self.password = password
self.port = port
self.keyfile = keyfile
def ssh_connect(connection):
"""Method to connect to remote system using ssh protocol.
:param connection: a Connection object.
:returns: paramiko.SSHClient -- an active ssh connection.
:raises: PowerVMConnectionFailed
"""
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(connection.host,
username=connection.username,
password=connection.password,
port=connection.port,
key_filename=connection.keyfile,
timeout=constants.POWERVM_CONNECTION_TIMEOUT)
return ssh
except Exception:
LOG.exception(_('Connection error connecting PowerVM manager'))
raise exception.PowerVMConnectionFailed()
def ssh_command_as_root(ssh_connection, cmd, check_exit_code=True):
"""Method to execute remote command as root.
:param connection: an active paramiko.SSHClient connection.
:param command: string containing the command to run.
:returns: Tuple -- a tuple of (stdout, stderr)
:raises: nova.exception.ProcessExecutionError
"""
LOG.debug(_('Running cmd (SSH-as-root): %s') % cmd)
chan = ssh_connection._transport.open_session()
# This command is required to be executed
# in order to become root.
chan.exec_command('ioscli oem_setup_env')
bufsize = -1
stdin = chan.makefile('wb', bufsize)
stdout = chan.makefile('rb', bufsize)
stderr = chan.makefile_stderr('rb', bufsize)
# We run the command and then call 'exit' to exit from
# super user environment.
stdin.write('%s\n%s\n' % (cmd, 'exit'))
stdin.flush()
exit_status = chan.recv_exit_status()
# Lets handle the error just like nova.utils.ssh_execute does.
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise nova_exception.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=''.join(cmd))
return (stdout, stderr)
def ftp_put_command(connection, local_path, remote_dir):
"""Method to transfer a file via ftp.
:param connection: a Connection object.
:param local_path: path to the local file
:param remote_dir: path to remote destination
:raises: PowerVMFileTransferFailed
"""
try:
ftp = ftplib.FTP(host=connection.host,
user=connection.username,
passwd=connection.password)
ftp.cwd(remote_dir)
name = os.path.split(local_path)[1]
f = open(local_path, "rb")
ftp.storbinary("STOR " + name, f)
f.close()
ftp.close()
except Exception:
LOG.error(_('File transfer to PowerVM manager failed'))
raise exception.PowerVMFTPTransferFailed(ftp_cmd='PUT',
source_path=local_path, dest_path=remote_dir)
def ftp_get_command(connection, remote_path, local_path):
"""Retrieve a file via FTP
:param connection: a Connection object.
:param remote_path: path to the remote file
:param local_path: path to local destination
:raises: PowerVMFileTransferFailed
"""
try:
ftp = ftplib.FTP(host=connection.host,
user=connection.username,
passwd=connection.password)
ftp.cwd(os.path.dirname(remote_path))
name = os.path.basename(remote_path)
LOG.debug(_("ftp GET %(remote_path)s to: %(local_path)s") % locals())
with open(local_path, 'w') as ftpfile:
ftpcmd = 'RETR %s' % name
ftp.retrbinary(ftpcmd, ftpfile.write)
ftp.close()
except Exception:
LOG.error(_("File transfer from PowerVM manager failed"))
raise exception.PowerVMFTPTransferFailed(ftp_cmd='GET',
source_path=remote_path, dest_path=local_path)
def aix_path_join(path_one, path_two):
"""Ensures file path is built correctly for remote UNIX system
:param path_one: string of the first file path
:param path_two: string of the second file path
:returns: a uniform path constructed from both strings
"""
if path_one.endswith('/'):
path_one = path_one.rstrip('/')
if path_two.startswith('/'):
path_two = path_two.lstrip('/')
final_path = path_one + '/' + path_two
return final_path
@contextlib.contextmanager
def vios_to_vios_auth(source, dest, conn_info):
"""Context allowing for SSH between VIOS partitions
This context will build an SSH key on the source host, put the key
into the authorized_keys on the destination host, and make the
private key file name available within the context.
The key files and key inserted into authorized_keys will be
removed when the context exits.
:param source: source IP or DNS name
:param dest: destination IP or DNS name
:param conn_info: dictionary object with SSH connection
information for both hosts
"""
KEY_BASE_NAME = "os-%s" % uuid.uuid4().hex
keypair_uuid = uuid.uuid4()
src_conn_obj = ssh_connect(conn_info)
dest_conn_info = Connection(dest, conn_info.username,
conn_info.password)
dest_conn_obj = ssh_connect(dest_conn_info)
def run_command(conn_obj, cmd):
stdout, stderr = utils.ssh_execute(conn_obj, cmd)
return stdout.strip().splitlines()
def build_keypair_on_source():
mkkey = ('ssh-keygen -f %s -N "" -C %s' %
(KEY_BASE_NAME, keypair_uuid.hex))
ssh_command_as_root(src_conn_obj, mkkey)
chown_key = ('chown %s %s*' % (conn_info.username, KEY_BASE_NAME))
ssh_command_as_root(src_conn_obj, chown_key)
cat_key = ('cat %s.pub' % KEY_BASE_NAME)
pubkey = run_command(src_conn_obj, cat_key)
return pubkey[0]
def cleanup_key_on_source():
rmkey = 'rm %s*' % KEY_BASE_NAME
run_command(src_conn_obj, rmkey)
def insert_into_authorized_keys(public_key):
echo_key = 'echo "%s" >> .ssh/authorized_keys' % public_key
ssh_command_as_root(dest_conn_obj, echo_key)
def remove_from_authorized_keys():
rmkey = ('sed /%s/d .ssh/authorized_keys > .ssh/authorized_keys' %
keypair_uuid.hex)
ssh_command_as_root(dest_conn_obj, rmkey)
public_key = build_keypair_on_source()
insert_into_authorized_keys(public_key)
try:
yield KEY_BASE_NAME
finally:
remove_from_authorized_keys()
cleanup_key_on_source()
| {
"content_hash": "00f967c31cd3fca49c492b7b49ffe6ed",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 77,
"avg_line_length": 34.87619047619047,
"alnum_prop": 0.621381758601857,
"repo_name": "zestrada/nova-cs498cc",
"id": "34658547cb2acd43ce06f4fbd5b16ecff481a632",
"size": "7971",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/virt/powervm/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "9215416"
},
{
"name": "Shell",
"bytes": "17117"
}
],
"symlink_target": ""
} |
from robotide.lib.robot.errors import (ExecutionFailed, ExecutionStatus, DataError,
HandlerExecutionFailed, KeywordError, VariableError)
from robotide.lib.robot.utils import ErrorDetails, get_timestamp
class StatusReporter(object):
def __init__(self, context, result, dry_run_lib_kw=False):
self._context = context
self._result = result
self._pass_status = 'PASS' if not dry_run_lib_kw else 'NOT_RUN'
self._test_passed = None
def __enter__(self):
if self._context.test:
self._test_passed = self._context.test.passed
self._result.starttime = get_timestamp()
self._context.start_keyword(self._result)
self._warn_if_deprecated(self._result.doc, self._result.name)
def _warn_if_deprecated(self, doc, name):
if doc.startswith('*DEPRECATED') and '*' in doc[1:]:
message = ' ' + doc.split('*', 2)[-1].strip()
self._context.warn("Keyword '%s' is deprecated.%s" % (name, message))
def __exit__(self, exc_type, exc_val, exc_tb):
context = self._context
result = self._result
failure = self._get_failure(exc_type, exc_val, exc_tb, context)
if failure is None:
result.status = self._pass_status
else:
result.status = failure.status
if result.type == result.TEARDOWN_TYPE:
result.message = failure.message
if context.test:
context.test.passed = self._test_passed and result.passed
result.endtime = get_timestamp()
context.end_keyword(result)
if failure is not exc_val:
raise failure
def _get_failure(self, exc_type, exc_value, exc_tb, context):
if exc_value is None:
return None
if isinstance(exc_value, ExecutionStatus):
return exc_value
if isinstance(exc_value, DataError):
msg = exc_value.message
context.fail(msg)
syntax = not isinstance(exc_value, (KeywordError, VariableError))
return ExecutionFailed(msg, syntax=syntax)
exc_info = (exc_type, exc_value, exc_tb)
failure = HandlerExecutionFailed(ErrorDetails(exc_info))
if failure.timeout:
context.timeout_occurred = True
context.fail(failure.full_message)
if failure.traceback:
context.debug(failure.traceback)
return failure
| {
"content_hash": "5e7efa4417f063dad179b2d8476552a1",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 83,
"avg_line_length": 40.766666666666666,
"alnum_prop": 0.6091578086672118,
"repo_name": "HelioGuilherme66/RIDE",
"id": "44a0052cc27e4a03f89803345ecd60a468bdc1a0",
"size": "3090",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robotide/lib/robot/running/statusreporter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31131"
},
{
"name": "HTML",
"bytes": "96342"
},
{
"name": "JavaScript",
"bytes": "42656"
},
{
"name": "Python",
"bytes": "3703410"
},
{
"name": "RobotFramework",
"bytes": "378004"
},
{
"name": "Shell",
"bytes": "1873"
}
],
"symlink_target": ""
} |
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class CeilometerApiTests(test.APITestCase):
def test_sample_list(self):
samples = self.samples.list()
meter_name = "meter_name"
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.samples = self.mox.CreateMockAnything()
ceilometerclient.samples.list(meter_name=meter_name,
q=[],
limit=None).AndReturn(samples)
self.mox.ReplayAll()
ret_list = api.ceilometer.sample_list(self.request,
meter_name,
query=[])
self.assertEqual(len(samples), len(ret_list))
for c in ret_list:
self.assertIsInstance(c, api.ceilometer.Sample)
def test_alarm_list(self):
alarms = self.alarms.list()
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.alarms = self.mox.CreateMockAnything()
ceilometerclient.alarms.list(q=[]).AndReturn(alarms)
self.mox.ReplayAll()
ret_list = api.ceilometer.alarm_list(self.request, query=[])
self.assertEqual(len(alarms), len(ret_list))
for c in ret_list:
self.assertIsInstance(c, api.ceilometer.Alarm)
def test_alarm_get(self):
alarm = self.alarms.first()
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.alarms = self.mox.CreateMockAnything()
ceilometerclient.alarms.get(alarm.id).AndReturn(alarm)
self.mox.ReplayAll()
ret_alarm = api.ceilometer.alarm_get(self.request,
alarm_id='fake_alarm_id')
self.assertEqual(alarm.alarm_id, ret_alarm.alarm_id)
def test_alarm_create(self):
alarm = self.alarms.first()
new_alarm = {'alarm': alarm}
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.alarms = self.mox.CreateMockAnything()
ceilometerclient.alarms.create(**new_alarm).AndReturn(alarm)
self.mox.ReplayAll()
test_alarm = api.ceilometer.alarm_create(self.request,
**new_alarm)
self.assertEqual(alarm.alarm_id, test_alarm.alarm_id)
def test_alarm_update(self):
"""test update parameters"""
alarm1 = self.alarms.first()
alarm2 = self.alarms.list()[1]
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.alarms = self.mox.CreateMockAnything()
# Return the mock object that has "New" as description
ceilometerclient.alarms.update(alarm1.id,
description='New').AndReturn(alarm2)
self.mox.ReplayAll()
test_alarm = api.ceilometer.alarm_update(self.request,
alarm1.id,
description='New')
self.assertEqual(alarm2.description, test_alarm.description)
def test_meter_list(self):
meters = self.meters.list()
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.meters = self.mox.CreateMockAnything()
ceilometerclient.meters.list([]).AndReturn(meters)
self.mox.ReplayAll()
ret_list = api.ceilometer.meter_list(self.request, [])
self.assertEqual(len(meters), len(ret_list))
for m in ret_list:
self.assertIsInstance(m, api.ceilometer.Meter)
def test_resource_list(self):
resources = self.resources.list()
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.resources = self.mox.CreateMockAnything()
ceilometerclient.resources.list(q=[]).AndReturn(resources)
self.mox.ReplayAll()
ret_list = api.ceilometer.resource_list(self.request, query=[])
self.assertEqual(len(resources), len(ret_list))
for r in ret_list:
self.assertIsInstance(r, api.ceilometer.Resource)
def test_statistic_list(self):
statistics = self.statistics.list()
meter_name = "meter_name"
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.statistics = self.mox.CreateMockAnything()
ceilometerclient.statistics.list(meter_name=meter_name,
period=None, q=[]).\
AndReturn(statistics)
self.mox.ReplayAll()
ret_list = api.ceilometer.statistic_list(self.request,
meter_name,
period=None,
query=[])
self.assertEqual(len(statistics), len(ret_list))
for s in ret_list:
self.assertIsInstance(s, api.ceilometer.Statistic)
def test_meters_list_all(self):
meters = self.meters.list()
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.meters = self.mox.CreateMockAnything()
ceilometerclient.meters.list(None).AndReturn(meters)
self.mox.ReplayAll()
meters_object = api.ceilometer.Meters(self.request)
ret_list = meters_object.list_all()
for m in ret_list:
self.assertIsInstance(m, api.ceilometer.Meter)
self.assertEqual(3, len(ret_list))
names = ["disk.read.bytes", "disk.write.bytes", "instance"]
for ret in ret_list:
self.assertIn(ret.name, names)
names.remove(ret.name)
def test_meters_list_all_only(self):
meters = self.meters.list()
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.meters = self.mox.CreateMockAnything()
ceilometerclient.meters.list(None).AndReturn(meters)
self.mox.ReplayAll()
meters_object = api.ceilometer.Meters(self.request)
ret_list = meters_object.list_all(only_meters=["disk.read.bytes"])
self.assertEqual(1, len(ret_list))
self.assertEqual("disk.read.bytes", ret_list[0].name)
ret_list = meters_object.list_all(only_meters=["disk.read.bytes",
"instance"])
self.assertEqual(2, len(ret_list))
self.assertEqual("disk.read.bytes", ret_list[0].name)
self.assertEqual("instance", ret_list[1].name)
def test_meters_list_all_except(self):
meters = self.meters.list()
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.meters = self.mox.CreateMockAnything()
ceilometerclient.meters.list(None).AndReturn(meters)
self.mox.ReplayAll()
meters_object = api.ceilometer.Meters(self.request)
ret_list = meters_object.list_all(except_meters=["disk.write.bytes",
"instance"])
self.assertEqual(1, len(ret_list))
self.assertEqual("disk.read.bytes", ret_list[0].name)
ret_list = meters_object.list_all(except_meters=["disk.write.bytes"])
self.assertEqual(len(ret_list), 2)
names = ["disk.read.bytes", "instance"]
for ret in ret_list:
self.assertIn(ret.name, names)
names.remove(ret.name)
# TODO(lsmola) Test resource aggregates.
@test.create_stubs({api.ceilometer.CeilometerUsage: ("get_user",
"get_tenant")})
def test_global_data_get(self):
class TempUsage(api.base.APIResourceWrapper):
_attrs = ["id", "tenant", "user", "resource", "get_meter"]
meters = ["fake_meter_1",
"fake_meter_2"]
default_query = ["Fake query"]
stats_attr = "max"
resources = self.resources.list()
statistics = self.statistics.list()
user = self.ceilometer_users.list()[0]
tenant = self.ceilometer_tenants.list()[0]
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.resources = self.mox.CreateMockAnything()
# I am returning only 1 resource
ceilometerclient.resources.list(q=IsA(list)).AndReturn(resources[:1])
ceilometerclient.statistics = self.mox.CreateMockAnything()
# check that list is called twice for one resource and 2 meters
ceilometerclient.statistics.list(meter_name=IsA(str),
period=None, q=IsA(list)).\
AndReturn(statistics)
ceilometerclient.statistics.list(meter_name=IsA(str),
period=None, q=IsA(list)).\
AndReturn(statistics)
api.ceilometer.CeilometerUsage\
.get_user(IsA(str)).AndReturn(user)
api.ceilometer.CeilometerUsage\
.get_tenant(IsA(str)).AndReturn(tenant)
self.mox.ReplayAll()
# getting all resources and with statistics
ceilometer_usage = api.ceilometer.CeilometerUsage(http.HttpRequest)
data = ceilometer_usage.global_data_get(
used_cls=TempUsage, query=["fake_query"], with_statistics=True)
first = data[0]
self.assertEqual('fake_project_id__fake_user_id__'
'fake_resource_id',
first.id)
self.assertEqual('user', first.user.name)
self.assertEqual('test_tenant', first.tenant.name)
self.assertEqual('fake_resource_id', first.resource)
self.assertEqual(9, first.get_meter('fake_meter_1'),)
self.assertEqual(9, first.get_meter('fake_meter_2'),)
self.assertEqual(2, len(first.meters))
# check that only one resource is returned
self.assertEqual(1, len(data))
@test.create_stubs({api.ceilometer.CeilometerUsage: ("get_user",
"get_tenant")})
def test_global_data_get_without_statistic_data(self):
class TempUsage(api.base.APIResourceWrapper):
_attrs = ["id", "tenant", "user", "resource", "fake_meter_1",
"fake_meter_2"]
meters = ["fake_meter_1",
"fake_meter_2"]
default_query = ["Fake query"]
stats_attr = "max"
resources = self.resources.list()
user = self.ceilometer_users.list()[0]
tenant = self.ceilometer_tenants.list()[0]
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.resources = self.mox.CreateMockAnything()
ceilometerclient.resources.list(q=IsA(list)).AndReturn(resources)
api.ceilometer.CeilometerUsage\
.get_user(IsA(str)).MultipleTimes().AndReturn(user)
api.ceilometer.CeilometerUsage\
.get_tenant(IsA(str)).MultipleTimes().AndReturn(tenant)
self.mox.ReplayAll()
# getting all resources and with statistics
ceilometer_usage = api.ceilometer.CeilometerUsage(http.HttpRequest)
data = ceilometer_usage.global_data_get(
used_cls=TempUsage, query=["fake_query"], with_statistics=False)
first = data[0]
self.assertEqual('fake_project_id__fake_user_id__'
'fake_resource_id',
first.id)
self.assertEqual('user', first.user.name)
self.assertEqual('test_tenant', first.tenant.name)
self.assertEqual('fake_resource_id', first.resource)
self.assertRaises(AttributeError, getattr, first, 'fake_meter_1')
self.assertRaises(AttributeError, getattr, first, 'fake_meter_2')
self.assertEqual(len(resources), len(data))
@test.create_stubs({api.ceilometer.CeilometerUsage: ("get_user",
"get_tenant")})
def test_global_data_get_all_statistic_data(self):
class TempUsage(api.base.APIResourceWrapper):
_attrs = ["id", "tenant", "user", "resource", "get_meter", ]
meters = ["fake_meter_1",
"fake_meter_2"]
default_query = ["Fake query"]
stats_attr = None # have to return dictionary with all stats
resources = self.resources.list()
statistics = self.statistics.list()
user = self.ceilometer_users.list()[0]
tenant = self.ceilometer_tenants.list()[0]
ceilometerclient = self.stub_ceilometerclient()
ceilometerclient.resources = self.mox.CreateMockAnything()
ceilometerclient.resources.list(q=IsA(list)).AndReturn(resources)
ceilometerclient.statistics = self.mox.CreateMockAnything()
ceilometerclient.statistics.list(meter_name=IsA(str),
period=None, q=IsA(list)).\
MultipleTimes().\
AndReturn(statistics)
api.ceilometer.CeilometerUsage\
.get_user(IsA(str)).MultipleTimes().AndReturn(user)
api.ceilometer.CeilometerUsage\
.get_tenant(IsA(str)).MultipleTimes().AndReturn(tenant)
self.mox.ReplayAll()
# getting all resources and with statistics
ceilometer_usage = api.ceilometer.CeilometerUsage(http.HttpRequest)
data = ceilometer_usage.global_data_get(
used_cls=TempUsage, query=["fake_query"], with_statistics=True)
first = data[0]
self.assertEqual('fake_project_id__fake_user_id__'
'fake_resource_id',
first.id)
self.assertEqual('user', first.user.name)
self.assertEqual('test_tenant', first.tenant.name)
self.assertEqual('fake_resource_id', first.resource)
statistic_obj = api.ceilometer.Statistic(statistics[0])
# check that it returns whole statistic object
self.assertEqual(vars(first.get_meter('fake_meter_1')[0]),
vars(statistic_obj))
self.assertEqual(vars(first.get_meter('fake_meter_2')[0]),
vars(statistic_obj))
self.assertEqual(len(resources), len(data))
| {
"content_hash": "0a06e5040d038c9b36606621582737e9",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 77,
"avg_line_length": 40.74063400576369,
"alnum_prop": 0.5936903161915541,
"repo_name": "bac/horizon",
"id": "16be994eef965f1c4a56ec1bb46887b14c80db3d",
"size": "14717",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "openstack_dashboard/test/api_tests/ceilometer_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "103495"
},
{
"name": "HTML",
"bytes": "542157"
},
{
"name": "JavaScript",
"bytes": "1720604"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "5280278"
},
{
"name": "Shell",
"bytes": "19049"
}
],
"symlink_target": ""
} |
import datetime
import copy
import yaml
class YamlPrinter():
_references= {}
def get_header(self, filetype):
yoda_header = "---\n"
yoda_header += "YODA:\n"
yoda_header += " - {"
yoda_header += "Version: \"{0}\", ".format("0.1.0")
yoda_header += "Profile: \"{0}\", ".format(filetype)
yoda_header += "CreationTool: \"{0}\", ".format("YodaConverter")
yoda_header += "DateCreated: \"{0}\", ".format(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d'))
yoda_header += "DateUpdated: \"{0}\"".format(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d'))
yoda_header += "}\n"
return yoda_header.replace('None', 'NULL')
def handle_inheritance(self, apiobjs):
# break into groups of object types (specimen vs site)
from collections import defaultdict
objlist = defaultdict(list)
text = ""
for obj in apiobjs:
objlist[obj.__class__.__name__].append(obj)
index = 0
# call current function with each list, send in class name
for k, v in objlist.items():
text += self.print_objects(objlist[k], index)
index += len(objlist[k])
return text
def fill_dict(self, obj):
for val in ["SpecimenTypeCV", "SiteTypeCV", "CensorCodeCV"]:
try:
getattr(obj, val)
except:
pass
def print_objects(self, apiobjs, ind=0):
# TODO handle inheritance objects
objname = apiobjs[0].__class__.__name__
text = objname + ":\n"
index = 1 + ind
for obj in apiobjs:
primarykey = obj.__mapper__.primary_key[0].name
#add this try/except block to make sure the inherited objects' dictionaries have all the metadata
self.fill_dict(obj)
valuedict = obj.__dict__.copy()
#find the attribute name of the primary key
for v in valuedict:
if v.lower() == obj.__mapper__.primary_key[0].name:
# pop unwanted items from the dictionary
valuedict.pop(v)
if v != "BridgeID" and v != "RelationID":
primarykey = v
else:
#what to do if the primary key is BridgeID?
if objname[-1] == "s":
primarykey = objname[:-1] + "ID"
else:
primarykey = objname + "ID"
break
#remove all id's from the dictionary
for k in valuedict.keys():
if "id" in k.lower() and "uuid" not in k.lower():
del valuedict[k]
#assign the reference value for objects
for key in dir(obj):
if "obj" in key.lower(): # key.contains("Obj"):
try:
att = getattr(obj, key)
if att is not None:
valuedict[key] = self._references[att]
else:
valuedict[key] = "NULL"
except Exception as e:
# print ("cannot find {} in {}. Error:{} in YamlPrinter".format(key, obj.__class__, e))
pass
self._references[obj] = '*{}{:0>4d}'.format(primarykey, index)
text += ' - &{}{:0>4d} '.format(primarykey, index)
text += self.print_dictionary(valuedict)
index += 1
return text
def print_dictionary(self, dict):
from numbers import Number
final_string = "{"
for k, v in dict.items():
#if the item is null don't print it
if v is None:
final_string += '{}: NULL, '.format(k)
elif isinstance(v, Number):
final_string += '{}: {}, '.format(k, v)
elif isinstance(v, basestring):
if '*' in v:
final_string += '{}: {}, '.format(k, v)
else:
final_string += '{}: "{}", '.format(k, v)
elif isinstance(v, datetime.datetime) or isinstance(v, datetime.date):
final_string += '{}: "{}", '.format(k, v.strftime("%Y-%m-%d %H:%M"))
final_string = "{}}}\n".format(final_string[:-2])
return final_string
def add_to_db(self, objname, file, data):
if objname in data:
# check to see if this is an inherited object
if data[objname][0].__class__ !=data[objname][0]._sa_instance_state.key[0]:
file.write(self.handle_inheritance(apiobjs=data[objname]))
else:
file.write(self.print_objects(data[objname]))
def generate_ts_objects(self, serial):
text = 'TimeSeriesResultValues:\n'
text += ' ColumnDefinitions:\n'
text += ' - {ColumnNumber: 0001, Label: "ValueDateTime", ODM2Field: ValueDateTime}\n'
text += ' - {ColumnNumber: 0002, Label: "ValueDateTimeUTCOffset", ODM2Field: ValueDateTimeUTCOffset}\n'
ind = 3
meta = serial.groupby('resultid').min()
del meta["datavalue"]
del meta["valueid"]
cross_tab = self.generate_ts_data(serial)
for index, row in meta.iterrows():
varname, resultkey, taiuObj = ["", "", ""]
try:
unit = self.units_dict[row["timeaggregationintervalunitsid"]]
# get unit
taiuObj = self._references[unit]
result = self.result_dict[index]
# get result
resultkey = self._references[result]
# get varname
varname = result.VariableObj.VariableCode
# Change column names from ResultID to VariableCode, then VariableCode & SamplingFeatureCode, then
# VariableCode & SamplingFeatureCode &
cross_tab.rename(columns={index: varname}, inplace=True)
serial.ix[serial.resultid == index] = varname
except Exception as e:
print "I am an error" + e.message
text += ' - {{ColumnNumber: {:0>4d}, Label: "{}", ODM2Field: "DataValue", ' \
'Result: {}, CensorCodeCV: "{}", QualityCodeCV: "{}", ' \
'TimeAggregationInterval: {}, TimeAggregationIntervalUnitsObj: {}}}' \
'\n'.format(ind, varname, resultkey, row["censorcodecv"], row["qualitycodecv"],
row["timeaggregationinterval"], taiuObj)
ind += 1
text += " Data:\n"
text += " - [[\n"
text += cross_tab.to_csv(line_terminator='],[\n')
text += "]]\n"
return text
def generate_ts_data(self, serial):
# Timeseriesresultvalues - ColumnDefinitions:, Data:
cross_tab = serial.pivot_table(
index=["valuedatetime", "valuedatetimeutcoffset"],
columns="resultid",
values="datavalue")
# cross_tab = cross_tab.rename(columns={'valuedatetime': 'ValueDateTime', 'valuedatetimeutcoffset': 'ValueDateTimeUTCOffset'})
cross_tab.index.names = ['ValueDateTime', 'ValueDateTimeUTCOffset']
return cross_tab
def parse_meta(self, data):
self.result_dict = {}
for res in data["results"]:
self.result_dict[res.ResultID] = res
self.units_dict = {}
for unit in data["units"]:
self.units_dict[unit.UnitsID] = unit
def print_yoda(self, out_file, data):
self.data = data
if "measurementresultvalues" in data:
filetype = "SpecimenTimeSeries"
else:
filetype = "TimeSeries"
with open(out_file, 'w') as yaml_schema_file:
# Header
yaml_schema_file.write(self.get_header(filetype))
# Data Set
self.add_to_db("datasets", yaml_schema_file, data)
# Organization
self.add_to_db("organizations", yaml_schema_file, data)
# People
self.add_to_db("people", yaml_schema_file, data)
# Affiliations
self.add_to_db("affiliations", yaml_schema_file, data)
# Citations
self.add_to_db("citations", yaml_schema_file, data)
# Author Lists
self.add_to_db("authorlists", yaml_schema_file, data)
# Data Set Citations
self.add_to_db("datasetcitations", yaml_schema_file, data)
# Spatial References
self.add_to_db("spatialreferences", yaml_schema_file, data)
# Sampling Features:
self.add_to_db("samplingfeatures", yaml_schema_file, data)
# Related Features
self.add_to_db("relatedfeatures", yaml_schema_file, data)
# Units
self.add_to_db("units", yaml_schema_file, data)
# Annotations
self.add_to_db("annotations", yaml_schema_file, data)
# Methods
self.add_to_db("methods", yaml_schema_file, data)
# Variables
self.add_to_db("variables", yaml_schema_file, data)
# Processing Level
self.add_to_db("processinglevels", yaml_schema_file, data)
# Action
self.add_to_db("actions", yaml_schema_file, data)
# Feature Action
self.add_to_db("featureactions", yaml_schema_file, data)
# Action By
self.add_to_db("actionby", yaml_schema_file, data)
# Related Actions
self.add_to_db("relatedactions", yaml_schema_file, data)
# Result
self.add_to_db("results", yaml_schema_file, data)
self.parse_meta(data)
# Data Set Results
self.add_to_db("datasetsresults", yaml_schema_file, data)
# Measurement Result Values
self.add_to_db("measurementresultvalues", yaml_schema_file, data)
# Measurement Result Value Annotations
self.add_to_db("measurementresultvalueannotations", yaml_schema_file, data)
# Time Series Result Values
val = "timeseriesresultvalues"
if val in data:
yaml_schema_file.write(self.generate_ts_objects(data[val]))
yaml_schema_file.write("...")
| {
"content_hash": "e399ee2e04b65400385ad8efed3b2f35",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 134,
"avg_line_length": 38.79182156133829,
"alnum_prop": 0.5259223766171538,
"repo_name": "ODM2/ODM2YODAParser",
"id": "eac981b1c1ae1296cdfadcddf0f7343de8d90124",
"size": "10436",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "yodatools/yodaparser/yamlPrinter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "279523"
}
],
"symlink_target": ""
} |
import pytest
import salt.cloud.clouds.openstack as openstack
from salt.utils import dictupdate
from tests.support.mock import MagicMock, call, patch
# pylint: disable=confusing-with-statement
class MockImage:
name = "image name"
id = "image id"
class MockNode:
name = "node name"
id = "node id"
flavor = MockImage()
status = "node status"
def __init__(self, image):
self.image = image
def __iter__(self):
return iter(())
class MockConn:
def __init__(self, image):
self.node = MockNode(image)
def get_image(self, *args, **kwargs):
return self.node.image
def get_flavor(self, *args, **kwargs):
return self.node.flavor
def get_server(self, *args, **kwargs):
return self.node
def list_servers(self, *args, **kwargs):
return [self.node]
@pytest.fixture
def configure_loader_modules():
return {
openstack: {
"__active_provider_name__": "",
"__opts__": {
"providers": {
"my-openstack-cloud": {
"openstack": {
"auth": "daenerys",
"region_name": "westeros",
"cloud": "openstack",
}
}
}
},
}
}
@pytest.fixture
def expected_config_stuff():
vm = {"asdf": ...}
with patch("copy.deepcopy", autospec=True, return_value=42), patch.dict(
openstack.__opts__, {"foo": "bar"}
):
yield vm
def test_when_getting_cloud_config_values_expected_args_should_be_provided():
expected_vm = "whatever"
expected_calls = call(
"ignore_cidr", expected_vm, openstack.__opts__, default="", search_global=False
)
@pytest.mark.parametrize(
"comment,example_ip,ignored_cidr,expected",
[
("ip is in ignore_cidr string", "203.0.113.1", "203.0.113.0/24", True),
("ip is not in ignore_cidr string", "192.0.2.1", "203.0.113.0/24", False),
("ignore_cidr is empty", "192.0.2.1", "", False),
("ignore_cidr is False", "192.0.2.1", False, False),
("ignore_cidr is None", "192.0.2.1", None, False),
(
"ip is in ignore_cidr list",
"192.0.2.1",
["192.0.2.0/24", "203.0.113.0/24"],
True,
),
(
"ip is not in ignore_cidr list",
"192.0.2.1",
["198.51.100.0/24", "203.0.113.0/24"],
False,
),
],
)
def test_when_ignore_cidr_is_configured_and_ip_is_provided_result_is_expected(
comment, example_ip, ignored_cidr, expected
):
with patch(
"salt.config.get_cloud_config_value", autospec=True, return_value=ignored_cidr
):
result = openstack.ignore_cidr("fnord", example_ip)
assert result is expected
@pytest.mark.parametrize(
"comment,example_ips,ignored_cidr,expected",
[
(
"ignore_cidr matches first 2 ips, expected value will be first ip that"
" doesn't match cidr.",
["203.0.113.1", "203.0.113.2", "192.0.2.1", "192.0.2.2"],
"203.0.113.0/24",
"192.0.2.1",
),
(
"ignore_cidr matches 2nd 2 IPs, expected value will be first ip in list. ",
["203.0.113.1", "203.0.113.2", "192.0.2.1", "192.0.2.2"],
"192.0.2.0/24",
"203.0.113.1",
),
(
"ignore_cidr doesn't match any IPs, expected value will be first ip in"
" list.",
["203.0.113.1", "203.0.113.2", "192.0.2.1", "192.0.2.2"],
"198.51.100.0/24",
"203.0.113.1",
),
(
"ignore_cidr matches all IPs, expected value will be False.",
["203.0.113.1", "203.0.113.2", "203.0.113.3", "203.0.113.4"],
"203.0.113.0/24",
False,
),
(
"When ignore_cidr is not set, return first ip",
["203.0.113.1", "203.0.113.2", "192.0.2.1", "192.0.2.2"],
None,
"203.0.113.1",
),
],
)
def test_preferred_ip_function_returns_expected(
comment, example_ips, ignored_cidr, expected
):
with patch(
"salt.config.get_cloud_config_value", autospec=True, return_value=ignored_cidr
):
result = openstack.preferred_ip("fnord", example_ips)
assert result is expected
def test_get_configured_provider_bad():
with patch.dict(openstack.__opts__, {"providers": {}}):
result = openstack.get_configured_provider()
assert result is False
def test_get_configured_provider_auth():
config = {
"region_name": "westeros",
"auth": "daenerys",
}
with patch.dict(
openstack.__opts__,
{"providers": {"my-openstack-cloud": {"openstack": config}}},
):
result = openstack.get_configured_provider()
assert config == result
def test_get_configured_provider_cloud():
config = {
"region_name": "westeros",
"cloud": "foo",
}
with patch.dict(
openstack.__opts__,
{"providers": {"my-openstack-cloud": {"openstack": config}}},
):
result = openstack.get_configured_provider()
assert config == result
def test_get_dependencies():
HAS_SHADE = (True, "Please install newer version of shade: >= 1.19.0")
with patch("salt.cloud.clouds.openstack.HAS_SHADE", HAS_SHADE):
result = openstack.get_dependencies()
assert result is True
def test_get_dependencies_no_shade():
HAS_SHADE = (False, "Install pypi module shade >= 1.19.0")
with patch("salt.cloud.clouds.openstack.HAS_SHADE", HAS_SHADE):
result = openstack.get_dependencies()
assert result is False
def test_list_nodes_full_image_str():
node_image = "node image"
conn = MockConn(node_image)
with patch("salt.cloud.clouds.openstack._get_ips", return_value=[]):
ret = openstack.list_nodes_full(conn=conn)
assert ret[conn.node.name]["image"] == node_image
def test_list_nodes_full_image_obj():
conn = MockConn(MockImage())
with patch("salt.cloud.clouds.openstack._get_ips", return_value=[]):
ret = openstack.list_nodes_full(conn=conn)
assert ret[conn.node.name]["image"] == MockImage.name
def test_show_instance():
conn = MockConn(MockImage())
with patch("salt.cloud.clouds.openstack._get_ips", return_value=[]):
ret = openstack.show_instance(conn.node.name, conn=conn, call="action")
assert ret["image"] == MockImage.name
def test_request_instance_should_use_provided_connection_if_not_None():
fake_conn = MagicMock()
patch_get_conn = patch("salt.cloud.clouds.openstack.get_conn", autospec=True)
patch_utils = patch.dict(
openstack.__utils__,
{"cloud.check_name": MagicMock(), "dictupdate.update": dictupdate.update},
)
patch_shade = patch.object(
openstack, "shade.exc.OpenStackCloudException", Exception, create=True
)
with patch_get_conn as fake_get_conn, patch_utils, patch_shade:
openstack.request_instance(
vm_={"name": "fnord", "driver": "fnord"}, conn=fake_conn
)
fake_get_conn.assert_not_called()
def test_request_instance_should_create_conn_if_provided_is_None():
none_conn = None
patch_get_conn = patch("salt.cloud.clouds.openstack.get_conn", autospec=True)
patch_utils = patch.dict(
openstack.__utils__,
{"cloud.check_name": MagicMock(), "dictupdate.update": dictupdate.update},
)
patch_shade = patch.object(
openstack, "shade.exc.OpenStackCloudException", Exception, create=True
)
with patch_get_conn as fake_get_conn, patch_utils, patch_shade:
openstack.request_instance(
vm_={"name": "fnord", "driver": "fnord"}, conn=none_conn
)
fake_get_conn.assert_called_once_with()
# According to
# https://docs.openstack.org/shade/latest/user/usage.html#shade.OpenStackCloud.create_server
# the `network` parameter can be:
# (optional) Network dict or name or ID to attach the server to.
# Mutually exclusive with the nics parameter. Can also be be a list of
# network names or IDs or network dicts.
#
# Here we're testing a normal dictionary
def test_request_instance_should_be_able_to_provide_a_dictionary_for_network():
fake_conn = MagicMock()
expected_network = {"foo": "bar"}
vm_ = {"name": "fnord", "driver": "fnord", "network": expected_network}
patch_utils = patch.dict(
openstack.__utils__,
{"cloud.check_name": MagicMock(), "dictupdate.update": dictupdate.update},
)
with patch_utils:
openstack.request_instance(vm_=vm_, conn=fake_conn)
call_kwargs = fake_conn.create_server.mock_calls[0][-1]
assert call_kwargs["network"] == expected_network
# Here we're testing the list of dictionaries
def test_request_instance_should_be_able_to_provide_a_list_of_dictionaries_for_network():
fake_conn = MagicMock()
expected_network = [{"foo": "bar"}, {"bang": "quux"}]
vm_ = {"name": "fnord", "driver": "fnord", "network": expected_network}
patch_utils = patch.dict(
openstack.__utils__,
{"cloud.check_name": MagicMock(), "dictupdate.update": dictupdate.update},
)
with patch_utils:
openstack.request_instance(vm_=vm_, conn=fake_conn)
call_kwargs = fake_conn.create_server.mock_calls[0][-1]
assert call_kwargs["network"] == expected_network
# Here we're testing for names/IDs
def test_request_instance_should_be_able_to_provide_a_list_of_single_ids_or_names_for_network():
fake_conn = MagicMock()
expected_network = ["foo", "bar", "bang", "fnord1", "fnord2"]
vm_ = {"name": "fnord", "driver": "fnord", "network": expected_network}
patch_utils = patch.dict(
openstack.__utils__,
{"cloud.check_name": MagicMock(), "dictupdate.update": dictupdate.update},
)
with patch_utils:
openstack.request_instance(vm_=vm_, conn=fake_conn)
call_kwargs = fake_conn.create_server.mock_calls[0][-1]
assert call_kwargs["network"] == expected_network
# Testing that we get a dict that we expect for create_server
def test__clean_create_kwargs():
params = {
"name": "elmer",
"image": "mirrormirror",
"flavor": "chocolate",
"auto_ip": True,
"ips": ["hihicats"],
"ip_pool": "olympic",
"root_volume": "iamgroot",
"boot_volume": "pussnboots",
"terminate_volume": False,
"volumes": ["lots", "of", "books"],
"meta": {"full": "meta"},
"files": {"shred": "this"},
"reservation_id": "licenseandregistration",
"security_groups": ["wanna", "play", "repeat"],
"key_name": "clortho",
"availability_zone": "callmemaybe",
"block_device_mapping": [{"listof": "dicts"}],
"block_device_mapping_v2": [{"listof": "dicts"}],
"nics": ["thats", "me"],
"scheduler_hints": {"so": "many"},
"config_drive": True,
"disk_config": "donkey",
"admin_pass": "password",
"wait": False,
"timeout": 30,
"reuse_ips": True,
"network": ["also", "a", "dict"],
"boot_from_volume": True,
"volume_size": 30,
"nat_destination": "albuquerque",
"group": "ledzeppelin",
"userdata": "needmoreinput",
"thisgetsdropped": "yup",
}
patch_utils = patch.dict(
openstack.__utils__,
{"dictupdate.update": dictupdate.update},
)
with patch_utils:
ret = openstack._clean_create_kwargs(**params)
params.pop("thisgetsdropped")
assert params == ret
| {
"content_hash": "d5755b35bb815f73a85b6b642ce2552c",
"timestamp": "",
"source": "github",
"line_count": 370,
"max_line_length": 96,
"avg_line_length": 31.835135135135136,
"alnum_prop": 0.5781475507258681,
"repo_name": "saltstack/salt",
"id": "f78935c144991d8625d15ae6a97c9f96d2403bea",
"size": "11779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pytests/unit/cloud/clouds/test_openstack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
"""
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. module:: scipy.stats
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
Continuous distributions
========================
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
f -- F (Snecdor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
frechet_r -- Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l -- Frechet Left Sided, Weibull_max
genlogistic -- Generalized Logistic
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
ksone -- Kolmogorov-Smirnov one-sided (no stats)
kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace -- Laplace
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
reciprocal -- Reciprocal
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
t -- Student's T
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
==========================
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
Discrete distributions
======================
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
zipf -- Zipf
Statistical functions
=====================
Several of these functions have a similar version in scipy.stats.mstats
which work for masked arrays.
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
kurtosistest --
mode -- Modal value
moment -- Central moment
normaltest --
skew -- Skewness
skewtest --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
nanmean -- Mean, ignoring NaN values
nanstd -- Standard deviation, ignoring NaN values
nanmedian -- Median, ignoring NaN values
variation -- Coefficient of variation
.. autosummary::
:toctree: generated/
cumfreq _
histogram2 _
histogram _
itemfreq _
percentileofscore _
scoreatpercentile _
relfreq _
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
bayes_mvs
sem
zmap
zscore
.. autosummary::
:toctree: generated/
sigmaclip
threshold
trimboth
trim1
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
kstest
chisquare
power_divergence
ks_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
fligner
median_test
mood
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
entropy
Contingency table functions
===========================
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.expected_freq
contingency.margins
fisher_exact
Plot-tests
==========
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
Masked statistics functions
===========================
.. toctree::
stats.mstats
Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`)
==============================================================================
.. autosummary::
:toctree: generated/
gaussian_kde
For many more stat related functions install the software R and the
interface package rpy.
"""
from __future__ import division, print_function, absolute_import
from .stats import *
from .distributions import *
from .morestats import *
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from .contingency import chi2_contingency
from ._multivariate import *
#remove vonmises_cython from __all__, I don't know why it is included
__all__ = [s for s in dir() if not (s.startswith('_') or s.endswith('cython'))]
from numpy.testing import Tester
test = Tester().test
| {
"content_hash": "d1ee00eeb704aeb661252767b146d2bd",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 94,
"avg_line_length": 26.489028213166144,
"alnum_prop": 0.5715976331360947,
"repo_name": "mortonjt/scipy",
"id": "7d71537b991ed181eea4389ec2e1eccd521f49ab",
"size": "8450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/stats/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4188768"
},
{
"name": "C++",
"bytes": "3595822"
},
{
"name": "FORTRAN",
"bytes": "5560532"
},
{
"name": "HTML",
"bytes": "124328"
},
{
"name": "Makefile",
"bytes": "4903"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "8064774"
},
{
"name": "Shell",
"bytes": "3798"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
import setuptools
import sys
requiredList = ['requests', 'python-dateutil']
if sys.version_info[:2] <= (2, 6):
requiredList.extend(['argparse', 'unittest2'])
if sys.version_info[:2] <= (2, 7):
requiredList.extend(['mock'])
if sys.version_info[:2] <= (3,):
requiredList.extend([])
setuptools.setup(
name='Troz',
version='1.0',
description='The Wizard Of Troz',
author='Peter Naudus',
author_email='[email protected]',
url='http://TheTroz.com',
packages=setuptools.find_packages(),
scripts = ['troz.py'],
install_requires=requiredList
)
| {
"content_hash": "ad58184453fd9a8cf645f2a2c915af5b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 50,
"avg_line_length": 25.695652173913043,
"alnum_prop": 0.6480541455160744,
"repo_name": "dvrasp/TheTroz",
"id": "465125bbb324247d5bf50f49b31dd9873efbfb50",
"size": "614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65975"
}
],
"symlink_target": ""
} |
import json
import os
class Configuration(object):
def __init__(self, directory):
self.package = None
self.name = None
self.icon_name = None
self.version = None
self.numeric_version = None
self.orientation = None
self.permissions = [ "VIBRATE" ]
self.include_pil = False
self.include_sqlite = False
self.layout = None
self.source = False
self.expansion = False
try:
with file(os.path.join(directory, ".android.json"), "r") as f:
d = json.load(f)
self.__dict__.update(d)
except:
pass
def save(self, directory):
with file(os.path.join(directory, ".android.json"), "w") as f:
json.dump(self.__dict__, f)
def set_version(config, value):
"""
Sets the version, and tries to set the numeric versions based on the
version number.
"""
config.version = value
try:
v = 0
for i in config.version.split('.'):
v *= 100
v += int(i)
config.numeric_version = str(v)
except:
pass
def configure(interface, directory):
renpy = os.path.exists("renpy")
config = Configuration(directory)
config.name = interface.input("""What is the full name of your application? This name will appear in the list of installed applications.""", config.name)
if config.icon_name is None:
config.icon_name = config.name
config.icon_name = interface.input("What is the short name of your application? This name will be used in the launcher, and for application shortcuts.", config.icon_name)
config.package = interface.input("""\
What is the name of the package?
This is usually of the form com.domain.program or com.domain.email.program. It
must only contain ASCII letters and dots.""", config.package)
version = interface.input("""\
What is the application's version?
This should be the human-readable version that you would present to a person.""", config.version)
set_version(config, version)
config.numeric_version = interface.input("""What is the version code?
This should be an integer number, and the value should increase between versions.""", config.numeric_version)
config.orientation = interface.choice("How would you like your application to be displayed?", [
("landscape", "In landscape mode."),
("portrait", "In portrait mode."),
], config.orientation)
config.expansion = interface.choice("Would you like to create an expansion APK?", [
(False, "No. Size limit of 50 MB on Google Play, but can be distributed through other store and sideloaded."),
(True, "Yes. 2 GB size limit, but won't work outside of Google Play.")
], config.expansion)
if not renpy:
config.layout = interface.choice("How is your application laid out?", [
("internal", "A single directory, that will be placed on device internal storage."),
("external", "A single directory, that will be placed on device external storage."),
("split", "Multiple directories that correspond to internal, external, and asset storage."),
], config.layout)
config.source = interface.yesno("Do you want to include the Python source code of your application in the archive? If you include it once, you'll need to include it always.", config.source)
permissions = " ".join(config.permissions)
permissions = interface.input("""\
What permissions should your application have? Possible permissions include:
INTERNET (network access), VIBRATE (vibration control).
Please enter a space-separated list of permissions.""", permissions)
config.permissions = permissions.split()
config.include_sqlite = interface.yesno("Do you want to include SQLite3 with your application?", config.include_sqlite)
config.include_pil = interface.yesno("Do you want to include the Python Imaging Library (PIL) with your application?", config.include_pil)
if renpy:
if not config.expansion:
internet = "INTERNET" in config.permissions
internet = interface.yesno("Do you want to allow the app to access the Internet?", internet)
else:
internet = False # included in template.
permissions = [ i for i in config.permissions if i not in [ "INTERNET" ] ]
if internet:
permissions.append("INTERNET")
config.permissions = permissions
config.save(directory)
def set_config(iface, directory, var, value):
config = Configuration(directory)
if var == "version":
set_version(config, value)
elif var == "permissions":
config.permissions = value.split()
elif hasattr(config, var):
setattr(config, var, value)
else:
iface.fail("Unknown configuration variable: {}".format(var))
config.save(directory)
| {
"content_hash": "e6f05d8beea9a47287653208ed2b4fec",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 197,
"avg_line_length": 34.18421052631579,
"alnum_prop": 0.6198999230177059,
"repo_name": "flyher/pymo",
"id": "4ee1b629084e403c150ad20af5a330df6b931b12",
"size": "5196",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "android/pgs4a-0.9.6/buildlib/configure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "247"
},
{
"name": "C",
"bytes": "581274"
},
{
"name": "C++",
"bytes": "151108"
},
{
"name": "Clarion",
"bytes": "2743"
},
{
"name": "Groff",
"bytes": "13374"
},
{
"name": "HTML",
"bytes": "240526"
},
{
"name": "Java",
"bytes": "149837"
},
{
"name": "Makefile",
"bytes": "144854"
},
{
"name": "Python",
"bytes": "16929339"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "29384"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from time import sleep, time
from threading import Event
from subprocess import PIPE, STDOUT
import socket
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, Process, TestTimeout, \
AsyncTestSender, AsyncTestReceiver, MgmtMsgProxy, unittest, QdManager
from test_broker import FakeBroker
from test_broker import FakeService
from proton import Delivery, symbol
from proton import Message, Condition
from proton.handlers import MessagingHandler
from proton.reactor import AtMostOnce, Container, DynamicNodeProperties, LinkOption, AtLeastOnce
from proton.reactor import ApplicationEvent
from proton.reactor import EventInjector
from proton.utils import BlockingConnection
from system_tests_drain_support import DrainMessagesHandler, DrainOneMessageHandler, DrainNoMessagesHandler, DrainNoMoreMessagesHandler
from qpid_dispatch.management.client import Node
from qpid_dispatch.management.error import NotFoundStatus, BadRequestStatus
class LinkRouteTest(TestCase):
"""
Tests the linkRoute property of the dispatch router.
Sets up 4 routers (two of which are acting as brokers (QDR.A, QDR.D)). The other two routers have linkRoutes
configured such that matching traffic will be directed to/from the 'fake' brokers.
(please see configs in the setUpClass method to get a sense of how the routers and their connections are configured)
The tests in this class send and receive messages across this network of routers to link routable addresses.
Uses the Python Blocking API to send/receive messages. The blocking api plays neatly into the synchronous nature
of system tests.
QDR.A acting broker #1
+---------+ +---------+ +---------+ +-----------------+
| | <------ | | <----- | |<----| blocking_sender |
| QDR.A | | QDR.B | | QDR.C | +-----------------+
| | ------> | | ------> | | +-------------------+
+---------+ +---------+ +---------+---->| blocking_receiver |
^ | +-------------------+
| |
| V
+---------+
| |
| QDR.D |
| |
+---------+
QDR.D acting broker #2
"""
@classmethod
def get_router(cls, index):
return cls.routers[index]
@classmethod
def setUpClass(cls):
"""Start three routers"""
super(LinkRouteTest, cls).setUpClass()
def router(name, connection):
config = [
('router', {'mode': 'interior', 'id': 'QDR.%s' % name}),
] + connection
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=False))
cls.routers = []
a_listener_port = cls.tester.get_port()
b_listener_port = cls.tester.get_port()
c_listener_port = cls.tester.get_port()
d_listener_port = cls.tester.get_port()
test_tag_listener_port = cls.tester.get_port()
router('A',
[
('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': a_listener_port, 'saslMechanisms': 'ANONYMOUS'}),
])
router('B',
[
# Listener for clients, note that the tests assume this listener is first in this list:
('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': b_listener_port, 'saslMechanisms': 'ANONYMOUS'}),
('listener', {'name': 'test-tag', 'role': 'route-container', 'host': '0.0.0.0', 'port': test_tag_listener_port, 'saslMechanisms': 'ANONYMOUS'}),
# This is an route-container connection made from QDR.B's ephemeral port to a_listener_port
('connector', {'name': 'broker', 'role': 'route-container', 'host': '0.0.0.0', 'port': a_listener_port, 'saslMechanisms': 'ANONYMOUS'}),
# Only inter router communication must happen on 'inter-router' connectors. This connector makes
# a connection from the router B's ephemeral port to c_listener_port
('connector', {'name': 'routerC', 'role': 'inter-router', 'host': '0.0.0.0', 'port': c_listener_port}),
# This is an on-demand connection made from QDR.B's ephemeral port to d_listener_port
('connector', {'name': 'routerD', 'role': 'route-container', 'host': '0.0.0.0', 'port': d_listener_port, 'saslMechanisms': 'ANONYMOUS'}),
#('linkRoute', {'prefix': 'org.apache', 'connection': 'broker', 'direction': 'in'}),
('linkRoute', {'prefix': 'org.apache', 'containerId': 'QDR.A', 'direction': 'in'}),
('linkRoute', {'prefix': 'org.apache', 'containerId': 'QDR.A', 'direction': 'out'}),
('linkRoute', {'prefix': 'pulp.task', 'connection': 'test-tag', 'direction': 'in'}),
('linkRoute', {'prefix': 'pulp.task', 'connection': 'test-tag', 'direction': 'out'}),
# addresses matching pattern 'a.*.toA.#' route to QDR.A
('linkRoute', {'pattern': 'a.*.toA.#', 'containerId': 'QDR.A', 'direction': 'in'}),
('linkRoute', {'pattern': 'a.*.toA.#', 'containerId': 'QDR.A', 'direction': 'out'}),
# addresses matching pattern 'a.*.toD.#' route to QDR.D
# Dont change dir to direction here so we can make sure that the dir attribute is still working.
('linkRoute', {'pattern': 'a.*.toD.#', 'containerId': 'QDR.D', 'dir': 'in'}),
('linkRoute', {'pattern': 'a.*.toD.#', 'containerId': 'QDR.D', 'dir': 'out'})
]
)
router('C',
[
# The client will exclusively use the following listener to
# connect to QDR.C, the tests assume this is the first entry
# in the list
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}),
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': c_listener_port, 'saslMechanisms': 'ANONYMOUS'}),
# The dot(.) at the end is ignored by the address hashing scheme.
('linkRoute', {'prefix': 'org.apache.', 'direction': 'in'}),
('linkRoute', {'prefix': 'org.apache.', 'direction': 'out'}),
('linkRoute', {'prefix': 'pulp.task', 'direction': 'in'}),
('linkRoute', {'prefix': 'pulp.task', 'direction': 'out'}),
('linkRoute', {'pattern': 'a.*.toA.#', 'direction': 'in'}),
('linkRoute', {'pattern': 'a.*.toA.#', 'direction': 'out'}),
('linkRoute', {'pattern': 'a.*.toD.#', 'direction': 'in'}),
('linkRoute', {'pattern': 'a.*.toD.#', 'direction': 'out'})
]
)
router('D', # sink for QDR.D routes
[
('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': d_listener_port, 'saslMechanisms': 'ANONYMOUS'}),
])
# Wait for the routers to locate each other, and for route propagation
# to settle
cls.routers[1].wait_router_connected('QDR.C')
cls.routers[2].wait_router_connected('QDR.B')
cls.routers[2].wait_address("org.apache", remotes=1, delay=0.5, count=2)
# This is not a classic router network in the sense that QDR.A and D are acting as brokers. We allow a little
# bit more time for the routers to stabilize.
sleep(2)
def run_qdstat_linkRoute(self, address, args=None):
cmd = ['qdstat', '--bus', str(address), '--timeout', str(TIMEOUT)] + ['--linkroute']
if args:
cmd = cmd + args
p = self.popen(
cmd,
name='qdstat-' + self.id(), stdout=PIPE, expect=None,
universal_newlines=True)
out = p.communicate()[0]
assert p.returncode == 0, "qdstat exit status %s, output:\n%s" % (p.returncode, out)
return out
def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):
p = self.popen(
['qdmanage'] + cmd.split(' ') + ['--bus', address or self.address(), '--indent=-1', '--timeout', str(TIMEOUT)],
stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
universal_newlines=True)
out = p.communicate(input)[0]
try:
p.teardown()
except Exception as e:
raise Exception("%s\n%s" % (e, out))
return out
def test_aaa_qdmanage_query_link_route(self):
"""
qdmanage converts short type to long type and this test specifically tests if qdmanage is actually doing
the type conversion correctly by querying with short type and long type.
"""
cmd = 'QUERY --type=linkRoute'
out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0])
# Make sure there is a dir of in and out.
self.assertIn('"direction": "in"', out)
self.assertIn('"direction": "out"', out)
self.assertIn('"containerId": "QDR.A"', out)
# Use the long type and make sure that qdmanage does not mess up the long type
cmd = 'QUERY --type=org.apache.qpid.dispatch.router.config.linkRoute'
out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0])
# Make sure there is a dir of in and out.
self.assertIn('"direction": "in"', out)
self.assertIn('"direction": "out"', out)
self.assertIn('"containerId": "QDR.A"', out)
identity = out[out.find("identity") + 12: out.find("identity") + 13]
cmd = 'READ --type=linkRoute --identity=' + identity
out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0])
self.assertIn(identity, out)
exception_occurred = False
try:
# This identity should not be found
cmd = 'READ --type=linkRoute --identity=9999'
out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0])
except Exception as e:
exception_occurred = True
self.assertIn("NotFoundStatus: Not Found", str(e))
self.assertTrue(exception_occurred)
exception_occurred = False
try:
# There is no identity specified, this is a bad request
cmd = 'READ --type=linkRoute'
out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0])
except Exception as e:
exception_occurred = True
self.assertIn("BadRequestStatus: No name or identity provided", str(e))
self.assertTrue(exception_occurred)
cmd = 'CREATE --type=autoLink address=127.0.0.1 direction=in connection=routerC'
out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0])
identity = out[out.find("identity") + 12: out.find("identity") + 14]
cmd = 'READ --type=autoLink --identity=' + identity
out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0])
self.assertIn(identity, out)
def test_bbb_qdstat_link_routes_routerB(self):
"""
Runs qdstat on router B to make sure that router B has 4 link routes,
each having one 'in' and one 'out' entry
"""
out = self.run_qdstat_linkRoute(self.routers[1].addresses[0])
for route in ['a.*.toA.#', 'a.*.toD.#', 'org.apache', 'pulp.task']:
self.assertIn(route, out)
out_list = out.split()
self.assertEqual(out_list.count('in'), 4)
self.assertEqual(out_list.count('out'), 4)
parts = out.split("\n")
self.assertEqual(len(parts), 15)
out = self.run_qdstat_linkRoute(self.routers[1].addresses[0], args=['--limit=1'])
parts = out.split("\n")
self.assertEqual(len(parts), 8)
def test_ccc_qdstat_link_routes_routerC(self):
"""
Runs qdstat on router C to make sure that router C has 4 link routes,
each having one 'in' and one 'out' entry
"""
out = self.run_qdstat_linkRoute(self.routers[2].addresses[0])
out_list = out.split()
self.assertEqual(out_list.count('in'), 4)
self.assertEqual(out_list.count('out'), 4)
def test_ddd_partial_link_route_match(self):
"""
The linkRoute on Routers C and B is set to org.apache.
Creates a receiver listening on the address 'org.apache.dev' and a sender that sends to address 'org.apache.dev'.
Sends a message to org.apache.dev via router QDR.C and makes sure that the message was successfully
routed (using partial address matching) and received using pre-created links that were created as a
result of specifying addresses in the linkRoute attribute('org.apache.').
"""
hello_world_1 = "Hello World_1!"
# Connects to listener #2 on QDR.C
addr = self.routers[2].addresses[0]
blocking_connection = BlockingConnection(addr)
# Receive on org.apache.dev
blocking_receiver = blocking_connection.create_receiver(address="org.apache.dev")
apply_options = AtMostOnce()
# Sender to org.apache.dev
blocking_sender = blocking_connection.create_sender(address="org.apache.dev", options=apply_options)
msg = Message(body=hello_world_1)
# Send a message
blocking_sender.send(msg)
received_message = blocking_receiver.receive()
self.assertEqual(hello_world_1, received_message.body)
# Connect to the router acting like the broker (QDR.A) and check the deliveriesIngress and deliveriesEgress
local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT)
self.assertEqual(u'QDR.A', local_node.query(type='org.apache.qpid.dispatch.router',
attribute_names=[u'id']).results[0][0])
self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address',
name='M0org.apache.dev').deliveriesEgress)
self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address',
name='M0org.apache.dev').deliveriesIngress)
# There should be 4 links -
# 1. outbound receiver link on org.apache.dev
# 2. inbound sender link on blocking_sender
# 3. inbound link to the $management
# 4. outbound link to $management
# self.assertEqual(4, len()
self.assertEqual(4, len(local_node.query(type='org.apache.qpid.dispatch.router.link').results))
blocking_connection.close()
def test_partial_link_route_match_1(self):
"""
This test is pretty much the same as the previous test (test_partial_link_route_match) but the connection is
made to router QDR.B instead of QDR.C and we expect to see the same behavior.
"""
hello_world_2 = "Hello World_2!"
addr = self.routers[1].addresses[0]
blocking_connection = BlockingConnection(addr)
# Receive on org.apache.dev
blocking_receiver = blocking_connection.create_receiver(address="org.apache.dev.1")
apply_options = AtMostOnce()
# Sender to to org.apache.dev
blocking_sender = blocking_connection.create_sender(address="org.apache.dev.1", options=apply_options)
msg = Message(body=hello_world_2)
# Send a message
blocking_sender.send(msg)
received_message = blocking_receiver.receive()
self.assertEqual(hello_world_2, received_message.body)
local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT)
# Make sure that the router node acting as the broker (QDR.A) had one message routed through it. This confirms
# that the message was link routed
self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address',
name='M0org.apache.dev.1').deliveriesEgress)
self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address',
name='M0org.apache.dev.1').deliveriesIngress)
blocking_connection.close()
def test_full_link_route_match(self):
"""
The linkRoute on Routers C and B is set to org.apache.
Creates a receiver listening on the address 'org.apache' and a sender that sends to address 'org.apache'.
Sends a message to org.apache via router QDR.C and makes sure that the message was successfully
routed (using full address matching) and received using pre-created links that were created as a
result of specifying addresses in the linkRoute attribute('org.apache.').
"""
hello_world_3 = "Hello World_3!"
# Connects to listener #2 on QDR.C
addr = self.routers[2].addresses[0]
blocking_connection = BlockingConnection(addr)
# Receive on org.apache
blocking_receiver = blocking_connection.create_receiver(address="org.apache")
apply_options = AtMostOnce()
# Sender to to org.apache
blocking_sender = blocking_connection.create_sender(address="org.apache", options=apply_options)
msg = Message(body=hello_world_3)
# Send a message
blocking_sender.send(msg)
received_message = blocking_receiver.receive()
self.assertEqual(hello_world_3, received_message.body)
local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT)
# Make sure that the router node acting as the broker (QDR.A) had one message routed through it. This confirms
# that the message was link routed
self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address',
name='M0org.apache').deliveriesEgress)
self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address',
name='M0org.apache').deliveriesIngress)
blocking_connection.close()
def _link_route_pattern_match(self, connect_node, include_host,
exclude_host, test_address,
expected_pattern):
"""
This helper function ensures that messages sent to 'test_address' pass
through 'include_host', and are *not* routed to 'exclude_host'
"""
hello_pattern = "Hello Pattern!"
route = 'M0' + test_address
# Connect to the two 'waypoints', ensure the route is not present on
# either
node_A = Node.connect(include_host, timeout=TIMEOUT)
node_B = Node.connect(exclude_host, timeout=TIMEOUT)
for node in [node_A, node_B]:
self.assertRaises(NotFoundStatus,
node.read,
type='org.apache.qpid.dispatch.router.address',
name=route)
# wait until the host we're connecting to gets its next hop for the
# pattern we're connecting to
connect_node.wait_address(expected_pattern, remotes=1, delay=0.1, count=2)
# Connect to 'connect_node' and send message to 'address'
blocking_connection = BlockingConnection(connect_node.addresses[0])
blocking_receiver = blocking_connection.create_receiver(address=test_address)
blocking_sender = blocking_connection.create_sender(address=test_address,
options=AtMostOnce())
msg = Message(body=hello_pattern)
blocking_sender.send(msg)
received_message = blocking_receiver.receive()
self.assertEqual(hello_pattern, received_message.body)
# verify test_address is only present on include_host and not on exclude_host
self.assertRaises(NotFoundStatus,
node_B.read,
type='org.apache.qpid.dispatch.router.address',
name=route)
self.assertEqual(1, node_A.read(type='org.apache.qpid.dispatch.router.address',
name=route).deliveriesIngress)
self.assertEqual(1, node_A.read(type='org.apache.qpid.dispatch.router.address',
name=route).deliveriesIngress)
# drop the connection and verify that test_address is no longer on include_host
blocking_connection.close()
timeout = time() + TIMEOUT
while True:
try:
node_A.read(type='org.apache.qpid.dispatch.router.address',
name=route)
if time() > timeout:
raise Exception("Expected route '%s' to expire!" % route)
sleep(0.1)
except NotFoundStatus:
break
node_A.close()
node_B.close()
def test_link_route_pattern_match(self):
"""
Verify the addresses match the proper patterns and are routed to the
proper 'waypoint' only
"""
qdr_A = self.routers[0].addresses[0]
qdr_D = self.routers[3].addresses[0]
qdr_C = self.routers[2] # note: the node, not the address!
self._link_route_pattern_match(connect_node=qdr_C,
include_host=qdr_A,
exclude_host=qdr_D,
test_address='a.notD.toA',
expected_pattern='a.*.toA.#')
self._link_route_pattern_match(connect_node=qdr_C,
include_host=qdr_D,
exclude_host=qdr_A,
test_address='a.notA.toD',
expected_pattern='a.*.toD.#')
self._link_route_pattern_match(connect_node=qdr_C,
include_host=qdr_A,
exclude_host=qdr_D,
test_address='a.toD.toA.xyz',
expected_pattern='a.*.toA.#')
self._link_route_pattern_match(connect_node=qdr_C,
include_host=qdr_D,
exclude_host=qdr_A,
test_address='a.toA.toD.abc',
expected_pattern='a.*.toD.#')
def test_custom_annotations_match(self):
"""
The linkRoute on Routers C and B is set to org.apache.
Creates a receiver listening on the address 'org.apache' and a sender that sends to address 'org.apache'.
Sends a message with custom annotations to org.apache via router QDR.C and makes sure that the message was successfully
routed (using full address matching) and received using pre-created links that were created as a
result of specifying addresses in the linkRoute attribute('org.apache.'). Make sure custom annotations arrived as well.
"""
hello_world_3 = "Hello World_3!"
# Connects to listener #2 on QDR.C
addr = self.routers[2].addresses[0]
blocking_connection = BlockingConnection(addr)
# Receive on org.apache
blocking_receiver = blocking_connection.create_receiver(address="org.apache.2")
apply_options = AtMostOnce()
# Sender to to org.apache
blocking_sender = blocking_connection.create_sender(address="org.apache.2", options=apply_options)
msg = Message(body=hello_world_3)
annotations = {'custom-annotation': '1/Custom_Annotation'}
msg.annotations = annotations
# Send a message
blocking_sender.send(msg)
received_message = blocking_receiver.receive()
self.assertEqual(hello_world_3, received_message.body)
self.assertEqual(received_message.annotations, annotations)
blocking_connection.close()
def test_full_link_route_match_1(self):
"""
This test is pretty much the same as the previous test (test_full_link_route_match) but the connection is
made to router QDR.B instead of QDR.C and we expect the message to be link routed successfully.
"""
hello_world_4 = "Hello World_4!"
addr = self.routers[1].addresses[0]
blocking_connection = BlockingConnection(addr)
# Receive on org.apache
blocking_receiver = blocking_connection.create_receiver(address="org.apache.1")
apply_options = AtMostOnce()
# Sender to to org.apache
blocking_sender = blocking_connection.create_sender(address="org.apache.1", options=apply_options)
msg = Message(body=hello_world_4)
# Send a message
blocking_sender.send(msg)
received_message = blocking_receiver.receive()
self.assertEqual(hello_world_4, received_message.body)
local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT)
# Make sure that the router node acting as the broker (QDR.A) had one message routed through it. This confirms
# that the message was link routed
self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address',
name='M0org.apache.1').deliveriesEgress)
self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address',
name='M0org.apache.1').deliveriesIngress)
blocking_connection.close()
def test_zzz_qdmanage_delete_link_route(self):
"""
We are deleting the link route using qdmanage short name. This should be the last test to run
"""
local_node = Node.connect(self.routers[1].addresses[0], timeout=TIMEOUT)
res = local_node.query(type='org.apache.qpid.dispatch.router')
results = res.results[0]
attribute_list = res.attribute_names
result_list = local_node.query(type='org.apache.qpid.dispatch.router.config.linkRoute').results
self.assertEqual(results[attribute_list.index('linkRouteCount')], len(result_list))
# First delete linkRoutes on QDR.B
for rid in range(8):
cmd = 'DELETE --type=linkRoute --identity=' + result_list[rid][1]
self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0])
cmd = 'QUERY --type=linkRoute'
out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0])
self.assertEqual(out.rstrip(), '[]')
# linkRoutes now gone on QDR.B but remember that it still exist on QDR.C
# We will now try to create a receiver on address org.apache.dev on QDR.C.
# Since the linkRoute on QDR.B is gone, QDR.C
# will not allow a receiver to be created since there is no route to destination.
# Connects to listener #2 on QDR.C
addr = self.routers[2].addresses[0]
# Now delete linkRoutes on QDR.C to eradicate linkRoutes completely
local_node = Node.connect(addr, timeout=TIMEOUT)
result_list = local_node.query(type='org.apache.qpid.dispatch.router.config.linkRoute').results
# QDR.C has 8 link routes configured, nuke 'em:
self.assertEqual(8, len(result_list))
for rid in range(8):
cmd = 'DELETE --type=linkRoute --identity=' + result_list[rid][1]
self.run_qdmanage(cmd=cmd, address=addr)
cmd = 'QUERY --type=linkRoute'
out = self.run_qdmanage(cmd=cmd, address=addr)
self.assertEqual(out.rstrip(), '[]')
res = local_node.query(type='org.apache.qpid.dispatch.router')
results = res.results[0]
attribute_list = res.attribute_names
self.assertEqual(results[attribute_list.index('linkRouteCount')], 0)
blocking_connection = BlockingConnection(addr, timeout=3)
# Receive on org.apache.dev (this address used to be linkRouted but not anymore since we deleted linkRoutes
# on both QDR.C and QDR.B)
blocking_receiver = blocking_connection.create_receiver(address="org.apache.dev")
apply_options = AtMostOnce()
hello_world_1 = "Hello World_1!"
# Sender to org.apache.dev
blocking_sender = blocking_connection.create_sender(address="org.apache.dev", options=apply_options)
msg = Message(body=hello_world_1)
# Send a message
blocking_sender.send(msg)
received_message = blocking_receiver.receive(timeout=5)
self.assertEqual(hello_world_1, received_message.body)
def test_yyy_delivery_tag(self):
"""
Tests that the router carries over the delivery tag on a link routed delivery
"""
listening_address = self.routers[1].addresses[1]
sender_address = self.routers[2].addresses[0]
qdstat_address = self.routers[2].addresses[0]
test = DeliveryTagsTest(sender_address, listening_address, qdstat_address)
test.run()
self.assertEqual(None, test.error)
def test_yyy_invalid_delivery_tag(self):
test = InvalidTagTest(self.routers[2].addresses[0])
test.run()
self.assertEqual(None, test.error)
def test_close_with_unsettled(self):
test = CloseWithUnsettledTest(self.routers[1].addresses[0], self.routers[1].addresses[1])
test.run()
self.assertEqual(None, test.error)
def test_www_drain_support_all_messages(self):
drain_support = DrainMessagesHandler(self.routers[2].addresses[0])
drain_support.run()
self.assertEqual(None, drain_support.error)
def test_www_drain_support_one_message(self):
drain_support = DrainOneMessageHandler(self.routers[2].addresses[0])
drain_support.run()
self.assertEqual(None, drain_support.error)
def test_www_drain_support_no_messages(self):
drain_support = DrainNoMessagesHandler(self.routers[2].addresses[0])
drain_support.run()
self.assertEqual(None, drain_support.error)
def test_www_drain_support_no_more_messages(self):
drain_support = DrainNoMoreMessagesHandler(self.routers[2].addresses[0])
drain_support.run()
self.assertEqual(None, drain_support.error)
def test_link_route_terminus_address(self):
# The receiver is attaching to router B to a listener that has link route for address 'pulp.task' setup.
listening_address = self.routers[1].addresses[1]
# Run the query on a normal port
query_address_listening = self.routers[1].addresses[0]
# Sender is attaching to router C
sender_address = self.routers[2].addresses[0]
query_address_sending = self.routers[2].addresses[0]
test = TerminusAddrTest(sender_address, listening_address, query_address_sending, query_address_listening)
test.run()
self.assertTrue(test.in_receiver_found)
self.assertTrue(test.out_receiver_found)
self.assertTrue(test.in_sender_found)
self.assertTrue(test.out_sender_found)
def test_dynamic_source(self):
test = DynamicSourceTest(self.routers[1].addresses[0], self.routers[1].addresses[1])
test.run()
self.assertEqual(None, test.error)
def test_dynamic_target(self):
test = DynamicTargetTest(self.routers[1].addresses[0], self.routers[1].addresses[1])
test.run()
self.assertEqual(None, test.error)
def test_detach_without_close(self):
test = DetachNoCloseTest(self.routers[1].addresses[0], self.routers[1].addresses[1])
test.run()
self.assertEqual(None, test.error)
def test_detach_mixed_close(self):
test = DetachMixedCloseTest(self.routers[1].addresses[0], self.routers[1].addresses[1])
test.run()
self.assertEqual(None, test.error)
def _multi_link_send_receive(self, send_host, receive_host, name):
senders = ["%s/%s" % (send_host, address) for address in ["org.apache.foo", "org.apache.bar"]]
receivers = ["%s/%s" % (receive_host, address) for address in ["org.apache.foo", "org.apache.bar"]]
test = MultiLinkSendReceive(senders, receivers, name)
test.run()
self.assertEqual(None, test.error)
def test_same_name_route_receivers_through_B(self):
self._multi_link_send_receive(self.routers[0].addresses[0], self.routers[1].addresses[0], "recv_through_B")
def test_same_name_route_senders_through_B(self):
self._multi_link_send_receive(self.routers[1].addresses[0], self.routers[0].addresses[0], "send_through_B")
def test_same_name_route_receivers_through_C(self):
self._multi_link_send_receive(self.routers[0].addresses[0], self.routers[2].addresses[0], "recv_through_C")
def test_same_name_route_senders_through_C(self):
self._multi_link_send_receive(self.routers[2].addresses[0], self.routers[0].addresses[0], "send_through_C")
def test_echo_detach_received(self):
"""
Create two receivers to link routed address org.apache.dev
Create a sender to the same address that the receiver is listening on and send 100 messages.
After the receivers receive 10 messages each, the receivers will detach and expect to receive ten
detaches in response.
"""
test = EchoDetachReceived(self.routers[2].addresses[0], self.routers[2].addresses[0])
test.run()
self.assertEqual(None, test.error)
def test_bad_link_route_config(self):
"""
What happens when the link route create request is malformed?
"""
mgmt = self.routers[1].management
# zero length prefix
self.assertRaises(BadRequestStatus,
mgmt.create,
type="org.apache.qpid.dispatch.router.config.linkRoute",
name="bad-1",
attributes={'prefix': '',
'containerId': 'FakeBroker',
'direction': 'in'})
# pattern wrong type
self.assertRaises(BadRequestStatus,
mgmt.create,
type="org.apache.qpid.dispatch.router.config.linkRoute",
name="bad-2",
attributes={'pattern': 666,
'containerId': 'FakeBroker',
'direction': 'in'})
# invalid pattern (no tokens)
self.assertRaises(BadRequestStatus,
mgmt.create,
type="org.apache.qpid.dispatch.router.config.linkRoute",
name="bad-3",
attributes={'pattern': '///',
'containerId': 'FakeBroker',
'direction': 'in'})
# empty attributes
self.assertRaises(BadRequestStatus,
mgmt.create,
type="org.apache.qpid.dispatch.router.config.linkRoute",
name="bad-4",
attributes={})
# both pattern and prefix
self.assertRaises(BadRequestStatus,
mgmt.create,
type="org.apache.qpid.dispatch.router.config.linkRoute",
name="bad-5",
attributes={'prefix': 'a1',
'pattern': 'b2',
'containerId': 'FakeBroker',
'direction': 'in'})
# bad direction
self.assertRaises(BadRequestStatus,
mgmt.create,
type="org.apache.qpid.dispatch.router.config.linkRoute",
name="bad-6",
attributes={'pattern': 'b2',
'containerId': 'FakeBroker',
'direction': 'nowhere'})
# bad distribution
self.assertRaises(BadRequestStatus,
mgmt.create,
type="org.apache.qpid.dispatch.router.config.linkRoute",
name="bad-7",
attributes={'pattern': 'b2',
'containerId': 'FakeBroker',
'direction': 'in',
"distribution": "dilly dilly"})
# no direction
self.assertRaises(BadRequestStatus,
mgmt.create,
type="org.apache.qpid.dispatch.router.config.linkRoute",
name="bad-8",
attributes={'prefix': 'b2',
'containerId': 'FakeBroker'})
# neither pattern nor prefix
self.assertRaises(BadRequestStatus,
mgmt.create,
type="org.apache.qpid.dispatch.router.config.linkRoute",
name="bad-9",
attributes={'direction': 'out',
'containerId': 'FakeBroker'})
class DeliveryTagsTest(MessagingHandler):
def __init__(self, sender_address, listening_address, qdstat_address):
super(DeliveryTagsTest, self).__init__()
self.sender_address = sender_address
self.listening_address = listening_address
self.sender = None
self.receiver_connection = None
self.sender_connection = None
self.qdstat_address = qdstat_address
self.id = '1235'
self.times = 1
self.sent = 0
self.rcvd = 0
self.delivery_tag_verified = False
# The delivery tag we are going to send in the transfer frame
# We will later make sure that the same delivery tag shows up on the receiving end in the link routed case.
# KAG: force the literal to type 'str' due to SWIG weirdness: on 2.X a
# delivery tag cannot be unicode (must be binary), but on 3.X it must
# be unicode! See https://issues.apache.org/jira/browse/PROTON-1843
self.delivery_tag = str('92319')
self.error = None
def timeout(self):
self.error = "Timeout expired: sent=%d rcvd=%d" % (self.sent, self.rcvd)
if self.receiver_connection:
self.receiver_connection.close()
if self.sender_connection:
self.sender_connection.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.receiver_connection = event.container.connect(self.listening_address)
def on_connection_remote_open(self, event):
if event.connection == self.receiver_connection:
continue_loop = True
# Don't open the sender connection unless we can make sure that there is a remote receiver ready to
# accept the message.
# If there is no remote receiver, the router will throw a 'No route to destination' error when
# creating sender connection.
# The following loops introduces a wait before creating the sender connection. It gives time to the
# router so that the address Dpulp.task can show up on the remoteCount
i = 0
while continue_loop:
if i > 100: # If we have run the read command for more than hundred times and we still do not have
# the remoteCount set to 1, there is a problem, just exit out of the function instead
# of looping to infinity.
self.receiver_connection.close()
return
local_node = Node.connect(self.qdstat_address, timeout=TIMEOUT)
out = local_node.read(type='org.apache.qpid.dispatch.router.address', name='Dpulp.task').remoteCount
if out == 1:
continue_loop = False
else:
i += 1
sleep(0.25)
self.sender_connection = event.container.connect(self.sender_address)
self.sender = event.container.create_sender(self.sender_connection, "pulp.task", options=AtMostOnce())
def on_sendable(self, event):
if self.times == 1:
msg = Message(body="Hello World")
self.sender.send(msg, tag=self.delivery_tag)
self.times += 1
self.sent += 1
def on_message(self, event):
if "Hello World" == event.message.body:
self.rcvd += 1
# If the tag on the delivery is the same as the tag we sent with the initial transfer, it means
# that the router has propagated the delivery tag successfully because of link routing.
if self.delivery_tag != event.delivery.tag:
self.error = "Delivery-tag: expected:%r got:%r" % (self.delivery_tag, event.delivery.tag)
self.receiver_connection.close()
self.sender_connection.close()
self.timer.cancel()
def run(self):
Container(self).run()
class CloseWithUnsettledTest(MessagingHandler):
##
# This test sends a message across an attach-routed link. While the message
# is unsettled, the client link is closed. The test is ensuring that the
# router does not crash during the closing of the links.
##
def __init__(self, normal_addr, route_addr):
super(CloseWithUnsettledTest, self).__init__(prefetch=0, auto_accept=False)
self.normal_addr = normal_addr
self.route_addr = route_addr
self.dest = "pulp.task.CWUtest"
self.error = None
def timeout(self):
self.error = "Timeout Expired - Check for cores"
self.conn_normal.close()
self.conn_route.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.conn_route = event.container.connect(self.route_addr)
def on_connection_opened(self, event):
if event.connection == self.conn_route:
self.conn_normal = event.container.connect(self.normal_addr)
elif event.connection == self.conn_normal:
self.sender = event.container.create_sender(self.conn_normal, self.dest)
def on_connection_closed(self, event):
self.conn_route.close()
self.timer.cancel()
def on_link_opened(self, event):
if event.receiver:
self.receiver = event.receiver
self.receiver.flow(1)
def on_sendable(self, event):
msg = Message(body="CloseWithUnsettled")
event.sender.send(msg)
def on_message(self, event):
self.conn_normal.close()
def run(self):
Container(self).run()
class DynamicSourceTest(MessagingHandler):
##
# This test verifies that a dynamic source can be propagated via link-route to
# a route-container.
##
def __init__(self, normal_addr, route_addr):
super(DynamicSourceTest, self).__init__(prefetch=0, auto_accept=False)
self.normal_addr = normal_addr
self.route_addr = route_addr
self.dest = "pulp.task.DynamicSource"
self.address = "DynamicSourceAddress"
self.error = None
def timeout(self):
self.error = "Timeout Expired - Check for cores"
self.conn_normal.close()
self.conn_route.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.conn_route = event.container.connect(self.route_addr)
def on_connection_opened(self, event):
if event.connection == self.conn_route:
self.conn_normal = event.container.connect(self.normal_addr)
elif event.connection == self.conn_normal:
self.receiver = event.container.create_receiver(self.conn_normal, None, dynamic=True, options=DynamicNodeProperties({"x-opt-qd.address": u"pulp.task.abc"}))
def on_link_opened(self, event):
if event.receiver == self.receiver:
if self.receiver.remote_source.address != self.address:
self.error = "Expected %s, got %s" % (self.address, self.receiver.remote_source.address)
self.conn_normal.close()
self.conn_route.close()
self.timer.cancel()
def on_link_opening(self, event):
if event.sender:
self.sender = event.sender
if not self.sender.remote_source.dynamic:
self.error = "Expected sender with dynamic source"
self.conn_normal.close()
self.conn_route.close()
self.timer.cancel()
self.sender.source.address = self.address
self.sender.open()
def run(self):
Container(self).run()
class DynamicTarget(LinkOption):
def apply(self, link):
link.target.dynamic = True
link.target.address = None
class DynamicTargetTest(MessagingHandler):
##
# This test verifies that a dynamic source can be propagated via link-route to
# a route-container.
##
def __init__(self, normal_addr, route_addr):
super(DynamicTargetTest, self).__init__(prefetch=0, auto_accept=False)
self.normal_addr = normal_addr
self.route_addr = route_addr
self.dest = "pulp.task.DynamicTarget"
self.address = "DynamicTargetAddress"
self.error = None
def timeout(self):
self.error = "Timeout Expired - Check for cores"
self.conn_normal.close()
self.conn_route.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.conn_route = event.container.connect(self.route_addr)
def on_connection_opened(self, event):
if event.connection == self.conn_route:
self.conn_normal = event.container.connect(self.normal_addr)
elif event.connection == self.conn_normal:
self.sender = event.container.create_sender(self.conn_normal, None, options=[DynamicTarget(), DynamicNodeProperties({"x-opt-qd.address": u"pulp.task.abc"})])
def on_link_opened(self, event):
if event.sender == self.sender:
if self.sender.remote_target.address != self.address:
self.error = "Expected %s, got %s" % (self.address, self.receiver.remote_source.address)
self.conn_normal.close()
self.conn_route.close()
self.timer.cancel()
def on_link_opening(self, event):
if event.receiver:
self.receiver = event.receiver
if not self.receiver.remote_target.dynamic:
self.error = "Expected receiver with dynamic source"
self.conn_normal.close()
self.conn_route.close()
self.timer.cancel()
self.receiver.target.address = self.address
self.receiver.open()
def run(self):
Container(self).run()
class DetachNoCloseTest(MessagingHandler):
##
# This test verifies that link-detach (not close) is propagated properly
##
def __init__(self, normal_addr, route_addr):
super(DetachNoCloseTest, self).__init__(prefetch=0, auto_accept=False)
self.normal_addr = normal_addr
self.route_addr = route_addr
self.dest = "pulp.task.DetachNoClose"
self.error = None
def timeout(self):
self.error = "Timeout Expired - Check for cores"
self.conn_normal.close()
self.conn_route.close()
def stop(self):
self.conn_normal.close()
self.conn_route.close()
self.timer.cancel()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.conn_route = event.container.connect(self.route_addr)
def on_connection_opened(self, event):
if event.connection == self.conn_route:
self.conn_normal = event.container.connect(self.normal_addr)
elif event.connection == self.conn_normal:
self.receiver = event.container.create_receiver(self.conn_normal, self.dest)
def on_link_opened(self, event):
if event.receiver == self.receiver:
self.receiver.detach()
def on_link_remote_detach(self, event):
if event.sender == self.sender:
self.sender.detach()
if event.receiver == self.receiver:
##
# Test passed, we expected a detach on the propagated sender and back
##
self.stop()
def on_link_closing(self, event):
if event.sender == self.sender:
self.error = 'Propagated link was closed. Expected it to be detached'
self.stop()
if event.receiver == self.receiver:
self.error = 'Client link was closed. Expected it to be detached'
self.stop()
def on_link_opening(self, event):
if event.sender:
self.sender = event.sender
self.sender.source.address = self.sender.remote_source.address
self.sender.open()
def run(self):
Container(self).run()
class DetachMixedCloseTest(MessagingHandler):
##
# This test verifies that link-detach (not close) is propagated properly
##
def __init__(self, normal_addr, route_addr):
super(DetachMixedCloseTest, self).__init__(prefetch=0, auto_accept=False)
self.normal_addr = normal_addr
self.route_addr = route_addr
self.dest = "pulp.task.DetachMixedClose"
self.error = None
def timeout(self):
self.error = "Timeout Expired - Check for cores"
self.conn_normal.close()
self.conn_route.close()
def stop(self):
self.conn_normal.close()
self.conn_route.close()
self.timer.cancel()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.conn_route = event.container.connect(self.route_addr)
def on_connection_opened(self, event):
if event.connection == self.conn_route:
self.conn_normal = event.container.connect(self.normal_addr)
elif event.connection == self.conn_normal:
self.receiver = event.container.create_receiver(self.conn_normal, self.dest)
def on_link_opened(self, event):
if event.receiver == self.receiver:
self.receiver.detach()
def on_link_remote_detach(self, event):
if event.sender == self.sender:
self.sender.close()
if event.receiver == self.receiver:
self.error = 'Client link was detached. Expected it to be closed'
self.stop()
def on_link_closing(self, event):
if event.sender == self.sender:
self.error = 'Propagated link was closed. Expected it to be detached'
self.stop()
if event.receiver == self.receiver:
##
# Test Passed
##
self.stop()
def on_link_opening(self, event):
if event.sender:
self.sender = event.sender
self.sender.source.address = self.sender.remote_source.address
self.sender.open()
def run(self):
Container(self).run()
# Test to validate fix for DISPATCH-927
class EchoDetachReceived(MessagingHandler):
def __init__(self, sender_address, recv_address):
super(EchoDetachReceived, self).__init__()
self.sender_address = sender_address
self.recv_address = recv_address
self.dest = "org.apache.dev"
self.num_msgs = 100
self.num_receivers = 10
self.msgs_sent = 0
self.receiver_conn = None
self.sender_conn = None
self.sender = None
self.receiver_dict = {}
self.error = None
self.receiver_attaches = 0
self.timer = None
self.sender_attached = False
self.received_msgs_dict = {}
self.receiver_detach_dict = {}
self.num_detaches_echoed = 0
@property
def msgs_received(self):
return sum(self.received_msgs_dict.values())
def timeout(self):
self.bail("Timeout Expired: msgs_sent=%d msgs_received=%d, number of detaches received=%d"
% (self.msgs_sent, self.msgs_received, self.num_detaches_echoed))
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
# Create two separate connections for sender and receivers
self.receiver_conn = event.container.connect(self.recv_address)
self.sender_conn = event.container.connect(self.sender_address)
for i in range(self.num_receivers):
name = "R%d" % i
self.receiver_dict[name] = event.container.create_receiver(self.receiver_conn, self.dest, name=name)
self.received_msgs_dict[name] = 0
def bail(self, text=None):
self.error = text
self.sender_conn.close()
self.receiver_conn.close()
self.timer.cancel()
def on_link_opened(self, event):
if event.receiver:
if event.receiver.name in list(self.receiver_dict):
self.receiver_attaches += 1
# The response receiver attaches have been received. The receiver sent attaches which was link routed
# all the way to the 'broker' router and the response attaches have come back.
# It is now time to create the sender.
if self.receiver_attaches == self.num_receivers:
self.sender = event.container.create_sender(self.sender_conn, self.dest)
elif event.sender:
if not self.sender_attached:
if event.sender == self.sender:
# The sender attaches were link routed as well and the response attach has been received.
self.sender_attached = True
def on_sendable(self, event):
# The sender will send 100 messages
if self.receiver_attaches == self.num_receivers and self.sender_attached:
if self.msgs_sent < self.num_msgs:
msg = Message(body="Hello World")
self.sender.send(msg)
self.msgs_sent += 1
def on_message(self, event):
if event.receiver and event.receiver.name in list(self.receiver_dict):
self.received_msgs_dict[event.receiver.name] += 1
if sum(self.received_msgs_dict.values()) == self.num_msgs:
# The receivers have received a total of 100 messages. Close the receivers. The detach sent by these
# receivers will travel all the way over the link route and the 'broker' router will respond with a
# detach
for receiver in list(self.receiver_dict):
self.receiver_dict[receiver].close()
def on_link_closed(self, event):
if event.receiver.name in list(self.receiver_dict) and event.receiver.name not in list(self.receiver_detach_dict):
self.receiver_detach_dict[event.receiver.name] = event.receiver
self.num_detaches_echoed += 1
# Terminate the test only if both detach frames have been received.
if all(receiver in list(self.receiver_detach_dict) for receiver in list(self.receiver_dict)):
self.bail()
def run(self):
Container(self).run()
class TerminusAddrTest(MessagingHandler):
"""
This tests makes sure that the link route address is visible in the output of qdstat -l command.
Sets up a sender on address pulp.task.terminusTestSender and a receiver on pulp.task.terminusTestReceiver.
Connects to the router to which the sender is attached and makes sure that the pulp.task.terminusTestSender address
shows up with an 'in' and 'out'
Similarly connects to the router to which the receiver is attached and makes sure that the
pulp.task.terminusTestReceiver address shows up with an 'in' and 'out'
"""
def __init__(self, sender_address, listening_address, query_address_sending, query_address_listening):
super(TerminusAddrTest, self).__init__()
self.sender_address = sender_address
self.listening_address = listening_address
self.sender = None
self.receiver = None
self.message_received = False
self.receiver_connection = None
self.sender_connection = None
# We will run a query on the same router where the sender is attached
self.query_address_sending = query_address_sending
# We will run a query on the same router where the receiver is attached
self.query_address_listening = query_address_listening
self.count = 0
self.in_receiver_found = False
self.out_receiver_found = False
self.in_sender_found = False
self.out_sender_found = False
self.receiver_link_opened = False
self.sender_link_opened = False
def on_start(self, event):
self.receiver_connection = event.container.connect(self.listening_address)
def on_connection_remote_open(self, event):
if event.connection == self.receiver_connection:
continue_loop = True
# The following loops introduces a wait. It gives time to the
# router so that the address Dpulp.task can show up on the remoteCount
i = 0
while continue_loop:
if i > 100: # If we have run the read command for more than hundred times and we still do not have
# the remoteCount set to 1, there is a problem, just exit out of the function instead
# of looping to infinity.
self.receiver_connection.close()
return
local_node = Node.connect(self.query_address_sending, timeout=TIMEOUT)
out = local_node.read(type='org.apache.qpid.dispatch.router.address', name='Dpulp.task').remoteCount
if out == 1:
continue_loop = False
i += 1
sleep(0.25)
self.sender_connection = event.container.connect(self.sender_address)
# Notice here that the receiver and sender are listening on different addresses. Receiver on
# pulp.task.terminusTestReceiver and the sender on pulp.task.terminusTestSender
self.receiver = event.container.create_receiver(self.receiver_connection, "pulp.task.terminusTestReceiver")
self.sender = event.container.create_sender(self.sender_connection, "pulp.task.terminusTestSender", options=AtMostOnce())
def on_link_opened(self, event):
if event.receiver == self.receiver:
self.receiver_link_opened = True
local_node = Node.connect(self.query_address_listening, timeout=TIMEOUT)
out = local_node.query(type='org.apache.qpid.dispatch.router.link')
link_dir_index = out.attribute_names.index("linkDir")
owning_addr_index = out.attribute_names.index("owningAddr")
# Make sure that the owningAddr M0pulp.task.terminusTestReceiver shows up on both in and out.
# The 'out' link is on address M0pulp.task.terminusTestReceiver outgoing from the router B to the receiver
# The 'in' link is on address M0pulp.task.terminusTestReceiver incoming from router C to router B
for result in out.results:
if result[link_dir_index] == 'in' and result[owning_addr_index] == 'M0pulp.task.terminusTestReceiver':
self.in_receiver_found = True
if result[link_dir_index] == 'out' and result[owning_addr_index] == 'M0pulp.task.terminusTestReceiver':
self.out_receiver_found = True
if event.sender == self.sender:
self.sender_link_opened = True
local_node = Node.connect(self.query_address_sending, timeout=TIMEOUT)
out = local_node.query(type='org.apache.qpid.dispatch.router.link')
link_dir_index = out.attribute_names.index("linkDir")
owning_addr_index = out.attribute_names.index("owningAddr")
# Make sure that the owningAddr M0pulp.task.terminusTestSender shows up on both in and out.
# The 'in' link is on address M0pulp.task.terminusTestSender incoming from sender to router
# The 'out' link is on address M0pulp.task.terminusTestSender outgoing from router C to router B
for result in out.results:
if result[link_dir_index] == 'in' and result[owning_addr_index] == 'M0pulp.task.terminusTestSender':
self.in_sender_found = True
if result[link_dir_index] == 'out' and result[owning_addr_index] == 'M0pulp.task.terminusTestSender':
self.out_sender_found = True
# Shutdown the connections only if the on_link_opened has been called for sender and receiver links.
if self.sender_link_opened and self.receiver_link_opened:
self.sender.close()
self.receiver.close()
self.sender_connection.close()
self.receiver_connection.close()
def run(self):
Container(self).run()
class MultiLinkSendReceive(MessagingHandler):
class SendState(object):
def __init__(self, link):
self.link = link
self.sent = False
self.accepted = False
self.done = False
self.closed = False
def send(self, subject, body):
if not self.sent:
self.link.send(Message(subject=subject, body=body, address=self.link.target.address))
self.sent = True
def on_accepted(self):
self.accepted = True
self.done = True
def close(self):
if not self.closed:
self.closed = True
self.link.close()
self.link.connection.close()
class RecvState(object):
def __init__(self, link):
self.link = link
self.received = False
self.done = False
self.closed = False
def on_message(self):
self.received = True
self.done = True
def close(self):
if not self.closed:
self.closed = True
self.link.close()
self.link.connection.close()
def __init__(self, send_urls, recv_urls, name, message=None):
super(MultiLinkSendReceive, self).__init__()
self.send_urls = send_urls
self.recv_urls = recv_urls
self.senders = {}
self.receivers = {}
self.message = message or "SendReceiveTest"
self.sent = False
self.error = None
self.name = name
def close(self):
for sender in self.senders.values():
sender.close()
for receiver in self.receivers.values():
receiver.close()
def all_done(self):
for sender in self.senders.values():
if not sender.done:
return False
for receiver in self.receivers.values():
if not receiver.done:
return False
return True
def timeout(self):
self.error = "Timeout Expired"
self.close()
def stop_if_all_done(self):
if self.all_done():
self.stop()
def stop(self):
self.close()
self.timer.cancel()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
event.container.container_id = None
for u in self.send_urls:
s = self.SendState(event.container.create_sender(u, name=self.name))
self.senders[s.link.connection.container] = s
for u in self.recv_urls:
r = self.RecvState(event.container.create_receiver(u, name=self.name))
self.receivers[r.link.connection.container] = r
def on_sendable(self, event):
self.senders[event.connection.container].send(self.name, self.message)
def on_message(self, event):
if self.message != event.message.body:
error = "Incorrect message. Got %s, expected %s" % (event.message.body, self.message.body)
self.receivers[event.connection.container].on_message()
self.stop_if_all_done()
def on_accepted(self, event):
self.senders[event.connection.container].on_accepted()
self.stop_if_all_done()
def run(self):
Container(self).run()
class LinkRouteProtocolTest(TestCase):
"""
Test link route implementation against "misbehaving" containers
Uses a custom fake broker (not a router) that can do weird things at the
protocol level.
+-------------+ +---------+ +-----------------+
| | <------ | | <----- | blocking_sender |
| fake broker | | QDR.A | +-----------------+
| | ------> | | ------> +-------------------+
+-------------+ +---------+ | blocking_receiver |
+-------------------+
"""
@classmethod
def setUpClass(cls):
"""Configure and start QDR.A"""
super(LinkRouteProtocolTest, cls).setUpClass()
config = [
('router', {'mode': 'standalone', 'id': 'QDR.A'}),
# for client connections:
('listener', {'role': 'normal',
'host': '0.0.0.0',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# to connect to the fake broker
('connector', {'name': 'broker',
'role': 'route-container',
'host': '127.0.0.1',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# forward 'org.apache' messages to + from fake broker:
('linkRoute', {'prefix': 'org.apache', 'containerId': 'FakeBroker', 'direction': 'in'}),
('linkRoute', {'prefix': 'org.apache', 'containerId': 'FakeBroker', 'direction': 'out'})
]
config = Qdrouterd.Config(config)
cls.router = cls.tester.qdrouterd('A', config, wait=False)
def _fake_broker(self, cls):
"""Spawn a fake broker listening on the broker's connector
"""
fake_broker = cls(self.router.connector_addresses[0])
# wait until the connection to the fake broker activates
self.router.wait_connectors()
return fake_broker
def test_DISPATCH_1092(self):
# This fake broker will force the session closed after the link
# detaches. Verify that the session comes back up correctly when the
# next client attaches
killer = self._fake_broker(SessionKiller)
for i in range(2):
bconn = BlockingConnection(self.router.addresses[0])
bsender = bconn.create_sender(address="org.apache",
options=AtLeastOnce())
msg = Message(body="Hey!")
bsender.send(msg)
bsender.close()
bconn.close()
killer.join()
class SessionKiller(FakeBroker):
"""DISPATCH-1092: force a session close when the link closes. This should
cause the router to re-create the session when the next client attaches.
"""
def __init__(self, url):
super(SessionKiller, self).__init__(url)
def on_link_closing(self, event):
event.link.close()
event.session.close()
class FakeBrokerDrain(FakeBroker):
"""
DISPATCH-1496 - Make sure that the router does not grant additional credit
when drain is issued by a receiver connected to the router on a
link routed address
"""
def __init__(self, url):
super(FakeBrokerDrain, self).__init__(url)
self.first_flow_received = False
self.first_drain_mode = False
self.second_drain_mode = False
self.error = None
self.num_flows = 0
self.success = False
def on_link_flow(self, event):
if event.link.is_sender:
if event.sender.drain_mode:
if not self.first_drain_mode:
self.first_drain_mode = True
event.sender.drained()
elif not self.second_drain_mode:
self.second_drain_mode = True
if event.link.credit == 1000:
# Without the patch for DISPATCH-1496,
# the event.link.credit value would be 2000
self.success = True
else:
self.success = False
event.sender.drained()
else:
if not self.first_flow_received:
self.first_flow_received = True
msg = Message(body="First Drain Transfer")
event.link.send(msg)
class DrainReceiver(MessagingHandler):
def __init__(self, url, fake_broker):
super(DrainReceiver, self).__init__(prefetch=0, auto_accept=False)
self.url = url
self.received = 0
self.receiver = None
self.first_drain_sent = False
self.second_drain_sent = False
self.first_flow_sent = False
self.receiver_conn = None
self.error = None
self.num_flows = 0
self.fake_broker = fake_broker
def on_start(self, event):
self.receiver_conn = event.container.connect(self.url)
self.receiver = event.container.create_receiver(self.receiver_conn, "org.apache")
# Step 1: Send a flow of 1000 to the router. The router will forward this
# flow to the FakeBroker
self.receiver.flow(1000)
self.first_flow_sent = True
def on_link_flow(self, event):
if event.receiver == self.receiver:
self.num_flows += 1
if self.num_flows == 1:
# Step 4: The response drain received from the FakeBroker
# Step 5: Send second flow of 1000 credits. This is forwarded to the FakeBroker
self.receiver.flow(1000)
self.timer = event.reactor.schedule(3, TestTimeout(self))
elif self.num_flows == 2:
if not self.fake_broker.success:
self.error = "The FakeBroker did not receive correct credit of 1000"
self.receiver_conn.close()
def timeout(self):
# Step 6: The second drain is sent to the router. The router was forwarding the wrong credit (2000) to the FakeBroker
# but with the fix for DISPATCH-1496, the correct credit is forwarded (1000)
self.receiver.drain(0)
self.second_drain_sent = True
def on_message(self, event):
if event.receiver == self.receiver:
self.received += 1
# Step 2: In response to Step 1, the broker has sent the only message in its queue
if self.received == 1:
self.first_drain_sent = True
#print ("First message received. Doing first drain")
# Step 3: The receiver drains after receiving the first message.
# This drain is forwarded to the FakeBroker
self.receiver.drain(0)
def run(self):
Container(self).run()
class LinkRouteDrainTest(TestCase):
"""
Test link route drain implementation.
DISPATCH-1496 alleges that the router is granting extra credit when
forwarding the drain.
Uses a router which connects to a FakeBroker (FB)
+-------------+ +---------+
| | <------ | |
| fake broker | | QDR.A |
| | ------> | | ------> +-------------------+
+-------------+ +---------+ | receiver |
+-------------------+
The router will grant extra credit when the following sequence is used
1. The receiver attaches to the router on a a link routed address called "org.apache"
2. Receiver issues a flow of 1000. The FakeBroker has only one message in its
"examples" queue and it sends it over to the router which forwards it to the receiver
3. After receiving the message the receiver issues a drain(0). This drain is
forwarded to the FakeBroker by the router and the FB responds. There
is not problem with this drain
4. The receiver again gives a flow of 1000 and it is forwarded to the FB. There
are no messages in the broker queue, so the FB sends no messages
5. The receiver again issues a drain(0). At this time, without the fix for
DISPATCH-1496, the router issues double the credit to the FB. Instead
of issuing a credit of 1000, it issues a credit of 2000.
"""
@classmethod
def setUpClass(cls):
"""Configure and start QDR.A"""
super(LinkRouteDrainTest, cls).setUpClass()
config = [
('router', {'mode': 'standalone', 'id': 'QDR.A'}),
# for client connections:
('listener', {'role': 'normal',
'host': '0.0.0.0',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# to connect to the fake broker
('connector', {'name': 'broker',
'role': 'route-container',
'host': '127.0.0.1',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# forward 'org.apache' messages to + from fake broker:
('linkRoute', {'prefix': 'org.apache', 'containerId': 'FakeBroker', 'direction': 'in'}),
('linkRoute', {'prefix': 'org.apache', 'containerId': 'FakeBroker', 'direction': 'out'})
]
config = Qdrouterd.Config(config)
cls.router = cls.tester.qdrouterd('A', config, wait=False)
def _fake_broker(self, cls):
"""Spawn a fake broker listening on the broker's connector
"""
fake_broker = cls(self.router.connector_addresses[0])
# wait until the connection to the fake broker activates
self.router.wait_connectors()
return fake_broker
def test_DISPATCH_1496(self):
fake_broker = self._fake_broker(FakeBrokerDrain)
drain_receiver = DrainReceiver(self.router.addresses[0], fake_broker)
drain_receiver.run()
self.assertEqual(drain_receiver.error, None)
class EmptyTransferTest(TestCase):
@classmethod
def setUpClass(cls):
super(EmptyTransferTest, cls).setUpClass()
cls.ROUTER_LISTEN_PORT = cls.tester.get_port()
config = [
('router', {'mode': 'standalone', 'id': 'QDR.A'}),
# the client will connect to this listener
('listener', {'role': 'normal',
'host': '0.0.0.0',
'port': cls.ROUTER_LISTEN_PORT,
'saslMechanisms': 'ANONYMOUS'}),
# to connect to the fake broker
('connector', {'name': 'broker',
'role': 'route-container',
'host': '127.0.0.1',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
('linkRoute',
{'prefix': 'examples', 'containerId': 'FakeBroker',
'direction': 'in'}),
('linkRoute',
{'prefix': 'examples', 'containerId': 'FakeBroker',
'direction': 'out'})
]
config = Qdrouterd.Config(config)
cls.router = cls.tester.qdrouterd('A', config, wait=False)
def _fake_broker(self, cls):
"""
Spawn a fake broker listening on the broker's connector
"""
fake_broker = cls(self.router.connector_addresses[0])
# wait until the connection to the fake broker activates
self.router.wait_connectors()
return fake_broker
def test_DISPATCH_1988(self):
fake_broker = self._fake_broker(FakeBroker)
AMQP_OPEN_BEGIN_ATTACH = bytearray(
b'\x41\x4d\x51\x50\x00\x01\x00\x00\x00\x00\x00\x21\x02\x00\x00'
b'\x00\x00\x53\x10\xd0\x00\x00\x00\x11\x00\x00\x00\x04\xa1\x06'
b'\x2e\x2f\x73\x65\x6e\x64\x40\x40\x60\x7f\xff\x00\x00\x00\x21'
b'\x02\x00\x00\x00\x00\x53\x11\xd0\x00\x00\x00\x11\x00\x00\x00'
b'\x04\x40\x52\x00\x70\x7f\xff\xff\xff\x70\x7f\xff\xff\xff\x00'
b'\x00\x00\x5b\x02\x00\x00\x00\x00\x53\x12\xd0\x00\x00\x00\x4b'
b'\x00\x00\x00\x0b\xa1\x09\x6d\x79\x5f\x73\x65\x6e\x64\x65\x72'
b'\x52\x00\x42\x50\x02\x50\x00\x00\x53\x28\xd0\x00\x00\x00\x0b'
b'\x00\x00\x00\x05\x40\x52\x00\x40\x52\x00\x42\x00\x53\x29\xd0'
b'\x00\x00\x00\x14\x00\x00\x00\x05\xa1\x08\x65\x78\x61\x6d\x70'
b'\x6c\x65\x73\x52\x00\x40\x52\x00\x42\x40\x40\x52\x00\x53\x00')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the router listening port and send an amqp, open,
# begin, attach. The attach is sent on the link
# routed address, "examples"
s.connect(("0.0.0.0", EmptyTransferTest.ROUTER_LISTEN_PORT))
s.sendall(AMQP_OPEN_BEGIN_ATTACH)
# Give a second for the attach to propagate to the broker and
# for the broker to send a response attach
sleep(1)
data = s.recv(2048)
self.assertIn("examples", repr(data))
# First send a message on link routed address "examples" with
# message body of "message 0"
# Verify the the sent message has been accepted.
TRANSFER_1 = bytearray(b'\x00\x00\x00\x31\x02\x00\x00\x00'
+ b'\x00\x53\x14\xc0\x0f\x0b\x43\x52\x01'
+ b'\xa0\x01\x01\x43\x42'
+ b'\x40\x40\x40\x40\x40\x42\x00\x53'
+ b'\x73\xc0\x02\x01\x44\x00\x53\x77'
+ b'\xa1\x09\x6d\x65\x73\x73\x61\x67'
+ b'\x65\x20\x30')
s.sendall(TRANSFER_1)
sleep(0.5)
data = s.recv(1024)
# The delivery has been accepted.
self.assertIn("x00S$E", repr(data))
# Test case 1
# Send an empty transfer frame to the router and you should
# receive a rejected disposition from the router.
# Without the fix for DISPATCH_1988,
# upon sending this EMPTY_TRANSFER
# the router crashes with the following assert
# qpid-dispatch/src/message.c:1260: qd_message_add_fanout: Assertion `content->pending && qd_buffer_size(content->pending) > 0' failed.
# This is the empty transfer frame that is sent to the router.
# [0x614000030050]: AMQP:FRAME:0 <- @transfer(20) [handle=0, delivery-id=0, delivery-tag=b"\x01", message-format=0, settled=false, batchable=false]
EMPTY_TRANSFER = bytearray(b'\x00\x00\x00\x1c\x02\x00\x00\x00'
+ b'\x00\x53\x14\xc0\x0f\x0b\x43\x52'
+ b'\x02\xa0\x01\x02\x43\x42'
+ b'\x42\x40\x40\x40\x40\x42')
s.sendall(EMPTY_TRANSFER)
sleep(1)
data = s.recv(1024)
# The delivery has been rejected.
self.assertIn("x00S%E", repr(data))
# Let's send another transfer to make sure that the
# router has not crashed.
TRANSFER_1 = bytearray(b'\x00\x00\x00\x31\x02\x00\x00\x00'
+ b'\x00\x53\x14\xc0\x0f\x0b\x43\x52\x03'
+ b'\xa0\x01\x03\x43\x42'
+ b'\x40\x40\x40\x40\x40\x42\x00\x53'
+ b'\x73\xc0\x02\x01\x44\x00\x53\x77'
+ b'\xa1\x09\x6d\x65\x73\x73\x61\x67'
+ b'\x65\x20\x30')
s.sendall(TRANSFER_1)
sleep(0.5)
data = s.recv(1024)
# The delivery has been accepted.
self.assertIn("x00S$E", repr(data))
# Test case 2
# Now, send two empty transfer frames, first transfer has
# more=true and the next transfer has more=false.
# This will again be rejected by the router.
# The following are the two transfer frames that will be
# sent to the router.
#[0x614000020050]: AMQP:FRAME: 0 <- @ transfer(20)[handle = 0, delivery - id = 4, delivery - tag = b"\x04", message - format = 0, settled = false, more = true, batchable = false]
#[0x614000020050]: AMQP:FRAME: 0 <- @ transfer(20)[handle = 0, delivery - id = 4, delivery - tag = b"\x04", message - format = 0, settled = false, more = false, batchable = false]
EMPTY_TRANSFER_MORE_TRUE = bytearray(
b'\x00\x00\x00\x1c\x02\x00\x00\x00'
+ b'\x00\x53\x14\xc0\x0f\x0b\x43\x52\x04'
+ b'\xa0\x01\x04\x43\x42'
+ b'\x41\x40\x40\x40\x40\x42')
EMPTY_TRANSFER_MORE_FALSE = bytearray(
b'\x00\x00\x00\x1c\x02\x00\x00\x00'
+ b'\x00\x53\x14\xc0\x0f\x0b\x43\x52\x04'
+ b'\xa0\x01\x04\x43\x42'
+ b'\x42\x40\x40\x40\x40\x42')
s.sendall(EMPTY_TRANSFER_MORE_TRUE)
s.sendall(EMPTY_TRANSFER_MORE_FALSE)
sleep(0.5)
data = s.recv(1024)
# The delivery has been rejected.
self.assertIn("x00S%E", repr(data))
s.close()
class ConnectionLinkRouteTest(TestCase):
"""
Test connection scoped link route implementation
Base configuration:
+-----------------+
+---------+ +---------+<--| blocking_sender |
+-----------------+ | | | | +-----------------+
| Fake LR Service |<==>| QDR.A |<==>| QDR.B |
+-----------------+ | | | | +-------------------+
+---------+ +---------+-->| blocking_receiver |
+-------------------+
The Fake Link Route Service will create connection-scoped link routes to
QDR.A, while blocking sender/receivers on QDR.B will send/receive messages
via the link route.
"""
_AS_TYPE = "org.apache.qpid.dispatch.router.connection.linkRoute"
@classmethod
def setUpClass(cls):
super(ConnectionLinkRouteTest, cls).setUpClass()
b_port = cls.tester.get_port()
configs = [
# QDR.A:
[('router', {'mode': 'interior', 'id': 'QDR.A'}),
# for fake connection-scoped LRs:
('listener', {'role': 'normal',
'host': '0.0.0.0',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# for fake route-container LR connections:
('listener', {'role': 'route-container',
'host': '0.0.0.0',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# to connect to the QDR.B
('connector', {'role': 'inter-router',
'host': '127.0.0.1',
'port': b_port,
'saslMechanisms': 'ANONYMOUS'})],
# QDR.B:
[('router', {'mode': 'interior', 'id': 'QDR.B'}),
# for client connections
('listener', {'role': 'normal',
'host': '0.0.0.0',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# for connection to QDR.A
('listener', {'role': 'inter-router',
'host': '0.0.0.0',
'port': b_port,
'saslMechanisms': 'ANONYMOUS'})]
]
cls.routers = []
for c in configs:
config = Qdrouterd.Config(c)
cls.routers.append(cls.tester.qdrouterd(config=config, wait=False))
cls.QDR_A = cls.routers[0]
cls.QDR_B = cls.routers[1]
cls.QDR_A.wait_router_connected('QDR.B')
cls.QDR_B.wait_router_connected('QDR.A')
def _get_address(self, mgmt, addr):
a_type = 'org.apache.qpid.dispatch.router.address'
return list(filter(lambda a: a['name'].endswith(addr),
mgmt.query(a_type)))
def test_config_file_bad(self):
# verify that specifying a connection link route in the configuration
# file fails
config = [('router', {'mode': 'interior', 'id': 'QDR.X'}),
('listener', {'role': 'normal',
'host': '0.0.0.0',
'port': self.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
('connection.linkRoute',
{'pattern': "i/am/bad",
'direction': "out"})
]
cfg = Qdrouterd.Config(config)
# we expect the router to fail
router = self.tester.qdrouterd("X", cfg, wait=False, expect=Process.EXIT_FAIL)
def test_mgmt(self):
# test create, delete, and query
mgmt_conn = BlockingConnection(self.QDR_A.addresses[0])
mgmt_proxy = ConnLinkRouteMgmtProxy(mgmt_conn)
for i in range(10):
rsp = mgmt_proxy.create_conn_link_route("lr1-%d" % i,
{'pattern': "*/hi/there/%d" % i,
'direction':
'out' if i % 2 else 'in'})
self.assertEqual(201, rsp.status_code)
# test query
rsp = mgmt_proxy.query_conn_link_routes()
self.assertEqual(200, rsp.status_code)
self.assertEqual(10, len(rsp.results))
entities = rsp.results
# test read
rsp = mgmt_proxy.read_conn_link_route('lr1-5')
self.assertEqual(200, rsp.status_code)
self.assertEqual("lr1-5", rsp.attrs['name'])
self.assertEqual("*/hi/there/5", rsp.attrs['pattern'])
self.assertEqual(mgmt_conn.container.container_id,
rsp.attrs['containerId'])
# bad creates
attrs = [{'pattern': "bad", 'direction': "bad"},
{'direction': 'in'},
{},
{'pattern': ''},
{'pattern': 7}]
for a in attrs:
rsp = mgmt_proxy.create_conn_link_route("iamnoone", a)
self.assertEqual(400, rsp.status_code)
# bad read
rsp = mgmt_proxy.read_conn_link_route('iamnoone')
self.assertEqual(404, rsp.status_code)
# bad delete
rsp = mgmt_proxy.delete_conn_link_route('iamnoone')
self.assertEqual(404, rsp.status_code)
# delete all
for r in entities:
self.assertEqual(200, r.status_code)
rsp = mgmt_proxy.delete_conn_link_route(r.attrs['name'])
self.assertEqual(204, rsp.status_code)
# query - should be none left
rsp = mgmt_proxy.query_conn_link_routes()
self.assertEqual(200, rsp.status_code)
self.assertEqual(0, len(rsp.results))
def test_address_propagation(self):
# test service that creates and deletes connection link routes
fs = ConnLinkRouteService(self.QDR_A.addresses[1], container_id="FakeService",
config=[("clr1",
{"pattern": "flea.*",
"direction": "out"}),
("clr2",
{"pattern": "flea.*",
"direction": "in"})])
self.assertEqual(2, len(fs.values))
# the address should propagate to A and B
self.QDR_A.wait_address(address="flea.*", count=2)
self.QDR_B.wait_address(address="flea.*", count=2)
# now have the service delete the config
fs.delete_config()
# eventually the addresses will be un-published
mgmt_A = QdManager(self, address=self.QDR_A.addresses[0])
mgmt_B = QdManager(self, address=self.QDR_B.addresses[0])
deadline = time() + TIMEOUT
while (self._get_address(mgmt_A, "flea.*")
or self._get_address(mgmt_B, "flea.*")):
self.assertTrue(time() < deadline)
sleep(0.1)
fs.join()
# simple forwarding tests with auto delete
def test_send_receive(self):
COUNT = 5
mgmt_A = QdManager(self, address=self.QDR_A.addresses[0])
mgmt_B = QdManager(self, address=self.QDR_B.addresses[0])
# connect broker to A route-container
fs = ConnLinkRouteService(self.QDR_A.addresses[1], container_id="FakeService",
config=[("clr1",
{"pattern": "flea.*",
"direction": "out"}),
("clr2",
{"pattern": "flea.*",
"direction": "in"})])
self.assertEqual(2, len(fs.values))
# wait for the address to propagate to B
self.QDR_B.wait_address(address="flea.*", count=2)
# ensure the link routes are not visible via other connections
clrs = mgmt_A.query(self._AS_TYPE)
self.assertEqual(0, len(clrs))
# send from A to B
r = AsyncTestReceiver(self.QDR_B.addresses[0],
"flea.B",
container_id="flea.BReceiver")
s = AsyncTestSender(self.QDR_A.addresses[0],
"flea.B",
container_id="flea.BSender",
message=Message(body="SENDING TO flea.B"),
count=COUNT)
s.wait() # for sender to complete
for i in range(COUNT):
self.assertEqual("SENDING TO flea.B",
r.queue.get(timeout=TIMEOUT).body)
r.stop()
self.assertEqual(COUNT, fs.in_count)
# send from B to A
r = AsyncTestReceiver(self.QDR_A.addresses[0],
"flea.A",
container_id="flea.AReceiver")
s = AsyncTestSender(self.QDR_B.addresses[0],
"flea.A",
container_id="flea.ASender",
message=Message(body="SENDING TO flea.A"),
count=COUNT)
s.wait()
for i in range(COUNT):
self.assertEqual("SENDING TO flea.A",
r.queue.get(timeout=TIMEOUT).body)
r.stop()
self.assertEqual(2 * COUNT, fs.in_count)
# once the fake service closes its conn the link routes
# are removed so the link route addresses must be gone
fs.join()
mgmt_A = QdManager(self, address=self.QDR_A.addresses[0])
mgmt_B = QdManager(self, address=self.QDR_B.addresses[0])
deadline = time() + TIMEOUT
while (self._get_address(mgmt_A, "flea.*")
or self._get_address(mgmt_B, "flea.*")):
self.assertTrue(time() < deadline)
sleep(0.1)
class ConnLinkRouteService(FakeBroker):
def __init__(self, url, container_id, config, timeout=TIMEOUT):
self.conn = None
self.mgmt_proxy = None
self.mgmt_sender = None
self.mgmt_receiver = None
self._config = config
self._config_index = 0
self._config_done = Event()
self._config_error = None
self._config_values = []
self._cleaning_up = False
self._delete_done = Event()
self._delete_count = 0
self._event_injector = EventInjector()
self._delete_event = ApplicationEvent("delete_config")
super(ConnLinkRouteService, self).__init__(url, container_id)
if self._config_done.wait(timeout) is False:
raise Exception("Timed out waiting for configuration setup")
if self._config_error is not None:
raise Exception("Error: %s" % self._config_error)
@property
def values(self):
return self._config_values
def delete_config(self):
self._event_injector.trigger(self._delete_event)
if self._delete_done.wait(TIMEOUT) is False:
raise Exception("Timed out waiting for configuration delete")
def on_start(self, event):
"""
Do not create an acceptor, actively connect instead
"""
event.container.selectable(self._event_injector)
self.conn = event.container.connect(self.url)
def on_connection_opened(self, event):
if event.connection == self.conn:
if self.mgmt_receiver is None:
self.mgmt_receiver = event.container.create_receiver(self.conn,
dynamic=True)
super(ConnLinkRouteService, self).on_connection_opened(event)
def on_connection_closed(self, event):
if self._event_injector:
self._event_injector.close()
self._event_injector = None
super(ConnLinkRouteService, self).on_connection_closed(event)
def on_link_opened(self, event):
if event.link == self.mgmt_receiver:
self.mgmt_proxy = MgmtMsgProxy(self.mgmt_receiver.remote_source.address)
self.mgmt_sender = event.container.create_sender(self.conn,
target="$management")
def on_link_error(self, event):
# when a remote client disconnects the service will get a link error
# that is expected - simply clean up the link
self.on_link_closing(event)
def on_sendable(self, event):
if event.sender == self.mgmt_sender:
if not self._cleaning_up:
if self._config_index < len(self._config):
cfg = self._config[self._config_index]
msg = self.mgmt_proxy.create_conn_link_route(cfg[0], cfg[1])
self.mgmt_sender.send(msg)
self._config_index += 1
elif self._config_values:
cv = self._config_values.pop()
msg = self.mgmt_proxy.delete_conn_link_route(cv['name'])
self._delete_count += 1
else:
super(ConnLinkRouteService, self).on_sendable(event)
def on_message(self, event):
if event.receiver == self.mgmt_receiver:
response = self.mgmt_proxy.response(event.message)
if response.status_code == 201:
# created:
self._config_values.append(response.attrs)
if len(self._config_values) == len(self._config):
self._config_done.set()
elif response.status_code == 204:
# deleted
self._delete_count -= 1
if (not self._config_values) and self._delete_count == 0:
self._delete_done.set()
else:
# error
self._config_error = ("mgmt failed: %s" %
response.status_description)
self._config_done.set()
self._delete_done.set()
else:
super(ConnLinkRouteService, self).on_message(event)
def on_delete_config(self, event):
if not self._cleaning_up:
self._cleaning_up = True
if not self._config_values:
self._delete_done.set()
else:
try:
while self.mgmt_sender.credit > 0:
cv = self._config_values.pop()
msg = self.mgmt_proxy.delete_conn_link_route(cv["name"])
self.mgmt_sender.send(msg)
self._delete_count += 1
except IndexError:
pass
class ConnLinkRouteMgmtProxy(object):
"""
Manage connection scoped link routes over a given connection.
While the connection remains open the connection scoped links will remain
configured and active
"""
def __init__(self, bconn, credit=250):
self._receiver = bconn.create_receiver(address=None, dynamic=True, credit=credit)
self._sender = bconn.create_sender(address="$management")
self._proxy = MgmtMsgProxy(self._receiver.link.remote_source.address)
def __getattr__(self, key):
# wrap accesses to the management message functions so we can send and
# receive the messages using the blocking links
f = getattr(self._proxy, key)
if not callable(f):
return f
def _func(*args, **kwargs):
self._sender.send(f(*args, **kwargs))
return self._proxy.response(self._receiver.receive())
return _func
class InvalidTagTest(MessagingHandler):
"""Verify that a message with an invalid tag length is rejected
"""
def __init__(self, router_addr):
super(InvalidTagTest, self).__init__(auto_accept=False, auto_settle=False)
self.test_conn = None
self.test_address = router_addr
self.tx_ct = 0
self.accept_ct = 0
self.reject_ct = 0
self.error = None
def timeout(self):
self.error = "Timeout expired: sent=%d rcvd=%d" % (self.tx_ct,
self.accept_ct
+ self.reject_ct)
if self.test_conn:
self.test_conn.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.test_conn = event.container.connect(self.test_address)
rx = event.container.create_receiver(self.test_conn, "org.apache.foo")
def on_link_opened(self, event):
if event.receiver:
event.receiver.flow(100)
event.container.create_sender(event.connection, "org.apache.foo")
def on_sendable(self, event):
if self.tx_ct < 10:
self.tx_ct += 1
if self.tx_ct == 5:
event.sender.send(Message(body="YO"), tag=str("X" * 64))
else:
event.sender.send(Message(body="YO"), tag=str("BLAH%d" %
self.tx_ct))
def on_accepted(self, event):
self.accept_ct += 1
event.delivery.settle()
if self.accept_ct == 9 and self.reject_ct == 1:
event.connection.close()
self.timer.cancel()
def on_rejected(self, event):
self.reject_ct += 1
event.delivery.settle()
def on_message(self, event):
event.delivery.update(Delivery.ACCEPTED)
event.delivery.settle()
def run(self):
Container(self).run()
class Dispatch1428(TestCase):
"""
Sets up 2 routers (one of which are acting as brokers (QDR.A)).
QDR.A acting broker #1
+---------+ +---------+
| | <------ | |
| QDR.A | | QDR.B |
| | ------> | |
+---------+ +---------+
"""
@classmethod
def get_router(cls, index):
return cls.routers[index]
@classmethod
def setUpClass(cls):
"""Start two routers"""
super(Dispatch1428, cls).setUpClass()
def router(name, connection):
config = [
('router', {'mode': 'interior', 'id': 'QDR.%s' % name}),
] + connection
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=False))
cls.routers = []
a_listener_port = cls.tester.get_port()
b_listener_port = cls.tester.get_port()
router('A',
[
('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': a_listener_port, 'saslMechanisms': 'ANONYMOUS'}),
])
router('B',
[
('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': b_listener_port, 'saslMechanisms': 'ANONYMOUS'}),
('connector', {'name': 'one', 'role': 'route-container', 'host': '0.0.0.0', 'port': a_listener_port, 'saslMechanisms': 'ANONYMOUS'}),
('connector', {'name': 'two', 'role': 'route-container', 'host': '0.0.0.0', 'port': a_listener_port, 'saslMechanisms': 'ANONYMOUS'})
]
)
sleep(2)
def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):
p = self.popen(
['qdmanage'] + cmd.split(' ') + ['--bus', address or self.address(), '--indent=-1', '--timeout', str(TIMEOUT)],
stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
universal_newlines=True)
out = p.communicate(input)[0]
try:
p.teardown()
except Exception as e:
raise Exception("%s\n%s" % (e, out))
return out
def test_both_link_routes_active(self):
cmds = [
'CREATE --type=linkRoute name=foo prefix=foo direction=in connection=one',
'CREATE --type=linkRoute name=bar prefix=bar direction=in connection=two',
'CREATE --type=linkRoute name=baz prefix=baz direction=in containerId=QDR.A'
]
for c in cmds:
self.run_qdmanage(cmd=c, address=self.routers[1].addresses[0])
# Now that the qdmanage has run, query the link routes and make sure that their "operStatus" is "active" before
# running any of the tests.
long_type = 'org.apache.qpid.dispatch.router.config.linkRoute'
qd_manager = QdManager(self, address=self.routers[1].addresses[0])
for i in range(5):
all_link_routes_activated = True
link_routes = qd_manager.query(long_type)
for link_route in link_routes:
oper_status = link_route['operStatus']
if oper_status != "active":
all_link_routes_activated = False
break
if not all_link_routes_activated:
# One or more of the link routes have not been activated.
# Check after one second.
sleep(1)
else:
break
# All link routes created in this test MUST be activated before
# we can continue further testing.
self.assertTrue(all_link_routes_activated)
first = SendReceive("%s/foo" % self.routers[1].addresses[0], "%s/foo" % self.routers[0].addresses[0])
first.run()
self.assertEqual(None, first.error)
second = SendReceive("%s/bar" % self.routers[1].addresses[0], "%s/bar" % self.routers[0].addresses[0])
second.run()
self.assertEqual(None, second.error)
third = SendReceive("%s/baz" % self.routers[1].addresses[0], "%s/baz" % self.routers[0].addresses[0])
third.run()
self.assertEqual(None, third.error)
class SendReceive(MessagingHandler):
def __init__(self, send_url, recv_url, message=None):
super(SendReceive, self).__init__()
self.send_url = send_url
self.recv_url = recv_url
self.message = message or Message(body="SendReceiveTest")
self.sent = False
self.error = None
def close(self):
self.sender.close()
self.receiver.close()
self.sender.connection.close()
self.receiver.connection.close()
def timeout(self):
self.error = "Timeout Expired - Check for cores"
self.close()
def stop(self):
self.close()
self.timer.cancel()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
event.container.container_id = "SendReceiveTestClient"
self.sender = event.container.create_sender(self.send_url)
self.receiver = event.container.create_receiver(self.recv_url)
def on_sendable(self, event):
if not self.sent:
event.sender.send(self.message)
self.sent = True
def on_message(self, event):
if self.message.body != event.message.body:
self.error = "Incorrect message. Got %s, expected %s" % (event.message.body, self.message.body)
def on_accepted(self, event):
self.stop()
def run(self):
Container(self).run()
class DispositionSniffer(MessagingHandler):
"""
Capture the outgoing delivery after the remote has set its terminal
outcome. Used by tests that need to examine the delivery state
"""
def __init__(self, send_url):
super(DispositionSniffer, self).__init__(auto_accept=False,
auto_settle=False)
self.send_url = send_url
self.sender = None
self.timer = None
self.error = None
self.sent = False
self.delivery = None
def close(self):
if self.timer:
self.timer.cancel()
if self.sender:
self.sender.close()
self.sender.connection.close()
def timeout(self):
self.error = "Timeout Expired - Check for cores"
self.close()
def stop(self):
self.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.sender = event.container.create_sender(self.send_url)
def on_sendable(self, event):
if not self.sent:
event.sender.send(Message(body="HI"))
self.sent = True
def on_accepted(self, event):
self.stop()
def on_released(self, event):
self.delivery = event.delivery
self.close()
def on_modified(self, event):
self.delivery = event.delivery
self.close()
def on_rejected(self, event):
self.delivery = event.delivery
self.close()
def run(self):
Container(self).run()
class LinkRoute3Hop(TestCase):
"""
Sets up a linear 3 hop router network for testing multi-hop link routes.
+---------+ +---------+ +---------+ +------------------+
| | <------ | | <----- | |<----| blocking_senders |
| QDR.A | | QDR.B | | QDR.C | +------------------+
| | ------> | | ------> | | +--------------------+
+---------+ +---------+ +---------+---->| blocking_receivers |
^ +--------------------+
|
V
+-------------+
| FakeService |
+-------------+
"""
@classmethod
def setUpClass(cls):
super(LinkRoute3Hop, cls).setUpClass()
b_port = cls.tester.get_port()
configs = [
# QDR.A:
[('router', {'mode': 'interior', 'id': 'QDR.A'}),
# for client access
('listener', {'role': 'normal',
'host': '0.0.0.0',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# for fake service:
('listener', {'role': 'route-container',
'host': '0.0.0.0',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# to connect to the QDR.B
('connector', {'role': 'inter-router',
'host': '127.0.0.1',
'port': b_port,
'saslMechanisms': 'ANONYMOUS'}),
# the routes
('linkRoute', {'prefix': 'closest/test-client', 'containerId': 'FakeService', 'direction': 'in'}),
('linkRoute', {'prefix': 'closest/test-client', 'containerId': 'FakeService', 'direction': 'out'})
],
# QDR.B:
[('router', {'mode': 'interior', 'id': 'QDR.B'}),
# for client connections
('listener', {'role': 'normal',
'host': '0.0.0.0',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# for inter-router connections from QDR.A and QDR.C
('listener', {'role': 'inter-router',
'host': '0.0.0.0',
'port': b_port,
'saslMechanisms': 'ANONYMOUS'}),
('linkRoute', {'prefix': 'closest/test-client', 'direction': 'in'}),
('linkRoute', {'prefix': 'closest/test-client', 'direction': 'out'})
],
# QDR.C
[('router', {'mode': 'interior', 'id': 'QDR.C'}),
# for client connections
('listener', {'role': 'normal',
'host': '0.0.0.0',
'port': cls.tester.get_port(),
'saslMechanisms': 'ANONYMOUS'}),
# to connect to the QDR.B
('connector', {'role': 'inter-router',
'host': '127.0.0.1',
'port': b_port,
'saslMechanisms': 'ANONYMOUS'}),
('linkRoute', {'prefix': 'closest/test-client', 'direction': 'in'}),
('linkRoute', {'prefix': 'closest/test-client', 'direction': 'out'})
]
]
cls.routers = []
for c in configs:
config = Qdrouterd.Config(c)
cls.routers.append(cls.tester.qdrouterd(config=config, wait=False))
cls.QDR_A = cls.routers[0]
cls.QDR_B = cls.routers[1]
cls.QDR_C = cls.routers[2]
cls.QDR_A.wait_router_connected('QDR.B')
cls.QDR_B.wait_router_connected('QDR.A')
cls.QDR_B.wait_router_connected('QDR.C')
cls.QDR_C.wait_router_connected('QDR.B')
cls.QDR_C.wait_router_connected('QDR.A')
cls.QDR_A.wait_router_connected('QDR.C')
def test_01_parallel_link_routes(self):
"""
Verify Q2/Q3 recovery in the case of multiple link-routes sharing the
same session.
"""
send_clients = 10
send_batch = 10
total = send_clients * send_batch
fake_service = FakeService(self.QDR_A.addresses[1],
container_id="FakeService")
self.QDR_C.wait_address("closest/test-client",
remotes=1)
env = None
rx = self.popen(["test-receiver",
"-a", self.QDR_C.addresses[0],
"-c", str(total),
"-s", "closest/test-client"],
env=env,
expect=Process.EXIT_OK)
def _spawn_sender(x):
return self.popen(["test-sender",
"-a", self.QDR_C.addresses[0],
"-c", str(send_batch),
"-i", "TestSender-%s" % x,
"-sx", # huge message size to trigger Q2/Q3
"-t", "closest/test-client"],
env=env,
expect=Process.EXIT_OK)
senders = [_spawn_sender(s) for s in range(send_clients)]
for tx in senders:
out_text, out_err = tx.communicate(timeout=TIMEOUT)
if tx.returncode:
raise Exception("Sender failed: %s %s" % (out_text, out_err))
if rx.wait(timeout=TIMEOUT):
raise Exception("Receiver failed to consume all messages in=%s out=%s",
fake_service.in_count,
fake_service.out_count)
fake_service.join()
self.assertEqual(total, fake_service.in_count)
self.assertEqual(total, fake_service.out_count)
self.QDR_C.wait_address_unsubscribed("closest/test-client")
def test_02_modified_outcome(self):
"""
Ensure all elements of a Modified disposition are passed thru the link
route
"""
class FakeServiceModified(FakeService):
def on_message(self, event):
# set non-default values for delivery state for delivery to
# remote endpoint
dlv = event.delivery
dlv.local.failed = True
dlv.local.undeliverable = True
dlv.local.annotations = {symbol("Key"): "Value"}
dlv.update(Delivery.MODIFIED)
dlv.settle()
fake_service = FakeServiceModified(self.QDR_A.addresses[1],
container_id="FakeService",
auto_accept=False,
auto_settle=False)
self.QDR_C.wait_address("closest/test-client",
remotes=1)
sniffer = DispositionSniffer("%s/closest/test-client" %
self.QDR_C.addresses[0])
sniffer.run()
self.assertEqual(None, sniffer.error)
state = sniffer.delivery.remote
self.assertTrue(state.failed)
self.assertTrue(state.undeliverable)
self.assertTrue(state.annotations is not None)
self.assertTrue(symbol('Key') in state.annotations)
self.assertEqual('Value', state.annotations[symbol('Key')])
fake_service.join()
self.QDR_C.wait_address_unsubscribed("closest/test-client")
def test_03_rejected_outcome(self):
"""
Ensure all elements of a Rejected disposition are passed thru the link
route
"""
class FakeServiceReject(FakeService):
def on_message(self, event):
# set non-default values for delivery state for delivery to
# remote endpoint
dlv = event.delivery
dlv.local.condition = Condition("condition-name",
str("condition-description"),
{symbol("condition"): "info"})
dlv.update(Delivery.REJECTED)
dlv.settle()
fake_service = FakeServiceReject(self.QDR_A.addresses[1],
container_id="FakeService",
auto_accept=False,
auto_settle=False)
self.QDR_C.wait_address("closest/test-client",
remotes=1)
sniffer = DispositionSniffer("%s/closest/test-client" %
self.QDR_C.addresses[0])
sniffer.run()
self.assertEqual(None, sniffer.error)
state = sniffer.delivery.remote
self.assertTrue(state.condition is not None)
self.assertEqual("condition-name", state.condition.name)
self.assertEqual("condition-description", state.condition.description)
self.assertTrue(state.condition.info is not None)
self.assertTrue(symbol("condition") in state.condition.info)
self.assertEqual('info', state.condition.info[symbol("condition")])
fake_service.join()
self.QDR_C.wait_address_unsubscribed("closest/test-client")
def test_04_extension_state(self):
"""
system_tests_two_routers.TwoRouterExtensionsStateTest() already tests
sending extended state via a link route.
"""
pass
if __name__ == '__main__':
unittest.main(main_module())
| {
"content_hash": "7f00e8cfa0c6ffb2afb0daa7bf769287",
"timestamp": "",
"source": "github",
"line_count": 2766,
"max_line_length": 187,
"avg_line_length": 42.369848156182215,
"alnum_prop": 0.5585818507615513,
"repo_name": "ganeshmurthy/qpid-dispatch",
"id": "2edc5095a1165608560a035d92654d32f93267ab",
"size": "117985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/system_tests_link_routes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2695814"
},
{
"name": "C++",
"bytes": "359957"
},
{
"name": "CMake",
"bytes": "54018"
},
{
"name": "CSS",
"bytes": "49129"
},
{
"name": "Dockerfile",
"bytes": "3230"
},
{
"name": "HTML",
"bytes": "2320"
},
{
"name": "JavaScript",
"bytes": "737682"
},
{
"name": "Objective-C",
"bytes": "1976"
},
{
"name": "Python",
"bytes": "2547017"
},
{
"name": "Shell",
"bytes": "34107"
}
],
"symlink_target": ""
} |
import theano.tensor as T
import theano.compile.sharedvalue as S
import numpy as np
from pymanopt import Problem
from pymanopt.manifolds import Euclidean
from pymanopt.solvers import TrustRegions
if __name__ == "__main__":
# Cost function is the squared reconstruction error
wT = T.matrix()
yT = S.shared(np.random.randn(1, 1))
XT = S.shared(np.random.randn(1, 1))
cost = T.sum((yT-wT.T.dot(XT))**2)
# A solver that involves the hessian
solver = TrustRegions()
# R^3
manifold = Euclidean(3, 1)
# Create the problem with extra cost function arguments
problem = Problem(manifold=manifold, cost=cost, arg=wT, verbosity=0)
# Solve 5 instances of the same type of problem for different data input
for k in range(0, 5):
# Generate random data
X = np.random.randn(3, 200)
Y = np.random.randn(1, 200)
yT.set_value(Y)
XT.set_value(X)
wopt = solver.solve(problem)
print('Run {}'.format(k+1))
print('Weights found by pymanopt (top) / '
'closed form solution (bottom)')
print(wopt.T)
print(np.linalg.inv(X.dot(X.T)).dot(X).dot(Y.T).T)
print('')
| {
"content_hash": "bc656e844f45de019ed5308bccd435de",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 76,
"avg_line_length": 30.615384615384617,
"alnum_prop": 0.6273031825795645,
"repo_name": "j-towns/pymanopt",
"id": "2520328772214bf30b0c2b7289a3c2018eff3679",
"size": "1194",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/linreg_multiple_theano.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "210957"
}
],
"symlink_target": ""
} |
from autothreadharness.harness_case import HarnessCase
import unittest
class Leader_7_1_1(HarnessCase):
role = HarnessCase.ROLE_LEADER
case = '7 1 1'
golden_devices_required = 3
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "9ef173993142856705f8912016ea3bd2",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 54,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.6643835616438356,
"repo_name": "erja-gp/openthread",
"id": "9e1e798379f4cf0c102d236ba8ace56df9f389cf",
"size": "1874",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "tools/harness-automation/cases/leader_7_1_1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "15850"
},
{
"name": "C",
"bytes": "940119"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "4306681"
},
{
"name": "Dockerfile",
"bytes": "6256"
},
{
"name": "M4",
"bytes": "63303"
},
{
"name": "Makefile",
"bytes": "133368"
},
{
"name": "Python",
"bytes": "2012919"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "74907"
}
],
"symlink_target": ""
} |
import os
import jinja2
from functools import wraps
from .models import Posts, Comments, Votes
template_dir = os.path.join(os.path.dirname(__file__), '../templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
def login_required(f):
"""
Decorator to see if user is logged in
Redirect to login page if not logged in
"""
@wraps(f)
def login(self, *a, **kw):
if self.user:
f(self, *a, **kw)
else:
self.redirect("/login")
return login
def not_logged_in(f):
"""
Decorator to see if user is NOT logged in.
Redirect to welcome page if logged in
"""
@wraps(f)
def logged_in(self, *a, **kw):
if self.user:
self.redirect("/welcome")
else:
f(self, *a, **kw)
return logged_in
def post_exists(f):
"""
Decorator to see if post exists
"""
@wraps(f)
def wrapper(self, post_id):
post = Posts.by_id(post_id)
if post:
return f(self, post_id, post)
else:
self.redirect("/")
return wrapper
def comment_exists(f):
"""
Decorator to see if comment exists
"""
@wraps(f)
def wrapper(self, comm_id):
comment = Comments.by_id(comm_id)
if comment:
return f(self, comm_id, comment)
else:
self.redirect("/")
return wrapper
def user_owns_post(f):
"""
Decorator to see if user is post author
"""
@wraps(f)
def wrapper(self, post_id, *a, **kw):
post = Posts.by_id(post_id)
if post.author.key().id() != self.uid():
self.redirect("/post/"+post_id)
else:
f(self, post_id, *a, **kw)
return wrapper
def user_owns_comment(f):
"""
Decorator to see if user is comment author
"""
@wraps(f)
def wrapper(self, comm_id, *a, **kw):
comment = Comments.by_id(comm_id)
if comment.author.key().id() != self.uid():
self.redirect("/post/"+str(comment.post.key().id()))
else:
f(self, comm_id, *a, **kw)
return wrapper
def vote_up(p, u):
if p.can_vote(u.key().id()):
v = Votes(post=p, user=u, vote=1)
v.put()
def vote_dn(p, u):
if p.can_vote(u.key().id()):
v = Votes(post=p, user=u, vote=-1)
v.put()
| {
"content_hash": "5b60f9ba162a6571affdf0c93d149a31",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 76,
"avg_line_length": 22.933333333333334,
"alnum_prop": 0.5352990033222591,
"repo_name": "stonescar/multi-user-blog",
"id": "e87665711b6d80cd3f0235f61539805982d3fbba",
"size": "2408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blogmods/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6647"
},
{
"name": "HTML",
"bytes": "7526"
},
{
"name": "JavaScript",
"bytes": "2855"
},
{
"name": "Python",
"bytes": "19217"
}
],
"symlink_target": ""
} |
import sys
from qpid.messaging import *
if __name__ == "__main__":
if len(sys.argv) < 5:
sys.exit(-1)
print 'app name {}, broke ip {}, broker port {}, queue id {}'.format(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
broker = "{}/xxxx@{}:{}".format(sys.argv[1], sys.argv[2], sys.argv[3])
address = "{}".format(sys.argv[4])
conn_options = {
'transport' : 'ssl',
'ssl_keyfile' : "ssl_cert_file/MSP.Key.pem",
'ssl_certfile' : "ssl_cert_file/MSP.pem.cer",
'ssl_trustfile' : "ssl_cert_file/Wireless Root CA.pem.cer",
'ssl_skip_hostname_check' : True,
}
connection = Connection(broker, **conn_options)
try:
connection.open()
session = connection.session()
receiver = session.receiver(address)
print "session create success"
while True:
message = receiver.fetch()
if len(sys.argv) == 6:
if message.content.find(sys.argv[5]):
print "%r" % message.content
else:
print "%r" % message.content
session.acknowledge()
except MessagingError, m:
print "MessagingError", m
connection.close() | {
"content_hash": "a30b17333fd16c54b004b6ba3d18bba5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 124,
"avg_line_length": 38.34285714285714,
"alnum_prop": 0.4992548435171386,
"repo_name": "SVADemoAPP/AmqpCode",
"id": "8e49d00781994bbe26b8260357c7cc5729066e59",
"size": "2156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/MQReceiver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8477"
},
{
"name": "C++",
"bytes": "70433"
},
{
"name": "Java",
"bytes": "12732"
},
{
"name": "Makefile",
"bytes": "580"
},
{
"name": "Python",
"bytes": "489612"
}
],
"symlink_target": ""
} |
from proxy_handler import *
import requests
from requests.exceptions import *
def grab_code_from_page(page_url, ip_address="0"):
"""
Function to get the source code of the url provided
:param ip_address: Proxy IP Address
:param page_url: URL of the page
:returns: string formatted source code
"""
# Acting as mozilla browser while requesting data from server
user_agent = "Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11"
# Making a connection along with proxy handling
try:
if ip_address != "0":
my_proxy = get_proxy(ip_address)
response = requests.get(page_url, headers={'User-Agent': user_agent}, proxies=my_proxy)
else:
response = requests.get(page_url, headers={'User-Agent': user_agent})
if response.status_code == requests.codes.ok:
response.close()
return response.text
else:
print("Error {} occurred".format(response.status_code))
log_file = open("logs.txt", 'a')
log_file.write("\nError {} while retrieving {}\n".format(response.status_code, page_url))
log_file.close()
return 0
except ProxyError as e:
print("Unable to connect to {}\n".format(ip_address))
log_file = open("logs.txt", 'a')
log_file.write("\nException occurred while getting {}\nException: {}\n".format(page_url, e))
log_file.close()
exit()
except Exception as e:
print(e)
log_file = open("logs.txt", 'a')
log_file.write("\nException occurred while getting {}\nException: {}\n".format(page_url, e))
log_file.close()
exit()
| {
"content_hash": "f55fe5cc9fbc0399b184bad6cde1fafd",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 101,
"avg_line_length": 35.5,
"alnum_prop": 0.6050469483568075,
"repo_name": "Shivamohan07/snapdeal_crawler",
"id": "4323039a4afd238ff939fc9b0b6bd8af5f0424da",
"size": "1704",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "code_grabber.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14562"
}
],
"symlink_target": ""
} |
"""``pytest_caselogger.py``
`Plugin is getting logs for each test case`
"""
import time
import pytest
from .pytest_helpers import get_tcname
from .pytest_onsenv import setup_scope
from testlib import caselogger
def pytest_addoption(parser):
"""Describe plugin specified options.
"""
group = parser.getgroup("log_enable", "plugin case logger")
group.addoption("--log_enable", action="store", default="True",
choices=["False", "True"],
help="Enable/Disable log tool for test (False | True). '%default' by default.")
group.addoption("--log_test", action="store", default="Failed",
choices=["Failed", "All"],
help="Choose test case result to store logs for (Failed | All). '%default' by default.")
group.addoption("--log_storage", action="store", default="none",
choices=["None", "Host"],
help="Where to store logs (None | Host). '%default' by default.")
group.addoption("--log_type", action="store", default="Single",
choices=["Single", "All"],
help="Store all logs or only for single test case (Single | All). '%default' by default.")
def pytest_configure(config):
"""Registering plugin.
"""
if config.option.log_enable == "True":
config.pluginmanager.register(CaseLoggerPlugin(), "_case_logger")
def pytest_unconfigure(config):
"""Unregistering plugin.
"""
case_logger = getattr(config, "_case_logger", None)
if case_logger == "True":
del config._case_logger
config.pluginmanager.unregister(case_logger)
class CaseLoggerExecutor(object):
"""Base class for TAF caselogger functionality.
"""
def __init__(self, env):
"""Initialize CaseLoggerExecutor instance.
Args:
env(testlib.common3.Environment): Environment instance from config
"""
self.node = None
self.env = env
self.log_storage = env.opts.log_storage
self.log_flag = env.opts.log_enable
def case_setup(self):
"""Add message into device logs on test case setup.
"""
self.log_timestamp = time.time() # pylint: disable=attribute-defined-outside-init
self.tc_name = get_tcname(self.node) # pylint: disable=attribute-defined-outside-init
# Make notice of test setup in log file.
for switch in list(self.env.switch.values()):
try:
switch.ui.logs_add_message("Notice", "[QA] Test %s execution started at %s" % (self.tc_name, self.log_timestamp))
except Exception as err:
self.log_flag = "False"
self.node.config.ctlogger.error("[Caselogger] Adding message to device logs failed: %s", err)
def case_teardown(self):
"""Add message into device logs on test case teardown. Copy test case logs to the log host.
"""
# Make notice of test teardown in log file.
for switch in list(self.env.switch.values()):
if switch.status:
try:
switch.ui.logs_add_message("Notice", "[QA] Test teardown at %s" % (self.log_timestamp, ))
except Exception as err:
self.node.config.ctlogger.error("[Caselogger] Adding message to device logs failed: %s", err)
# Get test case logs
if self.log_flag == "True":
if self.log_storage == "Host":
for switch in list(self.env.switch.values()):
case_logger = caselogger.CaseLogger(switch, self.node.config.ctlogger)
if self.env.opts.log_test == "Failed" and self.node.excinfo:
case_logger.get_test_case_logs(self.tc_name, self.log_timestamp, self.env.opts.log_type)
elif self.env.opts.log_test == "All":
case_logger.get_test_case_logs(self.tc_name, self.log_timestamp, self.env.opts.log_type)
def suite_teardown(self):
"""Copy core logs to the log host on test suite teardown.
"""
if self.log_flag == "True":
if self.log_storage == "Host":
for switch in list(self.env.switch.values()):
case_logger = caselogger.CaseLogger(switch, self.node.config.ctlogger)
# suit_name is defined below
case_logger.get_core_logs(self.suite_name) # pylint: disable=no-member
class CaseLoggerPlugin(object):
"""Base class for caselogger plugin functionality.
"""
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(self, item, call):
"""Add information about test case execution results.
"""
# execute all other hooks to obtain the report object
yield
# Add info about test cases execution results
if call.when == "call":
item.excinfo = call.excinfo
@pytest.fixture(autouse=True, scope=setup_scope())
def suitelogger(self, request, env_main):
"""Call caselogger on test suite teardown.
Args:
request(pytest.request): pytest request instance
env_main (testlib.common3.Environment): 'env_main' pytest fixture from pytest_onsenv.py
Returns:
CaseLoggerExecutor: instance of CaseLoggerExecutor class
"""
case_logger = CaseLoggerExecutor(env_main)
request.addfinalizer(case_logger.suite_teardown)
return case_logger
@pytest.fixture(autouse=True)
def caselogger(self, request, suitelogger, env):
"""Call caselogger on test case setup/teardown
Args:
request(pytest.request): pytest request instance
suitelogger: pytest fixture
env(testlib.common3.Environment): 'env' pytest fixture from pytest_onsenv.py
"""
suitelogger.node = request.node
suitelogger.suite_name = request.node.module.__name__
request.addfinalizer(suitelogger.case_teardown)
suitelogger.case_setup()
| {
"content_hash": "b3982128f7d4af5258bdd3ec87c43be5",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 129,
"avg_line_length": 36.66867469879518,
"alnum_prop": 0.6071956628881222,
"repo_name": "taf3/taf",
"id": "3d9a300af277c52ff1b62aa7ebe806a0ef182352",
"size": "6681",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "taf/plugins/pytest_caselogger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6745"
},
{
"name": "Dockerfile",
"bytes": "4185"
},
{
"name": "JavaScript",
"bytes": "1771"
},
{
"name": "Python",
"bytes": "3859799"
},
{
"name": "Shell",
"bytes": "3146"
},
{
"name": "Tcl",
"bytes": "68098"
},
{
"name": "XSLT",
"bytes": "41538"
}
],
"symlink_target": ""
} |
import torch
import ocnn
import torch.nn
import torch.nn.functional as F
class OUNet(torch.nn.Module):
def __init__(self, depth, channel_in, nout, full_depth=2):
super().__init__()
self.depth = depth
self.channel_in = channel_in
self.nout = nout
self.full_depth = full_depth
self.nempty = False
self.resblk_num = 3
self.channels = [4, 512, 512, 256, 128, 64, 32, 16]
# encoder
self.conv1 = ocnn.OctreeConvBnRelu(
depth, channel_in, self.channels[depth], nempty=self.nempty)
self.encoder = torch.nn.ModuleList(
[ocnn.OctreeResBlocks(d, self.channels[d],
self.channels[d], self.resblk_num, nempty=self.nempty)
for d in range(depth, full_depth-1, -1)])
self.downsample = torch.nn.ModuleList(
[ocnn.OctreeConvBnRelu(d, self.channels[d],
self.channels[d-1], kernel_size=[2], stride=2, nempty=self.nempty)
for d in range(depth, full_depth, -1)])
# decoder
self.upsample = torch.nn.ModuleList(
[ocnn.OctreeDeConvBnRelu(d-1, self.channels[d-1],
self.channels[d], kernel_size=[2], stride=2, nempty=self.nempty)
for d in range(full_depth+1, depth + 1)])
self.decoder = torch.nn.ModuleList(
[ocnn.OctreeResBlocks(d, self.channels[d],
self.channels[d], self.resblk_num, nempty=self.nempty)
for d in range(full_depth+1, depth + 1)])
# header
self.predict = torch.nn.ModuleList(
[self._make_predict_module(self.channels[d], 2)
for d in range(full_depth, depth + 1)])
self.header = self._make_predict_module(self.channels[depth], nout)
def _make_predict_module(self, channel_in, channel_out=2, num_hidden=32):
return torch.nn.Sequential(
ocnn.OctreeConv1x1BnRelu(channel_in, num_hidden),
ocnn.OctreeConv1x1(num_hidden, channel_out, use_bias=True))
def get_input_feature(self, octree):
data = ocnn.octree_property(octree, 'feature', self.depth)
assert data.size(1) == self.channel_in
return data
def ocnn_encoder(self, octree):
depth, full_depth = self.depth, self.full_depth
data = self.get_input_feature(octree)
convs = dict()
convs[depth] = self.conv1(data, octree)
for i, d in enumerate(range(depth, full_depth-1, -1)):
convs[d] = self.encoder[i](convs[d], octree)
if d > full_depth:
convs[d-1] = self.downsample[i](convs[d], octree)
return convs
def ocnn_decoder(self, convs, octree_out, octree, return_deconvs=False):
output, deconvs = dict(), dict()
depth, full_depth = self.depth, self.full_depth
deconvs[full_depth] = convs[full_depth]
for i, d in enumerate(range(full_depth, depth+1)):
if d > full_depth:
deconvd = self.upsample[i-1](deconvs[d-1], octree_out)
skip, _ = ocnn.octree_align(convs[d], octree, octree_out, d)
deconvd = deconvd + skip
deconvs[d] = self.decoder[i-1](deconvd, octree_out)
# predict the splitting label
logit = self.predict[i](deconvs[d])
logit = logit.squeeze().t() # (1, C, H, 1) -> (H, C)
# classification loss
label_gt = ocnn.octree_property(octree_out, 'split', d).long()
output['loss_%d' % d] = F.cross_entropy(logit, label_gt)
output['accu_%d' % d] = logit.argmax(1).eq(label_gt).float().mean()
if d == depth:
# predict the signal
signal = self.header(deconvs[d])
signal = torch.tanh(signal)
# regression loss
signal_gt = ocnn.octree_property(octree_out, 'feature', d)
output['loss_reg%d' % d] = torch.mean((signal_gt - signal)**2)
return (output, deconvs) if return_deconvs else output
def decode_shape(self, convs, octree, return_deconvs=False):
deconvs = dict()
depth, full_depth = self.depth, self.full_depth
octree_out = ocnn.create_full_octree(full_depth, self.nout)
deconvs[full_depth] = convs[full_depth]
for i, d in enumerate(range(full_depth, depth+1)):
if d > full_depth:
deconvd = self.upsample[i-1](deconvs[d-1], octree_out)
skip, _ = ocnn.octree_align(convs[d], octree, octree_out, d)
deconvd = deconvd + skip
deconvs[d] = self.decoder[i-1](deconvd, octree_out)
# predict the splitting label
logit = self.predict[i](deconvs[d])
logit = logit.squeeze().t() # (1, C, H, 1) -> (H, C)
# octree splitting
label = logit.argmax(1).to(torch.int32)
octree_out = ocnn.octree_update(octree_out, label, d, split=1)
if d < depth:
octree_out = ocnn.octree_grow(octree_out, target_depth=d+1)
# predict the signal
else:
signal = self.header(deconvs[d]) # (1, C, H, 1)
signal = torch.tanh(signal)
normal = F.normalize(signal[:, :3], dim=1)
signal = torch.cat([normal, signal[:, 3:]], dim=1)
octree_out = ocnn.octree_set_property(octree_out, signal, d)
return (octree_out, deconvs) if return_deconvs else octree_out
def forward(self, octree_in, octree_gt=None, run='compute_loss'):
convs = self.ocnn_encoder(octree_in)
if 'compute_loss' == run:
assert octree_gt is not None
output = self.ocnn_decoder(convs, octree_gt, octree_in)
elif 'decode_shape' == run:
output = self.decode_shape(convs, octree_in)
else:
raise ValueError
return output
| {
"content_hash": "93ec23b7e747f0e6d99039398d32beac",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 75,
"avg_line_length": 37.61267605633803,
"alnum_prop": 0.6264744429882044,
"repo_name": "microsoft/O-CNN",
"id": "35a2626909fac7f20b8d715eb34f7d131741cccc",
"size": "5341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytorch/ocnn/ounet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1028"
},
{
"name": "C++",
"bytes": "1168252"
},
{
"name": "CMake",
"bytes": "12150"
},
{
"name": "Cuda",
"bytes": "107918"
},
{
"name": "Dockerfile",
"bytes": "2505"
},
{
"name": "MATLAB",
"bytes": "989"
},
{
"name": "Python",
"bytes": "379722"
}
],
"symlink_target": ""
} |
"""Remove error field from task_queue
Revision ID: 5686f64a46c8
Revises: dd35678aae90
Create Date: 2020-10-27 14:06:58.794701
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "5686f64a46c8"
down_revision = "dd35678aae90"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("task_queue", "error")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("task_queue", sa.Column("error", sa.VARCHAR(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
| {
"content_hash": "5837681fd9a3edc6df535995b20ab0e9",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 101,
"avg_line_length": 25,
"alnum_prop": 0.6914285714285714,
"repo_name": "psi4/DatenQM",
"id": "76d65a4555e5f9a115671ab69ca01295537d0ce7",
"size": "700",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qcfractal/alembic/versions/5686f64a46c8_remove_error_field_from_task_queue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "8266"
},
{
"name": "Python",
"bytes": "145536"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
exp = np.exp
log = np.log
class ReduceTest(test_util.TensorFlowTestCase):
def testReduceAllDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = math_ops.reduce_sum(x).eval()
self.assertEqual(y_tf, 21)
def testReduceExplicitDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
axis = np.array([[0], [1]])
with self.assertRaisesRegexp(ValueError, "must be at most rank 1"):
math_ops.reduce_sum(x, axis)
class LogSumExpTest(test_util.TensorFlowTestCase):
def testReduceLogSumExp(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf_np = math_ops.reduce_logsumexp(x_np).eval()
y_np = log(np.sum(exp(x_np)))
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=[0])
y_np = log(np.sum(exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = y_tf.eval()
self.assertAllClose(y_tf_np, y_np)
def testKeepDims(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf_np = math_ops.reduce_logsumexp(x_np, keep_dims=True).eval()
self.assertEqual(y_tf_np.ndim, x_np.ndim)
y_np = log(np.sum(exp(x_np), keepdims=True))
self.assertAllClose(y_tf_np, y_np)
def testOverflow(self):
x = [1000, 1001, 1002, 1003]
for dtype in [np.float16, np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegexp(RuntimeWarning,
"overflow encountered in exp"):
out = log(np.sum(exp(x_np)))
if out == np.inf:
raise RuntimeWarning("overflow encountered in exp")
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
y_np = log(np.sum(exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testUnderflow(self):
x = [-1000, -1001, -1002, -1003]
for dtype in [np.float16, np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegexp(RuntimeWarning,
"divide by zero encountered in log"):
out = log(np.sum(exp(x_np)))
if out == -np.inf:
raise RuntimeWarning("divide by zero encountered in log")
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
y_np = log(np.sum(exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
class RoundTest(test_util.TensorFlowTestCase):
def testRounding(self):
x = [0.49, 0.7, -0.3, -0.8]
# TODO(nolivia): Remove this when RoundOp is forwards compatible
# x = np.arange(-5.0, 5.0, .25)
for dtype in [np.float32, np.double, np.int32]:
x_np = np.array(x, dtype=dtype)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.round(x_tf)
y_tf_np = y_tf.eval()
y_np = np.round(x_np)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
class ModTest(test_util.TensorFlowTestCase):
def testFloat(self):
x = [0.5, 0.7, 0.3]
for dtype in [np.float32, np.double]:
# Test scalar and vector versions.
for denom in [x[0], [x[0]] * 3]:
x_np = np.array(x, dtype=dtype)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.fmod(x_np, denom)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
def testFixed(self):
x = [5, 10, 23]
for dtype in [np.int32, np.int64]:
# Test scalar and vector versions.
for denom in [x[0], x]:
x_np = np.array(x, dtype=dtype)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.mod(x_np, denom)
self.assertAllClose(y_tf_np, y_np)
class SquaredDifferenceTest(test_util.TensorFlowTestCase):
def testSquaredDifference(self):
for dtype in [np.int32, np.float16]:
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
y = np.array([-3, -2, -1], dtype=dtype)
z = (x - y)*(x - y)
with self.test_session(use_gpu=True):
z_tf = math_ops.squared_difference(x, y).eval()
self.assertAllClose(z, z_tf)
class ScalarMulTest(test_util.TensorFlowTestCase):
def testAcceptsRefs(self):
var = variables.Variable(10)
result = math_ops.scalar_mul(3, var)
init = variables.initialize_all_variables()
with self.test_session(use_gpu=True) as sess:
sess.run(init)
self.assertEqual(30, result.eval())
def testAcceptsConstant(self):
const = constant_op.constant(10)
result = math_ops.scalar_mul(3, const)
with self.test_session(use_gpu=True):
self.assertEqual(30, result.eval())
def testAcceptsTensor(self):
tensor = array_ops.ones([10, 10])
result = math_ops.scalar_mul(3, tensor)
expected = array_ops.ones([10, 10]) * 3
with self.test_session(use_gpu=True):
self.assertAllEqual(expected.eval(), result.eval())
def testAcceptsIndexedSlices(self):
values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2])
indices = constant_op.constant([0, 2, 5])
x = math_ops.scalar_mul(-3, ops.IndexedSlices(values, indices))
with self.test_session(use_gpu=True):
self.assertAllEqual(x.values.eval(), [[-6, -9], [-15, -21], [0, 3]])
self.assertAllEqual(x.indices.eval(), [0, 2, 5])
class AccumulateNTest(test_util.TensorFlowTestCase):
def testFloat(self):
np.random.seed(12345)
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
tf_x = ops.convert_n_to_tensor(x)
for u in tf_x:
print("shape=%s" % u.get_shape())
with self.test_session(use_gpu=True):
self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllClose(x[0] * 5, math_ops.accumulate_n([tf_x[0]] * 5).eval())
def testInt(self):
np.random.seed(54321)
x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllEqual(x[0] * 6, math_ops.accumulate_n([tf_x[0]] * 6).eval())
class DivAndModTest(test_util.TensorFlowTestCase):
# TODO(aselle): Test more types before exposing new division operators.
def intTestData(self):
nums = np.arange(-10, 10, 1).reshape(20, 1)
divs = np.arange(-3, 4, 2).reshape(1, 4)
return nums, divs
def floatTestData(self):
nums = np.arange(-10, 10, .25).reshape(80, 1)
divs = np.arange(-3, 0, .25).reshape(1, 12)
return nums, divs
def testFloorModInt(self):
nums, divs = self.intTestData()
with self.test_session():
# TODO(aselle): Change test to use % after switch
# tf_result = math_ops.floor_mod(nums, divs).eval()
tf_result = gen_math_ops.floor_mod(nums, divs).eval()
np_result = nums % divs
self.assertAllEqual(tf_result, np_result)
def testFloorModFloat(self):
nums, divs = self.floatTestData()
with self.test_session():
tf_result = math_ops.floor_mod(nums, divs).eval()
np_result = nums % divs
self.assertAllEqual(tf_result, np_result)
# TODO(aselle): put this test in once % switched to floormod
# tf2_result = (array_ops.constant(nums)
# % array_ops.constant(divs)).eval()
# self.assertAllEqual(tf2_result, tf_result)
def testDivideInt(self):
nums, divs = self.intTestData()
with self.test_session():
tf_result = math_ops.floor_div(nums, divs).eval()
np_result = nums // divs
self.assertAllEqual(tf_result, np_result)
# TODO(aselle): Put this test in once // is switched to floordiv
# tf2_result = (array_ops.constant(nums)
# // array_ops.constant(divs)).eval()
# self.assertAllEqual(tf2_result, tf_result)
def testConsistent(self):
nums, divs = self.intTestData()
with self.test_session():
tf_result = (
math_ops.floor_div(nums, divs) * divs + math_ops.floor_mod(nums, divs)
).eval()
tf_nums = array_ops.constant(nums)
tf_divs = array_ops.constant(divs)
tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
np_result = (nums // divs) * divs + (nums % divs)
self.assertAllEqual(tf_result, np_result)
self.assertAllEqual(tf_result, tf2_result)
if __name__ == "__main__":
googletest.main()
| {
"content_hash": "a07d7753684f036431edf47f5e00d047",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 80,
"avg_line_length": 36.31481481481482,
"alnum_prop": 0.6216216216216216,
"repo_name": "anand-c-goog/tensorflow",
"id": "8b40c8599e5cfc0e77c50772ba215921e9c5e157",
"size": "10495",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/math_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "92986"
},
{
"name": "C++",
"bytes": "13344601"
},
{
"name": "CMake",
"bytes": "72160"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "80353"
},
{
"name": "HTML",
"bytes": "522326"
},
{
"name": "Java",
"bytes": "48444"
},
{
"name": "JavaScript",
"bytes": "12972"
},
{
"name": "Jupyter Notebook",
"bytes": "1833435"
},
{
"name": "Makefile",
"bytes": "23482"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "138797"
},
{
"name": "Python",
"bytes": "12649070"
},
{
"name": "Shell",
"bytes": "270477"
},
{
"name": "TypeScript",
"bytes": "705307"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from .models import (Building, Child, Device, Port, Item, Country, Connection,
ClientStatus, State, Client, SpecialClient, TUser, Person, Student,
Organizer, Class, Enrollment, Hen, Chick, A, B, C)
class SelectRelatedRegressTests(TestCase):
def test_regression_7110(self):
"""
Regression test for bug #7110.
When using select_related(), we must query the
Device and Building tables using two different aliases (each) in order to
differentiate the start and end Connection fields. The net result is that
both the "connections = ..." queries here should give the same results
without pulling in more than the absolute minimum number of tables
(history has shown that it's easy to make a mistake in the implementation
and include some unnecessary bonus joins).
"""
b = Building.objects.create(name='101')
dev1 = Device.objects.create(name="router", building=b)
dev2 = Device.objects.create(name="switch", building=b)
dev3 = Device.objects.create(name="server", building=b)
port1 = Port.objects.create(port_number='4', device=dev1)
port2 = Port.objects.create(port_number='7', device=dev2)
port3 = Port.objects.create(port_number='1', device=dev3)
c1 = Connection.objects.create(start=port1, end=port2)
c2 = Connection.objects.create(start=port2, end=port3)
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
# This final query should only have seven tables (port, device and building
# twice each, plus connection once). Thus, 6 joins plus the FROM table.
self.assertEqual(str(connections.query).count(" JOIN "), 6)
def test_regression_8106(self):
"""
Regression test for bug #8106.
Same sort of problem as the previous test, but this time there are
more extra tables to pull in as part of the select_related() and some
of them could potentially clash (so need to be kept separate).
"""
us = TUser.objects.create(name="std")
usp = Person.objects.create(user=us)
uo = TUser.objects.create(name="org")
uop = Person.objects.create(user=uo)
s = Student.objects.create(person=usp)
o = Organizer.objects.create(person=uop)
c = Class.objects.create(org=o)
Enrollment.objects.create(std=s, cls=c)
e_related = Enrollment.objects.all().select_related()[0]
self.assertEqual(e_related.std.person.user.name, "std")
self.assertEqual(e_related.cls.org.person.user.name, "org")
def test_regression_8036(self):
"""
Regression test for bug #8036
the first related model in the tests below
("state") is empty and we try to select the more remotely related
state__country. The regression here was not skipping the empty column results
for country before getting status.
"""
Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
client = Client.objects.create(name='client', status=active)
self.assertEqual(client.status, active)
self.assertEqual(Client.objects.select_related()[0].status, active)
self.assertEqual(Client.objects.select_related('state')[0].status, active)
self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('status')[0].status, active)
def test_multi_table_inheritance(self):
""" Exercising select_related() with multi-table model inheritance. """
c1 = Child.objects.create(name="child1", value=42)
Item.objects.create(name="item1", child=c1)
Item.objects.create(name="item2")
self.assertQuerysetEqual(
Item.objects.select_related("child").order_by("name"),
["<Item: item1>", "<Item: item2>"]
)
def test_regression_12851(self):
"""
Regression for #12851
Deferred fields are used correctly if you select_related a subset
of fields.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
Client.objects.create(name='Brian Burke', state=wa, status=active)
burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
self.assertEqual(burke.name, 'Brian Burke')
self.assertEqual(burke.state.name, 'Western Australia')
# Still works if we're dealing with an inherited class
SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Still works if we defer an attribute on the inherited class
troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Also works if you use only, rather than defer
troy = SpecialClient.objects.select_related('state').only('name', 'state').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
def test_null_join_promotion(self):
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
bob = Client.objects.create(name='Bob', status=active)
jack = Client.objects.create(name='Jack', status=active, state=wa)
qs = Client.objects.filter(state=wa).select_related('state')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [jack])
self.assertEqual(qs[0].state, wa)
# The select_related join wasn't promoted as there was already an
# existing (even if trimmed) inner join to state.
self.assertFalse('LEFT OUTER' in str(qs.query))
qs = Client.objects.select_related('state').order_by('name')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [bob, jack])
self.assertIs(qs[0].state, None)
self.assertEqual(qs[1].state, wa)
# The select_related join was promoted as there is already an
# existing join.
self.assertTrue('LEFT OUTER' in str(qs.query))
def test_regression_19870(self):
hen = Hen.objects.create(name='Hen')
Chick.objects.create(name='Chick', mother=hen)
self.assertEqual(Chick.objects.all()[0].mother.name, 'Hen')
self.assertEqual(Chick.objects.select_related()[0].mother.name, 'Hen')
def test_regression_10733(self):
a = A.objects.create(name='a', lots_of_text='lots_of_text_a', a_field='a_field')
b = B.objects.create(name='b', lots_of_text='lots_of_text_b', b_field='b_field')
c = C.objects.create(name='c', lots_of_text='lots_of_text_c', is_published=True,
c_a=a, c_b=b)
results = C.objects.all().only('name', 'lots_of_text', 'c_a', 'c_b', 'c_b__lots_of_text',
'c_a__name', 'c_b__name').select_related()
self.assertQuerysetEqual(results, [c], lambda x: x)
with self.assertNumQueries(0):
qs_c = results[0]
self.assertEqual(qs_c.name, 'c')
self.assertEqual(qs_c.lots_of_text, 'lots_of_text_c')
self.assertEqual(qs_c.c_b.lots_of_text, 'lots_of_text_b')
self.assertEqual(qs_c.c_a.name, 'a')
self.assertEqual(qs_c.c_b.name, 'b')
def test_regression_22508(self):
building = Building.objects.create(name='101')
device = Device.objects.create(name="router", building=building)
Port.objects.create(port_number='1', device=device)
device = Device.objects.get()
port = device.port_set.select_related('device__building').get()
with self.assertNumQueries(0):
port.device.building
| {
"content_hash": "5e2912cfda4dec5cd887b490f40a1d36",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 131,
"avg_line_length": 47.974489795918366,
"alnum_prop": 0.6424545357864512,
"repo_name": "lecaoquochung/ddnb.django",
"id": "4a498d707a5b7ad6ab4d08080d4ef71cd673397d",
"size": "9403",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "tests/select_related_regress/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53353"
},
{
"name": "JavaScript",
"bytes": "102434"
},
{
"name": "Python",
"bytes": "9796233"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
"""
Elliptic Curve Library
Copyright 2017 Ivan Sarno
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ECL.utility import *
import random
__author__ = 'ivansarno'
__version__ = 'V.1.0'
def test_inverse() -> bool:
t = 0
while t == 0:
t = random.randint(0, 30)
return (inverse(t, 31) * t) % 31 == 1
def test_is_square() -> bool:
t = 0
while t == 0:
t = random.randint(0, 30)
s = t*t % 31
return is_square(s, 31)
def test_square_root() -> bool:
t = 0
while t == 0:
t = random.randint(0, 7)
s = square_root(t * t % 7, 7)
r1 = s == t or 7 - t == s
t = random.randint(0, 13)
while t == 0:
t = random.randint(0, 12)
s = square_root(t * t % 13, 13)
r2 = s == t or 13 - t == s
t = random.randint(0, 16)
while t == 0:
t = random.randint(0, 16)
s = square_root(t * t % 17, 17)
r3 = s == t or 17 - t == s
return r1 and r2 and r3
def test_utility() -> bool:
return test_inverse() and test_is_square() and test_square_root()
| {
"content_hash": "f8e7437243943e80f1bf44996fe2847e",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 72,
"avg_line_length": 23.723076923076924,
"alnum_prop": 0.6076523994811932,
"repo_name": "ivansarno/EllipticCurvesLibrary",
"id": "f861ce3189e1dd7d21ec929b0f6762295da6aeb6",
"size": "1542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utility_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "58054"
}
],
"symlink_target": ""
} |
"""EasyEngine core variable module"""
import platform
import socket
import configparser
import os
import sys
import psutil
import datetime
class EEVariables():
"""Intialization of core variables"""
# EasyEngine version
ee_version = "3.2.2"
# EasyEngine packages versions
ee_wp_cli = "0.19.1"
ee_adminer = "4.2.1"
ee_roundcube = "1.1.1"
ee_vimbadmin = "3.0.11"
# Current date and time of System
ee_date = datetime.datetime.now().strftime('%d%b%Y%H%M%S')
# EasyEngine core variables
ee_platform_distro = platform.linux_distribution()[0].lower()
ee_platform_version = platform.linux_distribution()[1]
ee_platform_codename = os.popen("lsb_release -sc | tr -d \'\\n\'").read()
# Get timezone of system
if os.path.isfile('/etc/timezone'):
with open("/etc/timezone", "r") as tzfile:
ee_timezone = tzfile.read().replace('\n', '')
if ee_timezone == "Etc/UTC":
ee_timezone = "UTC"
else:
ee_timezone = "UTC"
# Get FQDN of system
ee_fqdn = socket.getfqdn()
# EasyEngien default webroot path
ee_webroot = '/var/www/'
# PHP5 user
ee_php_user = 'www-data'
# Get git user name and EMail
config = configparser.ConfigParser()
config.read(os.path.expanduser("~")+'/.gitconfig')
try:
ee_user = config['user']['name']
ee_email = config['user']['email']
except Exception as e:
ee_user = input("Enter your name: ")
ee_email = input("Enter your email: ")
os.system("git config --global user.name {0}".format(ee_user))
os.system("git config --global user.email {0}".format(ee_email))
# Get System RAM and SWAP details
ee_ram = psutil.virtual_memory().total / (1024 * 1024)
ee_swap = psutil.swap_memory().total / (1024 * 1024)
# MySQL hostname
ee_mysql_host = ""
config = configparser.RawConfigParser()
cnfpath = os.path.expanduser("~")+"/.my.cnf"
if [cnfpath] == config.read(cnfpath):
try:
ee_mysql_host = config.get('client', 'host')
except configparser.NoOptionError as e:
ee_mysql_host = "localhost"
else:
ee_mysql_host = "localhost"
# EasyEngine stack installation varibales
# Nginx repo and packages
if ee_platform_codename == 'precise':
ee_nginx_repo = ("deb http://download.opensuse.org/repositories/home:"
"/rtCamp:/EasyEngine/xUbuntu_12.04/ /")
elif ee_platform_codename == 'trusty':
ee_nginx_repo = ("deb http://download.opensuse.org/repositories/home:"
"/rtCamp:/EasyEngine/xUbuntu_14.04/ /")
elif ee_platform_codename == 'wheezy':
ee_nginx_repo = ("deb http://download.opensuse.org/repositories/home:"
"/rtCamp:/EasyEngine/Debian_7.0/ /")
elif ee_platform_codename == 'jessie':
ee_nginx_repo = ("deb http://download.opensuse.org/repositories/home:"
"/rtCamp:/EasyEngine/Debian_8.0/ /")
ee_nginx = ["nginx-custom", "nginx-common"]
ee_nginx_key = '3050AC3CD2AE6F03'
# PHP repo and packages
if ee_platform_distro == 'ubuntu':
ee_php_repo = "ppa:ondrej/php5-5.6"
elif ee_platform_codename == 'wheezy':
ee_php_repo = ("deb http://packages.dotdeb.org {codename}-php56 all"
.format(codename=ee_platform_codename))
ee_php = ["php5-fpm", "php5-curl", "php5-gd", "php5-imap",
"php5-mcrypt", "php5-common", "php5-readline",
"php5-mysql", "php5-cli", "php5-memcache", "php5-imagick",
"memcached", "graphviz", "php-pear"]
if ee_platform_codename == 'wheezy':
ee_php = ee_php + ["php5-dev"]
if ee_platform_distro == 'ubuntu' or ee_platform_codename == 'jessie':
ee_php = ee_php + ["php5-xdebug"]
# MySQL repo and packages
if ee_platform_distro == 'ubuntu':
ee_mysql_repo = ("deb http://mirror.aarnet.edu.au/pub/MariaDB/repo/"
"10.0/ubuntu {codename} main"
.format(codename=ee_platform_codename))
elif ee_platform_distro == 'debian':
ee_mysql_repo = ("deb http://mirror.aarnet.edu.au/pub/MariaDB/repo/"
"10.0/debian {codename} main"
.format(codename=ee_platform_codename))
ee_mysql = ["mariadb-server", "percona-toolkit"]
# Postfix repo and packages
ee_postfix_repo = ""
ee_postfix = ["postfix"]
# Mail repo and packages
ee_mail_repo = ("deb http://http.debian.net/debian-backports {codename}"
"-backports main".format(codename=ee_platform_codename))
ee_mail = ["dovecot-core", "dovecot-imapd", "dovecot-pop3d",
"dovecot-lmtpd", "dovecot-mysql", "dovecot-sieve",
"dovecot-managesieved", "postfix-mysql", "php5-cgi",
"php-gettext", "php-pear"]
# Mailscanner repo and packages
ee_mailscanner_repo = ()
ee_mailscanner = ["amavisd-new", "spamassassin", "clamav", "clamav-daemon",
"arj", "zoo", "nomarch", "lzop", "cabextract", "p7zip",
"rpm", "unrar-free"]
# HHVM repo details
# 12.04 requires boot repository
if ee_platform_distro == 'ubuntu':
if ee_platform_codename == "precise":
ee_boost_repo = ("ppa:mapnik/boost")
ee_hhvm_repo = ("deb http://dl.hhvm.com/ubuntu {codename} main"
.format(codename=ee_platform_codename))
else:
ee_hhvm_repo = ("deb http://dl.hhvm.com/debian {codename} main"
.format(codename=ee_platform_codename))
ee_hhvm = ["hhvm"]
# Repo path
ee_repo_file = "ee-repo.list"
ee_repo_file_path = ("/etc/apt/sources.list.d/" + ee_repo_file)
# Application dabase file path
basedir = os.path.abspath(os.path.dirname('/var/lib/ee/'))
ee_db_uri = 'sqlite:///' + os.path.join(basedir, 'ee.db')
def __init__(self):
pass
| {
"content_hash": "b0093a027c8c421184b1dacd34bcf4b1",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 79,
"avg_line_length": 36.33734939759036,
"alnum_prop": 0.5830570291777188,
"repo_name": "rjdp/EE-dbmigrate",
"id": "c615dc0b7b3a91178a82dc211b4fab3b04cb9d41",
"size": "6032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ee/core/variables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "58584"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "422130"
},
{
"name": "Shell",
"bytes": "19984"
}
],
"symlink_target": ""
} |
"""Plugged In Status Support for the Nissan Leaf."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from . import DATA_CHARGING, DATA_LEAF, DATA_PLUGGED_IN, LeafEntity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up of a Nissan Leaf binary sensor."""
if discovery_info is None:
return
devices = []
for vin, datastore in hass.data[DATA_LEAF].items():
_LOGGER.debug("Adding binary_sensors for vin=%s", vin)
devices.append(LeafPluggedInSensor(datastore))
devices.append(LeafChargingSensor(datastore))
add_entities(devices, True)
class LeafPluggedInSensor(LeafEntity, BinarySensorDevice):
"""Plugged In Sensor class."""
@property
def name(self):
"""Sensor name."""
return "{} {}".format(self.car.leaf.nickname, "Plug Status")
@property
def is_on(self):
"""Return true if plugged in."""
return self.car.data[DATA_PLUGGED_IN]
@property
def icon(self):
"""Icon handling."""
if self.car.data[DATA_PLUGGED_IN]:
return "mdi:power-plug"
return "mdi:power-plug-off"
class LeafChargingSensor(LeafEntity, BinarySensorDevice):
"""Charging Sensor class."""
@property
def name(self):
"""Sensor name."""
return "{} {}".format(self.car.leaf.nickname, "Charging Status")
@property
def is_on(self):
"""Return true if charging."""
return self.car.data[DATA_CHARGING]
@property
def icon(self):
"""Icon handling."""
if self.car.data[DATA_CHARGING]:
return "mdi:flash"
return "mdi:flash-off"
| {
"content_hash": "84911abac8854cba37f347f83af9de9f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 72,
"avg_line_length": 27.109375,
"alnum_prop": 0.6334293948126801,
"repo_name": "leppa/home-assistant",
"id": "1ee450df87d0ad3b70274869632af9ab9ca86454",
"size": "1735",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/nissan_leaf/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from google.cloud import gke_backup_v1
async def sample_update_restore():
# Create a client
client = gke_backup_v1.BackupForGKEAsyncClient()
# Initialize request argument(s)
restore = gke_backup_v1.Restore()
restore.backup = "backup_value"
request = gke_backup_v1.UpdateRestoreRequest(
restore=restore,
)
# Make the request
operation = client.update_restore(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END gkebackup_v1_generated_BackupForGKE_UpdateRestore_async]
| {
"content_hash": "d2eacb6455fe5eb4aff634d370cd1edc",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 63,
"avg_line_length": 24.153846153846153,
"alnum_prop": 0.7006369426751592,
"repo_name": "googleapis/python-gke-backup",
"id": "c5e3115c4b0937d8f3bf48e9f9a106901178bce6",
"size": "2018",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/gkebackup_v1_generated_backup_for_gke_update_restore_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1275539"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
''' Tests for chronicler.decorators.audits '''
from mock import Mock
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from chronicler.models import AuditItem
from chronicler.decorators import audits
from chronicler.tests import TestCase
from chronicler.tests.models import Person, Group
@audits(Person, ['group_set'], 'pk', 'person_pk', 'POST')
def fake_view_post(request):
pass
@audits(Person, ['group_set'], 'pk', 'person_pk', 'GET')
def fake_view_get(request):
pass
@audits(Person, ['group_set'], 'pk', 'person_pk')
def fake_view(request, person_pk):
pass
@audits(Person, ['group_set'], 'pk', 'person_pk', force=True)
def fake_view_force(request, person_pk):
pass
class TestCreateAuditEntry(TestCase):
def setUp(self):
super(TestCreateAuditEntry, self).setUp()
self.user, _ = User.objects.get_or_create(username='analyte')
self.content_type = ContentType.objects.get_for_model(Person)
self.person = Person.objects.create(name='Tester')
def test_create_post_key(self):
''' Test that asserts we can get our object from the POST variables
when necessary
'''
assert not AuditItem.objects.all()
request = Mock(POST={'person_pk': self.person.pk}, user=self.user)
fake_view_post(request)
assert AuditItem.objects.filter(
content_type=self.content_type,
object_id=self.person.pk)
def test_create_get_key(self):
''' Test that asserts we can get our object from the GET variables
when necessary
'''
assert not AuditItem.objects.all()
request = Mock(GET={'person_pk': self.person.pk}, user=self.user)
fake_view_get(request)
assert AuditItem.objects.filter(
content_type=self.content_type,
object_id=self.person.pk)
def test_create_simple_view(self):
''' Test that proves that we can grab our necessary data to get an
object from the request path
'''
assert not AuditItem.objects.all()
request = Mock(user=self.user)
fake_view(request, person_pk=self.person.pk)
assert AuditItem.objects.filter(
content_type=self.content_type,
object_id=self.person.pk)
def test_prevent_audit_dupes(self):
''' Test that asserts that when nothing changes, we don't create
another audit item with identical changes
'''
assert not AuditItem.objects.all()
request = Mock(user=self.user)
fake_view(request, person_pk=self.person.pk)
assert AuditItem.objects.filter(
content_type=self.content_type,
object_id=self.person.pk)
fake_view(request, person_pk=self.person.pk)
audit_items = AuditItem.objects.filter(
content_type=self.content_type,
object_id=self.person.pk)
self.assertEqual(audit_items.count(), 1)
def test_audits_force_create_dupes(self):
''' Test that asserts that even when we find nothing changes,
that we will create a dupe if force is set to True
'''
assert not AuditItem.objects.all()
request = Mock(user=self.user)
fake_view(request, person_pk=self.person.pk)
assert AuditItem.objects.filter(
content_type=self.content_type,
object_id=self.person.pk)
fake_view_force(request, person_pk=self.person.pk)
audit_items = AuditItem.objects.filter(
content_type=self.content_type,
object_id=self.person.pk)
self.assertEqual(audit_items.count(), 2)
| {
"content_hash": "9a53c727b42d732b554f316f890eabda",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 75,
"avg_line_length": 34.62264150943396,
"alnum_prop": 0.6452316076294278,
"repo_name": "analytehealth/chronicler",
"id": "46d63a78bf9593fc08d8c6d9f37dfe00cf696ae0",
"size": "3670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chronicler/tests/test_audit_decorator.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "18299"
}
],
"symlink_target": ""
} |
__author__ = 'swhite'
# Non-admin non-project specific web views (not REST API views)
REGULAR_WEB_VIEWS = (
'home',
'view_markers',
'view_fluorochromes',
'view_parameters',
'view_specimens',
'add_project',
'view_sample_groups',
)
# Admin views not tied to a project and not REST API views
ADMIN_WEB_VIEWS = (
'add_marker',
'add_fluorochrome',
'add_parameter',
'add_specimen',
'add_sample_group',
)
| {
"content_hash": "5d2b0252cc28e6352e7bba6d8357f808",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 63,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.625,
"repo_name": "whitews/ReFlow",
"id": "3c0e5b0f6ab2223820409a45eecfd2df1de5a595",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "repository/tests/constants.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "131522"
},
{
"name": "Dockerfile",
"bytes": "327"
},
{
"name": "HTML",
"bytes": "424958"
},
{
"name": "JavaScript",
"bytes": "1177023"
},
{
"name": "Python",
"bytes": "254904"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 0, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 0); | {
"content_hash": "eb3a565440506f38802fd7df72b6722d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 170,
"avg_line_length": 38.57142857142857,
"alnum_prop": 0.7111111111111111,
"repo_name": "antoinecarme/pyaf",
"id": "7e06d6048393ea4fc68675e2a3f70426328d7e21",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Quantization/trend_MovingMedian/cycle_0/ar_/test_artificial_32_Quantization_MovingMedian_0__100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""
Generator classes to produce random data with specific properties.
"""
import datetime as dt
import logging
import numpy as np
import shapely
from faker import Faker
from functools import partial
from itertools import count, islice
from queue import Queue, Full
from random import Random
from shapely.geometry import Point, Polygon, MultiPolygon
from .base import IndependentGenerator
from .derived_generators import ExtractAttribute
from .item_list import ItemList
__all__ = [
'CharString', 'Constant', 'DigitString', 'FakerGenerator', 'First', 'Float', 'Geolocation',
'GeolocationPair', 'GeoJSONGeolocationPair', 'HashDigest', 'Integer', 'IterateOver', 'Nth', 'NumpyRandomGenerator', 'Second',
'SeedGenerator', 'SelectMultiple', 'SelectOne', 'Sequential', 'Split', 'Subsample', 'Timestamp', 'TimestampNEW',
'TimestampError', 'TupleGenerator', 'Zip'
]
logger = logging.getLogger("tohu")
# Note: It would be better to make this an abstract base class
# (to enforce the interface in subclasses) rather than
# raising NotImplementedError for methods that are not
# provided by subclasses, but somehow this interferes with
# the metaclass CustomGeneratorMeta below.
#
class BaseGenerator(IndependentGenerator):
"""
Base class for all of tohu's random generators.
"""
class TupleGenerator(BaseGenerator):
"""
Abstract base class
"""
@property
def tuple_len(self):
"""
Length of tuples produced by this generator.
"""
try:
return self._tuple_len
except AttributeError:
raise NotImplementedError("Class {} does not implement attribute 'tuple_len'.".format(self.__class__.__name__))
@tuple_len.setter
def tuple_len(self, value):
self._tuple_len = value
class Constant(BaseGenerator):
"""
Generator which produces a constant sequence (repeating the same value indefinitely).
"""
def __init__(self, value):
"""
Parameters
----------
value:
The constant value produced by this generator.
"""
self.value = value
def _spawn(self):
return Constant(self.value)
def reset(self, seed=None):
return self
def __next__(self):
return self.value
class Integer(BaseGenerator):
"""
Generator which produces random integers k in the range low <= k <= high.
"""
def __init__(self, low, high):
"""
Parameters
----------
low: integer
Lower bound (inclusive).
high: integer
Upper bound (inclusive).
"""
self.low = low
self.high = high
self.randgen = Random()
def _spawn(self):
return Integer(self.low, self.high)
def reset(self, seed):
self.randgen.seed(seed)
return self
def __next__(self):
return self.randgen.randint(self.low, self.high)
class Float(BaseGenerator):
"""
Generator which produces random floating point numbers x in the range low <= x <= high.
"""
def __init__(self, low, high):
"""
Parameters
----------
low: integer
Lower bound (inclusive).
high: integer
Upper bound (inclusive).
"""
self.low = low
self.high = high
self.randgen = Random()
def _spawn(self):
return Float(self.low, self.high)
def reset(self, seed):
self.randgen.seed(seed)
return self
def __next__(self):
return self.randgen.uniform(self.low, self.high)
class NumpyRandomGenerator(BaseGenerator):
"""
Generator which produces random numbers using one of the methods supported by numpy. [1]
[1] https://docs.scipy.org/doc/numpy/reference/routines.random.html
"""
def __init__(self, method, **numpy_args):
"""
Parameters
----------
method: string
Name of the numpy function to use (see [1] for details)
numpy_args:
Remaining arguments passed to the numpy function (see [1] for details)
References
----------
[1] https://docs.scipy.org/doc/numpy/reference/routines.random.html
"""
self.method = method
self.random_state = np.random.RandomState()
self.randgen = getattr(self.random_state, method)
self.numpy_args = numpy_args
def _spawn(self):
return NumpyRandomGenerator(method=self.method, **self.numpy_args)
def reset(self, seed):
self.random_state.seed(seed)
return self
def __next__(self):
return self.randgen(**self.numpy_args)
class FakerGenerator(BaseGenerator):
"""
Generator which produces random elements using one of the methods supported by faker. [1]
[1] https://faker.readthedocs.io/
"""
def __init__(self, method, *, locale=None, **faker_args):
"""
Parameters
----------
method: string
Name of the faker provider to use (see [1] for details)
locale: string
Locale to use when generating data, e.g. 'en_US' (see [1] for details)
faker_args:
Remaining arguments passed to the faker provider (see [1] for details)
References
----------
[1] https://faker.readthedocs.io/
"""
self.method = method
self.locale = locale
self.faker_args = faker_args
self.fake = Faker(locale=locale)
self.randgen = getattr(self.fake, method)
def _spawn(self):
return FakerGenerator(method=self.method, locale=self.locale, **self.faker_args)
def reset(self, seed):
self.fake.seed_instance(seed)
return self
def __next__(self):
return self.randgen(**self.faker_args)
class Sequential(BaseGenerator):
"""
Generator which produces a sequence of strings
of the form:
"PREFIX001"
"PREFIX002"
"PREFIX003"
...
Both the prefix and the number of digits can
be modified by the user.
Example:
>>> s = Sequential(prefix="Foobar_", digits=4)
>>> next(s)
Foobar_0001
>>> next(s)
Foobar_0002
>>> next(s)
Foobar_0003
"""
def __init__(self, *, prefix, digits):
"""
Parameters
----------
prefix: string
Prefix to be appended to generated elements.
digits: integer
Number of digits to use for the sequential numbering.
Any numbers will fewer digits will be zero-padded;
numbers with more digits are unaffected.
"""
self.prefix = prefix
self.digits = digits
self.fmt_str = self.prefix + '{{:0{digits}}}'.format(digits=digits)
self.reset()
def _spawn(self):
return Sequential(prefix=self.prefix, digits=self.digits)
def reset(self, seed=None):
"""
Note that this method supports the `seed` argument (for consistency with other generators),
but its value is ignored - the generator is simply reset to its initial value.
"""
self.cnt = count(start=1)
return self
def __next__(self):
return self.fmt_str.format(next(self.cnt))
class SelectOne(BaseGenerator):
"""
Generator which produces a sequence of items taken from a given set of elements.
"""
def __init__(self, values, p=None):
"""
Parameters
----------
values: list
List of options from which to choose elements.
p: list, optional
The probabilities associated with each element in `values`.
If not given the assumes a uniform distribution over all values.
"""
self.values = values
self.p = p
self.num_values = len(values)
self.randgen = np.random.RandomState()
def __getattr__(self, name):
if name == '_ipython_canary_method_should_not_exist_':
raise NotImplementedError("Ignoring this attribute because it is only used by IPython")
logger.debug(f"Creating new generator to extract attribute '{name}' from elements produced by {self}")
return ExtractAttribute(self, name)
def __next__(self):
"""
Return random element from the list of values provided during initialisation.
"""
idx = self.randgen.choice(self.num_values, p=self.p)
return self.values[idx]
def _spawn(self):
return SelectOne(values=self.values, p=self.p)
def reset(self, seed):
if seed is not None:
self.randgen.seed(seed)
return self
# Define alias for backwards compatibilty
ChooseFrom = SelectOne
class SelectMultiple(BaseGenerator):
"""
Generator which produces a sequence of tuples with elements taken from a given set of elements.
"""
def __init__(self, values, size, p=None):
"""
Parameters
----------
values: list
List of options from which to choose elements.
size: integer
Size of the output tuples.
p: list, optional
The probabilities associated with each element in `values`.
If not given the assumes a uniform distribution over all values.
"""
if isinstance(size, int):
if size < 0:
raise ValueError(f'Size of output tuples cannot be negative. Got: size={size}')
size = Integer(low=size, high=size)
elif not isinstance(size, Integer):
raise TypeError(f'Argument `size` must be an integer or an Integer generator. Got: size={size} (type: {type(size)})')
# Note: the chosen implementation is not the most efficient for large values of `size`
# because we create `size` different SelectOne generators, one for each possible position
# in the the output tuples. Alternatively, we could just create a single SelectOne
# generator to produce all output elements. The advantage of multiple generators is
# that the value of `size` is increased, the first few elements of the output tuples
# remain the same. This feels nice and consistent, but I'm not sure if this is really
# necessary (or even desired). In most cases it probably doesn't matter because `size`
# will typically have a fairly small value.
self.values = values
self.p = p
self.size = size
self._size_gen = size
self._max_size = self._size_gen.high
self._elem_gens = [SelectOne(values, p) for _ in range(self._max_size)]
self._seed_generator = SeedGenerator()
def __next__(self):
"""
Return tuple of length `size` with elements drawn from the list of values provided during initialisation.
"""
cur_size = next(self._size_gen)
return tuple(next(g) for g in islice(self._elem_gens, cur_size))
def _spawn(self):
return SelectMultiple(values=self.values, n=self.size, p=self.p)
def reset(self, seed):
# Reset each individual element generator with a new seed
self._seed_generator.reset(seed)
self._size_gen.reset(next(self._seed_generator))
for g in self._elem_gens:
elem_seed = next(self._seed_generator)
g.reset(elem_seed)
return self
class Subsample(BaseGenerator):
"""
Generator which produces subsamples of a given set of values,
where each item is chosen with a certain probability `p`.
"""
def __init__(self, values, p):
"""
Parameters
----------
values: iterable
The set of values from which to draw subsamples.
p: float
The probability with which each individual element in `values` is
chosen during the subsampling process. Must satisfy 0 <= p <= 1.
"""
if p < 0 or p > 1.0:
raise ValueError(f"The value of p must be in the range [0, 1]. Got: p={p}")
self.values = np.array(values)
self.size = len(self.values)
self.p = p
self.randgen = np.random.RandomState()
def __next__(self):
subsample = self.values[self.randgen.random_sample(self.size) < self.p]
return subsample
def _spawn(self):
return Subsample(self.values, self.p)
def reset(self, seed):
self.randgen.seed(seed)
ALPHANUMERIC_CHARACTERS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
class CharString(BaseGenerator):
"""
Generator which produces a sequence of character strings.
"""
def __init__(self, *, length=None, min_length=None, max_length=None, charset=None):
"""
Parameters
----------
length: integer
Length of the character strings produced by this generator
(mutually exclusive with `min_length`/`max_length`).
min_length, max_length: integer
Minimum and maximum length of character strings produced by this generator
(mutually exclusive with `length`).
charset: iterable
Character set to draw from when generating strings.
Default: alphanumeric characters (both lowercase and uppercase letters).
"""
self.min_length, self.max_length = self._get_min_and_max_length(length, min_length, max_length)
self.charset = charset or ALPHANUMERIC_CHARACTERS
self.seed_gen = SeedGenerator()
self.char_gen = SelectOne(self.charset)
self.length_gen = Integer(low=self.min_length, high=self.max_length)
def _get_min_and_max_length(self, length, min_length, max_length):
error_msg = ("Either 'length' or both 'min_length' and 'max_length' must be specified. "
f"Got: length={length}, min_length={min_length}, max_length={max_length}")
if length is None:
if (min_length is None or max_length is None):
raise ValueError(error_msg)
else:
return min_length, max_length
else:
if not (min_length is None and max_length is None):
raise ValueError(error_msg)
else:
return length, length
def _spawn(self):
return CharString(min_length=self.min_length, max_length=self.max_length, charset=self.charset)
def __next__(self):
chars = [next(self.char_gen) for _ in range(next(self.length_gen))]
return ''.join(chars)
def reset(self, seed):
self.seed_gen.reset(seed)
self.char_gen.reset(next(self.seed_gen))
self.length_gen.reset(next(self.seed_gen))
return self
class DigitString(CharString):
"""
Generator which produces a sequence of strings containing only digits.
"""
def __init__(self, *, length=None, min_length=None, max_length=None):
"""
Parameters
----------
length: integer
Length of the character strings produced by this generator
(mutually exclusive with `min_length`/`max_length`).
min_length, max_length: integer
Minimum and maximum length of character strings produced by this generator
(mutually exclusive with `length`).
seed: integer (optional)
Seed to initialise this random generator.
"""
charset = "0123456789"
super().__init__(length=length, min_length=min_length, max_length=max_length, charset=charset)
def _spawn(self):
return DigitString(min_length=self.min_length, max_length=self.max_length)
def _identity(x):
"Helper function which returns its argument unchanged"
return x
class HashDigest(CharString):
"""
Generator which produces a sequence of hex strings representing hash digest values.
"""
def __init__(self, *, length=None, min_length=None, max_length=None, as_bytes=False):
"""
Parameters
----------
length: integer
Length of the character strings produced by this generator
(mutually exclusive with `min_length`/`max_length`).
min_length, max_length: integer
Minimum and maximum length of character strings produced by this generator
(mutually exclusive with `length`).
as_bytes: bool
If True, return byte-string obtained from converting each
pair of consecutive characters in the hash digest string
to an ASCII value. Note that in this case `length` must be
an even number and the actual number of bytes returned in
each generated hash digest byte string is length/2.
"""
if as_bytes and (length % 2) != 0:
raise ValueError(f"Length must be an even number if as_bytes=True (got: length={length})")
charset = "0123456789ABCDEF"
self.as_bytes = as_bytes
self._maybe_convert_type = bytes.fromhex if self.as_bytes else _identity
super().__init__(length=length, min_length=min_length, max_length=max_length, charset=charset)
def __next__(self):
return self._maybe_convert_type(super().__next__())
def _spawn(self):
return HashDigest(min_length=self.min_length, max_length=self.max_length, as_bytes=self.as_bytes)
class GeolocationPair(TupleGenerator):
"""
Generator which produces a sequence of (lon, lat) coordinates.
"""
def __init__(self):
self.lon_gen = Float(-180, 180)
self.lat_gen = Float(-90, 90)
self.tuple_len = 2
def _spawn(self):
return GeolocationPair()
def __next__(self):
return (next(self.lon_gen), next(self.lat_gen))
def reset(self, seed):
self.lon_gen.reset(seed)
self.lat_gen.reset(seed)
return self
def Geolocation():
return Split(GeolocationPair())
class ShapelyGeolocationPair(TupleGenerator):
"""
Generator which produces random locations inside a shapely polygon
or multipolygon. This is a helper class and most users will probably
find the GeoJSONGeolocationPair generator more useful.
"""
def __init__(self, shp, max_tries=100):
if not isinstance(shp, (Polygon, MultiPolygon)):
raise TypeError(f"Argument 'shp' must be of type Polygon or MultiPolygon. Got: {type(shp)}")
self.tuple_len = 2
self.shape = shapely.geometry.shape(shp)
lon_min, lat_min, lon_max, lat_max = self.shape.bounds
self.lon_gen = Float(lon_min, lon_max)
self.lat_gen = Float(lat_min, lat_max)
self.max_tries = max_tries
self.seed_generator = SeedGenerator()
def __repr__(self):
return f"<ShapelyShape, area={self.area:.3f}>"
def _spawn(self):
return ShapelyGeolocationPair(self.shape, max_tries=self.max_tries)
@property
def area(self):
return self.shape.area
def __next__(self):
for cnt in range(1, self.max_tries + 1):
pt = Point(next(self.lon_gen), next(self.lat_gen))
if pt.within(self.shape):
return (pt.x, pt.y)
else:
logger.debug(f"Generated point is not within shape. Trying again... [{cnt}/{self.max_tries}]")
raise RuntimeError(f"Could not generate point in shape after {self.max_tries} attempts")
def reset(self, seed):
self.seed_generator.reset(seed)
self.lon_gen.reset(next(self.seed_generator))
self.lat_gen.reset(next(self.seed_generator))
return self
class GeoJSONGeolocationPair(TupleGenerator):
"""
Generator which produces random locations inside a geographic area.
"""
def __init__(self, geojson):
self.geojson = geojson
self.tuple_len = 2
self.shape_gens = [ShapelyGeolocationPair(shapely.geometry.shape(feature['geometry'])) for feature in geojson['features']]
self.shape_gen_chooser = np.random.RandomState() # TODO: make this a tohu generator, too
areas = np.array([s.area for s in self.shape_gens])
self.choice_probs = areas / areas.sum()
self.seed_generator = SeedGenerator()
def _spawn(self):
return GeoJSONGeolocationPair(self.geojson)
def __next__(self):
sg = self.shape_gen_chooser.choice(self.shape_gens, p=self.choice_probs)
return next(sg)
def reset(self, seed):
self.seed_generator.reset(seed)
self.shape_gen_chooser.seed(next(self.seed_generator))
for g in self.shape_gens:
g.reset(next(self.seed_generator))
class TimestampError(Exception):
"""
Custom exception raised to indicate Timestamp errors
(for example when the end time of the timestamp interval
is before the start time).
"""
class Timestamp(BaseGenerator):
"""
Generator which produces a timestamp.
"""
def __init__(self, *, start=None, end=None, date=None, fmt='%Y-%m-%d %H:%M:%S', uppercase=False):
"""
Initialise timestamp generator.
Note that `start` and `end` are both inclusive. They can either
be full timestamps such as 'YYYY-MM-DD HH:MM:SS', or date strings
such as 'YYYY-MM-DD'. Note that in the latter case `end` is
interpreted as as `YYYY-MM-DD 23:59:59`, i.e. the day is counted
in full.
Args:
start (date string): start time
end (date string): end time
date (str): string of the form YYYY-MM-DD. This is an alternative (and mutually exclusive)
to specifying `start` and `end`.
fmt (str): formatting string for output (same format as accepted by `datetime.strftime`)
uppercase (bool): if True, months are formatted with all uppercase letters (default: False)
"""
if (date is not None):
if not (start is None and end is None):
raise TimestampError("Argument `date` is mutually exclusive with `start` and `end`.")
self.start = dt.datetime.strptime(date, '%Y-%m-%d')
self.end = self.start + dt.timedelta(hours=23, minutes=59, seconds=59)
else:
if (start is None or end is None):
raise TimestampError("Either `date` or both `start` and `end` must be provided.")
try:
self.start = dt.datetime.strptime(start, '%Y-%m-%d')
except ValueError:
self.start = dt.datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
try:
self.end = dt.datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
except ValueError:
end_date = dt.datetime.strptime(end, '%Y-%m-%d')
self.end = dt.datetime(end_date.year, end_date.month, end_date.day, 23, 59, 59)
self.dt = int((self.end - self.start).total_seconds())
self.fmt = fmt
self.uppercase = uppercase
if self.dt < 0:
raise TimestampError("Start time must be before end time. Got: start_time='{}', end_time='{}'."
"".format(self.start, self.end))
self.offsetgen = Integer(0, self.dt)
def _spawn(self):
return Timestamp(
start=self.start.strftime('%Y-%m-%d %H:%M:%S'),
end=self.end.strftime('%Y-%m-%d %H:%M:%S'),
fmt=self.fmt,
uppercase=self.uppercase)
def __next__(self):
s = (self.start + dt.timedelta(seconds=next(self.offsetgen))).strftime(self.fmt)
return s.upper() if self.uppercase else s
def reset(self, seed):
self.offsetgen.reset(seed)
return self
class TimestampNEW(BaseGenerator):
"""
Generator which produces random timestamps.
"""
def __init__(self, *, start=None, end=None, date=None):
"""
Initialise timestamp generator.
Note that `start` and `end` are both inclusive. They can either
be full timestamps such as 'YYYY-MM-DD HH:MM:SS', or date strings
such as 'YYYY-MM-DD'. Note that in the latter case `end` is
interpreted as as `YYYY-MM-DD 23:59:59`, i.e. the day is counted
in full.
Args:
start (date string): start time
end (date string): end time
date (str): string of the form YYYY-MM-DD. This is an alternative (and mutually exclusive)
to specifying `start` and `end`.
"""
if (date is not None):
if not (start is None and end is None):
raise TimestampError("Argument `date` is mutually exclusive with `start` and `end`.")
self.start = dt.datetime.strptime(date, '%Y-%m-%d')
self.end = self.start + dt.timedelta(hours=23, minutes=59, seconds=59)
else:
if (start is None or end is None):
raise TimestampError("Either `date` or both `start` and `end` must be provided.")
try:
self.start = dt.datetime.strptime(start, '%Y-%m-%d')
except ValueError:
self.start = dt.datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
try:
self.end = dt.datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
except ValueError:
end_date = dt.datetime.strptime(end, '%Y-%m-%d')
self.end = dt.datetime(end_date.year, end_date.month, end_date.day, 23, 59, 59)
self.dt = int((self.end - self.start).total_seconds())
if self.dt < 0:
raise TimestampError("Start time must be before end time. Got: start_time='{}', end_time='{}'."
"".format(self.start, self.end))
self.offsetgen = Integer(0, self.dt)
def _spawn(self):
return TimestampNEW(
start=self.start.strftime('%Y-%m-%d %H:%M:%S'),
end=self.end.strftime('%Y-%m-%d %H:%M:%S')) # yapf: disable
def __next__(self):
ts = (self.start + dt.timedelta(seconds=next(self.offsetgen)))
return ts
def reset(self, seed):
self.offsetgen.reset(seed)
return self
class SeedGenerator:
"""
This class is used in custom generators to create a collection of
seeds when reset() is called, so that each of the constituent
generators can be re-initialised with a different seed in a
reproducible way.
Note: This is almost identical to the `Integer` class above, but
we need a version which does *not* inherit from `BaseGenerator`,
otherwise the automatic item class creation in `CustomGeneratorMeta`
gets confused.
"""
def __init__(self):
self.randgen = Random()
self.minval = 0
self.maxval = 2**32 - 1
def reset(self, seed):
self.randgen.seed(seed)
def __iter__(self):
return self
def __next__(self):
return self.randgen.randint(self.minval, self.maxval)
class Nth(BaseGenerator):
"""
Generator which allows to extract the n-th element from a tuple-producing generator.
"""
def __init__(self, g, idx):
self.g = g
self.idx = idx
def __next__(self):
return next(self.g)[self.idx]
def _spawn(self):
return Nth(self.g._spawn(), self.idx)
def reset(self, seed):
self.g.reset(seed)
return self
First = partial(Nth, idx=0)
Second = partial(Nth, idx=1)
class TohuBufferOverflow(Exception):
"""
Custom exception to indicate a buffer overflow due to a mishandling of linked generators.
"""
class BufferedTuple(BaseGenerator):
"""
Helper class which allows buffered extraction
of items from a tuple generator.
"""
def __init__(self, g, *, tuple_len, maxbuffer=10):
"""
Parameters
----------
g: tohu generator
The generator to be buffered. The items produced by `g` must be tuples.
tuple_len: integer
Length of tuples produced by g.
maxbuffer: integer
Maximum number of items to be buffered. Note that under normal
circumstances a single buffered element should be sufficient,
so the default of 10 is probably overcautious. ;)
"""
self.g = g
self.tuple_len = tuple_len
self.maxbuffer = maxbuffer
self._reset_queues()
def __repr__(self):
return f"<BufferedTuple, parent: {self.g}>"
def _spawn(self):
raise NotImplementedError(
"BufferedTuple cannot be spawned directly. Instead, call _spawn_parent() to rewire it to a spawned version of its parent tuple generator."
)
def _spawn_parent(self):
self.g = self.g._spawn()
def _reset_queues(self):
self._queues = [Queue(maxsize=self.maxbuffer) for _ in range(self.tuple_len)]
def _refill(self):
item = next(self.g)
for x, queue in zip(item, self._queues):
try:
queue.put_nowait(x)
except Full:
raise TohuBufferOverflow(
"Buffer filled up because elements from multiple linked generators were not consumed at the same rate.")
def reset(self, seed):
self.g.reset(seed)
self._reset_queues()
return self
def next_nth(self, n):
if self._queues[n].empty():
self._refill()
return self._queues[n].get()
class InvalidGeneratorError(Exception):
"""
Custom exception to indicate an instance of NthElementBuffered
that has been spawned and is therefore invalid.
"""
class NthElementBuffered(BaseGenerator):
"""
Helper class to iterate over the Nth element in a buffered tuple-generator.
"""
def __init__(self, g_buffered, idx):
assert isinstance(g_buffered, BufferedTuple)
self.g_buffered = g_buffered
self.idx = idx
self.invalid = False
def __repr__(self):
return f"<NthElementBuffered: idx={self.idx}, parent={self.g_buffered}>"
def __next__(self):
if self.invalid:
# Note: ideally it would be nice to avoid checking a flag every time
# next() is called to avoid a performance penalty. Originally I tried
# to invalidate generators by overwriting the __next__ method. However,
# magic methods are looked up on the class, not the instance (see [1]),
# so we cannot do this for individual instances.
#
# On the other hand, the overhead seems to be on the order of 1.3us per call
# so this is probably fine.
#
# [1] https://stackoverflow.com/questions/33824228/why-wont-dynamically-adding-a-call-method-to-an-instance-work
raise InvalidGeneratorError(
"This NthElementBuffered generator has been spawned and is therefore invalid. Please call next() on the spawned version instead."
)
return self.g_buffered.next_nth(self.idx)
def invalidate(self):
"""
Invalidate this generator so that it's impossible to call next() on it.
"""
self.invalid = True
def _spawn(self):
logging.debug("Generator of type NthElementBuffered is being spawned. Note that "
"internally this will spawn its parent, rewire all of the original "
"parent's children to the new parent and invalidate this generator.")
self.g_buffered._spawn_parent()
self.invalidate()
return NthElementBuffered(self.g_buffered, self.idx)
def reset(self, seed):
self.g_buffered.reset(seed)
return self
def Split(g, *, maxbuffer=10, tuple_len=None):
"""
Split a tuple generator into individual generators.
Parameters
----------
g: tohu generator
The generator to be split. The items produced by `g` must be tuples.
maxbuffer: integer
Maximum number of items produced by `g` that will be buffered.
"""
if tuple_len is None:
try:
tuple_len = g.tuple_len
except AttributeError:
raise ValueError("Argument 'tuple_len' must be given since generator is not of type TupleGenerator.")
g_buffered = BufferedTuple(g, maxbuffer=maxbuffer, tuple_len=tuple_len)
return tuple(NthElementBuffered(g_buffered, i) for i in range(tuple_len))
class Zip(TupleGenerator):
"""
Create a generator which produces tuples that are
combined from the elements produced by multiple
individual generators.
"""
def __init__(self, *generators):
self._generators = [g._spawn() for g in generators]
self.seed_generator = SeedGenerator()
self.tuple_len = len(self._generators)
def __next__(self):
return tuple(next(g) for g in self._generators)
def reset(self, seed):
self.seed_generator.reset(seed)
for g in self._generators:
new_seed = next(self.seed_generator)
g.reset(new_seed)
return self
class IterateOver(BaseGenerator):
"""
Generator which simply iterates over all items in a given iterable
"""
def __init__(self, g):
assert isinstance(g, (list, tuple, ItemList)), \
"For the time being we enforce g being a list, tuple or ItemList so that we can spawn and reset this generator."
self.g = g
self._iter_g = None
self.reset()
def __repr__(self):
return f"<IterateOver, list with {len(self.g)} items>"
def __next__(self):
return next(self._iter_g)
def __iter__(self):
return self
def _spawn(self):
return IterateOver(self.g)
def reset(self, seed=None):
self._iter_g = iter(self.g)
| {
"content_hash": "a3fe74fddbe8e802c1117d8765a2d726",
"timestamp": "",
"source": "github",
"line_count": 1025,
"max_line_length": 150,
"avg_line_length": 32.9609756097561,
"alnum_prop": 0.6034926742637265,
"repo_name": "maxalbert/tohu",
"id": "fe1c682b4ffe2ed6bbab5e2c53a21332c572b710",
"size": "33809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tohu/v2/generators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "244324"
},
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "568361"
}
],
"symlink_target": ""
} |
"""Unit test package for ahps_alerts."""
| {
"content_hash": "515477444490f67623878d2c561f8c55",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 40,
"avg_line_length": 41,
"alnum_prop": 0.6829268292682927,
"repo_name": "DavidHickman/ahps_alerts",
"id": "0a3f057a0e7d2280e2e34d8b87a3d78399e1fbdf",
"size": "66",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2285"
},
{
"name": "Python",
"bytes": "11061"
}
],
"symlink_target": ""
} |
import os
from dpa.action import Action, ActionError, ActionAborted
from dpa.location import current_location_code
from dpa.product import Product, ProductError, validate_product_name
from dpa.product.version import ProductVersion, ProductVersionError
from dpa.product.representation import (
ProductRepresentation,
ProductRepresentationError,
)
from dpa.product.representation.status import (
ProductRepresentationStatus,
ProductRepresentationStatusError,
)
from dpa.ptask import PTask, PTaskError
from dpa.ptask.area import PTaskArea, PTaskAreaError
from dpa.ptask.cli import ParsePTaskSpecArg
from dpa.ptask.spec import PTaskSpec
from dpa.shell.output import Output, Style
from dpa.sync.action import SyncAction
from dpa.user import current_username
# -----------------------------------------------------------------------------
class ProductCreateAction(Action):
"""Create a new product."""
name = "create"
target_type = "product"
# -------------------------------------------------------------------------
@classmethod
def setup_cl_args(cls, parser):
# product name
parser.add_argument(
"product",
help="The spec representing the ptask to create.",
)
# ptask spec
parser.add_argument(
"ptask",
action=ParsePTaskSpecArg,
nargs="?",
help="The ptask creating the product.",
)
parser.add_argument(
"-c", "--category",
default=None,
help="The category of product to create.",
)
# description
parser.add_argument(
"-d", "--description",
default=None,
help="A description of the ptask being created.",
)
parser.add_argument(
"-r", "--resolution",
default=None,
help="The resolution of the product being created.",
)
parser.add_argument(
"-t", "--type",
dest="file_type",
default=None,
help="The file type for this product.",
)
parser.add_argument(
"-p", "--path",
default=None,
help="Path to existing file/dir for this product.",
)
parser.add_argument(
"-v", "--version",
type=int,
default=None,
help="Version of the product to create.",
)
parser.add_argument(
"-n", "--note",
default=None,
help="Release note for this particular version.",
)
# -------------------------------------------------------------------------
def __init__(self, product, ptask, category=None, description=None,
resolution=None, file_type=None, path=None, version=None, note=None):
super(ProductCreateAction, self).__init__(
product,
ptask,
category=category,
description=description,
resolution=resolution,
file_type=file_type,
path=path,
version=version,
note=note,
)
self._product = product
self._ptask = ptask
self._category = category
self._description = description
self._resolution = resolution
self._file_type = file_type
self._path = path
self._version = version
self._note = note
if self._path:
if not os.path.exists(self._path):
raise ActionError("Supplied path does not exist.")
file_type = os.path.splitext(self._path)[1].lstrip(".")
if self._file_type and self._file_type != file_type:
raise ActionError(
"Different file types specified: {t1} & {t2}".format(
t1=self._file_type, t2=file_type))
else:
self._file_type = file_type
self._parse_product()
# -------------------------------------------------------------------------
def execute(self):
# create the product
self._create_product()
self._create_version()
self._create_representation()
self._create_status()
self._create_area()
self._sync_path()
if self.interactive:
print "\nProduct created successfully.\n"
# -------------------------------------------------------------------------
def prompt(self):
print ""
product_display = " [{b}{p}{r}]".format(
b=Style.bright,
p=self._product,
r=Style.reset,
)
# category menu
if not self._category:
self._category = Output.prompt_menu(
"Product categories",
"{pd} category".format(pd=product_display),
zip(*[Product.category_names()] * 2),
)
# description
if not self._description:
self._description = Output.prompt(
'{pd} description'.format(pd=product_display),
blank=False,
)
# file type
if not self._file_type:
if not self._file_type:
self._file_type = Output.prompt(
"{pd} file type".format(pd=product_display),
blank=False,
)
# resolution
if not self._resolution:
self._resolution = Output.prompt(
"{pd} resolution (Return if none)".format(pd=product_display),
blank=True,
)
if not self._resolution:
self._resolution = 'none'
# -------------------------------------------------------------------------
def undo(self):
if hasattr(self, '_product') and isinstance(self._product, Product):
self.logger.warning("Cannot undo attempted product creation. " + \
"See pipeline admin for help cleaning up unwanted products.")
# -------------------------------------------------------------------------
def validate(self):
if self.interactive:
print "\nValidating product arguments ..."
# should have a valid product name,
self._name = validate_product_name(self._name)
if self._category:
if not self._category in Product.category_names():
raise ActionError("Unrecognized category.")
else:
raise ActionError("Category is required.")
if not self._description:
raise ActionError("Description is required.")
# ptask
if not isinstance(self._ptask, PTask):
try:
self._ptask = PTask.get(self._ptask)
except PTaskError:
raise ActionError("Could not determine ptask.")
if self._version:
self._ptask_version = self._ptask.version(self._version)
else:
self._ptask_version = self._ptask.latest_version
if not self._ptask_version:
raise ActionError("Could not determine ptask version.")
if not self._note:
self._note = "None"
if self._path:
if not os.path.exists(self._path):
raise ActionError("Supplied path does not exist.")
if (os.path.isdir(self._path) and
not self._path.endswith(os.path.sep)):
self._path += os.path.sep
# -------------------------------------------------------------------------
def verify(self):
name = "Name"
category = "Category"
description = "Description"
file_type = "File type"
resolution = "Resolution"
ptask_ver = "PTask version"
path = "Path"
note = "Note"
output = Output()
output.header_names = [
name,
category,
description,
file_type,
resolution,
ptask_ver,
path,
note,
]
output.add_item(
{
name: self._name,
category: self._category,
description: self._description,
file_type: self._file_type,
resolution: self._resolution,
ptask_ver: self._ptask_version.spec,
path: self._path,
note: self._note,
},
color_all=Style.bright,
)
output.title = "Confirm create:"
output.dump()
if not Output.prompt_yes_no(Style.bright + "Create" + Style.reset):
raise ActionAborted("User chose not to proceed.")
# -------------------------------------------------------------------------
@property
def name(self):
return self._name
# -------------------------------------------------------------------------
@property
def product(self):
return self._product
# -------------------------------------------------------------------------
@property
def product_area(self):
return self._product_area
# -------------------------------------------------------------------------
@property
def product_version(self):
return self._product_version
# -------------------------------------------------------------------------
@property
def product_repr(self):
return self._product_repr
# -------------------------------------------------------------------------
@property
def product_repr_status(self):
return self._product_repr_status
# -------------------------------------------------------------------------
@property
def ptask(self):
return self._ptask
# -------------------------------------------------------------------------
@property
def ptask_version(self):
return self._ptask_version
# -------------------------------------------------------------------------
@property
def category(self):
return self._category
# -------------------------------------------------------------------------
@property
def description(self):
return self._description
# -------------------------------------------------------------------------
@property
def resolution(self):
return self._resolution
# -------------------------------------------------------------------------
@property
def file_type(self):
return self._file_type
# -------------------------------------------------------------------------
@property
def path(self):
return self._path
# -------------------------------------------------------------------------
@property
def version(self):
return self._version
# -------------------------------------------------------------------------
@property
def note(self):
return self._note
# -------------------------------------------------------------------------
def _create_product(self):
existing = Product.list(
name=self._name,
category=self._category,
ptask=self._ptask.spec,
)
if len(existing) == 1:
self._product = existing.pop()
self._product.update(description=self._description)
if self.interactive:
print "\nBase product exists: " + \
Style.bright + self._product.spec + Style.reset
else:
try:
self._product = Product.create(
ptask=self._ptask.spec,
name=self._name,
category=self._category,
description=self._description,
creator=current_username(),
)
except ProductError as e:
raise ActionError("Unable to create product: " + str(e))
else:
if self.interactive:
print "\nCreated base product: " + \
Style.bright + self._product.spec + Style.reset
# -------------------------------------------------------------------------
def _create_version(self):
existing = ProductVersion.list(
ptask_version=self._ptask_version.spec,
product=self._product.spec,
)
if len(existing) == 1:
self._product_version = existing.pop()
self._product_version.update(release_note=self._note)
if self.interactive:
print "\nProduct version exists: " + \
Style.bright + self._product_version.spec + Style.reset
else:
try:
self._product_version = ProductVersion.create(
ptask_version=self._ptask_version.spec,
product=self._product.spec,
release_note=self._note,
creator=current_username(),
)
except ProductVersionError as e:
raise ActionError("Unable to create product version: " + str(e))
else:
if self.interactive:
print "\nCreated product version: " + \
Style.bright + self._product_version.spec + Style.reset
# -------------------------------------------------------------------------
def _create_representation(self):
existing = ProductRepresentation.list(
product_version=self._product_version.spec,
resolution=self._resolution,
representation_type=self._file_type,
)
if len(existing) == 1:
self._product_repr = existing.pop()
if self.interactive:
print "\nProduct representation exists: " + \
Style.bright + self._product_repr.spec + Style.reset
else:
try:
self._product_repr = ProductRepresentation.create(
product_version=self._product_version.spec,
resolution=self._resolution,
representation_type=self._file_type,
creation_location=current_location_code(),
creator=current_username(),
)
except ProductRepresentationError as e:
raise ActionError(
"Unable to create product representation: " + str(e))
else:
if self.interactive:
print "\nCreated product representation: " + \
Style.bright + self._product_repr.spec + Style.reset
# -------------------------------------------------------------------------
def _create_status(self):
existing = ProductRepresentationStatus.list(
product_representation=self._product_repr.spec,
location=current_location_code(),
)
if len(existing) == 1:
self._product_repr_status = existing.pop()
if self.interactive:
print "\nProduct representation status exists: " + \
Style.bright + self._product_repr_status.spec + Style.reset
else:
try:
self._product_repr_status = ProductRepresentationStatus.create(
product_representation=self._product_repr.spec,
location=current_location_code(),
status=1,
)
except ProductRepresentationStatusError as e:
raise ActionError(
"Unable to create product representation status: " + str(e))
else:
if self.interactive:
print "\nCreated product representation status: " + \
Style.bright + self._product_repr_status.spec + \
Style.reset
# -------------------------------------------------------------------------
def _create_area(self):
try:
self._product_area = PTaskArea.create(self.product_repr)
except PTaskAreaError as e:
raise ActionError(
"Unable to create product area on disk: " + str(e))
# -------------------------------------------------------------------------
def _parse_product(self):
# split the supplied product string to determine additional parts.
# this sets unknown values to None
(name, cat, ver, file_type, res) = list(
self._product.split(PTaskSpec.SEPARATOR, 5) + [None] * 5
)[0:5]
# name
self._name = name
# category
if cat:
if self._category and self._category != cat:
raise ActionError(
"Different categories specified: {c1} & {c2}".format(
c1=self._category, c2=cat))
self._category = cat
# version
if ver:
try:
ver = int(ver)
except ValueError:
raise ActionError("Invalid version specified.")
if self._version and self._version != ver:
raise ActionError(
"Different versions specified: {v1} & {v2}".format(
v1=self._version, v2=ver))
self._version = ver
# file_type
if file_type:
if self._file_type and self._file_type != file_type:
raise ActionError(
"Different file types specified: {t1} & {t2}".format(
t1=self._file_type, t2=file_type))
self._file_type = file_type
# resolution
if res:
if self._resolution and self._resolution != res:
raise ActionError(
"Different resolutions specified: {r1} & {r2}".format(
r1=self._resolution, c2=res))
self._resolution = res
# -------------------------------------------------------------------------
def _sync_path(self):
if self._product_version.published:
raise ActionError(
"Product version is already published at this version. Can " + \
"not overwrite: " + self._product_version.spec)
if not self._path:
return
sync = SyncAction(
source=self._path,
destination=self._product_area.path,
)
try:
sync()
except ActionError as e:
raise ActionError("Failed to sync product source: " + str(e))
| {
"content_hash": "c35acae5ca01dcefff8ab0b48c78ed5e",
"timestamp": "",
"source": "github",
"line_count": 556,
"max_line_length": 80,
"avg_line_length": 33.710431654676256,
"alnum_prop": 0.45697060235821374,
"repo_name": "Clemson-DPA/dpa-pipe",
"id": "48e7338f8cdbe57d51c9602439628f42836a4379",
"size": "18825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dpa/product/action/create.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "802278"
},
{
"name": "Shell",
"bytes": "9817"
}
],
"symlink_target": ""
} |
people = 30
cars = 40
buses = 15
if cars > people:
print "We should take the cars."
elif cars < people:
print "We should not take the cars."
else:
print "We can't decide."
if buses > cars:
print "That's too many buses."
elif buses < cars:
print "Maybe we could take the buses."
else:
print "We still can't decide."
if people > buses:
print "Alright, let's just take the buses."
else:
print "Fine, let's stay home then." | {
"content_hash": "a40fdfeef6d7d3ada967631f12d63228",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 44,
"avg_line_length": 18.73913043478261,
"alnum_prop": 0.6821345707656613,
"repo_name": "kaitlinahrens/learn-python-the-hard-way",
"id": "26a81f0fd77726e04abf7bd504706b77a471e7eb",
"size": "431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex30.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37546"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from prompt_toolkit.keys import Keys
from ..key_bindings import KeyBindings
__all__ = [
'load_cpr_bindings',
]
def load_cpr_bindings():
key_bindings = KeyBindings()
@key_bindings.add(Keys.CPRResponse, save_before=lambda e: False)
def _(event):
"""
Handle incoming Cursor-Position-Request response.
"""
# The incoming data looks like u'\x1b[35;1R'
# Parse row/col information.
row, col = map(int, event.data[2:-1].split(';'))
# Report absolute cursor position to the renderer.
event.app.renderer.report_absolute_cursor_row(row)
return key_bindings
| {
"content_hash": "56efbe41aabd920bff9b0cc5c387491c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 27.08,
"alnum_prop": 0.6440177252584933,
"repo_name": "lmregus/Portfolio",
"id": "bc3334a22039abb673aeb0b737daf43c05cecceb",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/key_binding/bindings/cpr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "27682"
},
{
"name": "C++",
"bytes": "25458"
},
{
"name": "CSS",
"bytes": "12842"
},
{
"name": "HTML",
"bytes": "49171"
},
{
"name": "Java",
"bytes": "99711"
},
{
"name": "JavaScript",
"bytes": "827"
},
{
"name": "Python",
"bytes": "42857"
},
{
"name": "Shell",
"bytes": "5710"
}
],
"symlink_target": ""
} |
import sys
def version(prog):
print(prog, "0.0.1")
def opterr(prog, wrngopt):
print("%s: invalid option -- \'%s\'" % (prog, wrngopt.opt), file=sys.stderr)
print("Try \'%s --help\' for more information." % (prog), file=sys.stderr)
exit(1)
def ferr(prog, file):
print("%s: %s: No such file or directory" % (prog, file), file=sys.stderr)
exit(2)
def missop(prog):
print("%s: missing operand" % (prog), file=sys.stderr)
print("Try \'%s --help\' for more information." % (prog), file=sys.stderr)
exit(1)
def extraop(prog, op):
print("%s: extra operand \'%s\'" % (prog, op), file=sys.stderr)
print("Try \'%s --help\' for more information." % (prog), file=sys.stderr)
exit(1)
| {
"content_hash": "9f4e5a3e8e00b8fbb555d70317e6a634",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 80,
"avg_line_length": 25.964285714285715,
"alnum_prop": 0.5969738651994498,
"repo_name": "FreeBirdLjj/py-coreutils",
"id": "a71f409ebfc944ebdb323e9d96c3b1248147fc67",
"size": "727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "57062"
}
],
"symlink_target": ""
} |
import xml.etree.ElementTree as ET
class brocade_firmware(object):
"""Auto generated class.
"""
def __init__(self, **kwargs):
self._callback = kwargs.pop('callback')
def firmware_autoupgrade_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade = ET.SubElement(firmware, "autoupgrade")
enable = ET.SubElement(autoupgrade, "enable")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_params_path(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
path = ET.SubElement(autoupgrade_params, "path")
path.text = kwargs.pop('path')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_params_protocol(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
protocol = ET.SubElement(autoupgrade_params, "protocol")
protocol.text = kwargs.pop('protocol')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_params_ipaddress(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
ipaddress = ET.SubElement(autoupgrade_params, "ipaddress")
ipaddress.text = kwargs.pop('ipaddress')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_params_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
username = ET.SubElement(autoupgrade_params, "username")
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_params_pss(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
pss = ET.SubElement(autoupgrade_params, "pass")
pss.text = kwargs.pop('pss')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_input_fwdl_tid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
input = ET.SubElement(fwdl_status, "input")
fwdl_tid = ET.SubElement(input, "fwdl-tid")
fwdl_tid.text = kwargs.pop('fwdl_tid')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_number_of_entries(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
number_of_entries = ET.SubElement(output, "number-of-entries")
number_of_entries.text = kwargs.pop('number_of_entries')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_state = ET.SubElement(output, "fwdl-state")
fwdl_state.text = kwargs.pop('fwdl_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
index = ET.SubElement(fwdl_entries, "index")
index.text = kwargs.pop('index')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_message_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
message_id = ET.SubElement(fwdl_entries, "message-id")
message_id.text = kwargs.pop('message_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_date_and_time_info(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
date_and_time_info = ET.SubElement(fwdl_entries, "date-and-time-info")
date_and_time_info.text = kwargs.pop('date_and_time_info')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_message(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
message = ET.SubElement(fwdl_entries, "message")
message.text = kwargs.pop('message')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_blade_slot(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_slot = ET.SubElement(fwdl_entries, "blade-slot")
blade_slot.text = kwargs.pop('blade_slot')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_blade_swbd(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_swbd = ET.SubElement(fwdl_entries, "blade-swbd")
blade_swbd.text = kwargs.pop('blade_swbd')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_blade_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_name = ET.SubElement(fwdl_entries, "blade-name")
blade_name.text = kwargs.pop('blade_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_blade_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_state = ET.SubElement(fwdl_entries, "blade-state")
blade_state.text = kwargs.pop('blade_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_blade_app(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_app = ET.SubElement(fwdl_entries, "blade-app")
blade_app.text = kwargs.pop('blade_app')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def activate_status_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
input = ET.SubElement(activate_status, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def activate_status_output_overall_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
output = ET.SubElement(activate_status, "output")
overall_status = ET.SubElement(output, "overall-status")
overall_status.text = kwargs.pop('overall_status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def activate_status_output_overall_error_msg(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
output = ET.SubElement(activate_status, "output")
overall_error_msg = ET.SubElement(output, "overall-error-msg")
overall_error_msg.text = kwargs.pop('overall_error_msg')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def activate_status_output_activate_entries_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
output = ET.SubElement(activate_status, "output")
activate_entries = ET.SubElement(output, "activate-entries")
rbridge_id = ET.SubElement(activate_entries, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def activate_status_output_activate_entries_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
output = ET.SubElement(activate_status, "output")
activate_entries = ET.SubElement(output, "activate-entries")
status = ET.SubElement(activate_entries, "status")
status.text = kwargs.pop('status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_user(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
user = ET.SubElement(input, "user")
user.text = kwargs.pop('user')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
password = ET.SubElement(input, "password")
password.text = kwargs.pop('password')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_host(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
host = ET.SubElement(input, "host")
host.text = kwargs.pop('host')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_directory(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
directory = ET.SubElement(input, "directory")
directory.text = kwargs.pop('directory')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_file(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
file = ET.SubElement(input, "file")
file.text = kwargs.pop('file')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_cluster_options_auto_activate_auto_activate(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
cluster_options = ET.SubElement(input, "cluster-options")
auto_activate = ET.SubElement(cluster_options, "auto-activate")
auto_activate = ET.SubElement(auto_activate, "auto-activate")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_cluster_options_coldboot_coldboot(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
cluster_options = ET.SubElement(input, "cluster-options")
coldboot = ET.SubElement(cluster_options, "coldboot")
coldboot = ET.SubElement(coldboot, "coldboot")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_protocol(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
protocol = ET.SubElement(input, "protocol")
protocol.text = kwargs.pop('protocol')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_output_fwdl_cmd_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
output = ET.SubElement(logical_chassis_fwdl_sanity, "output")
fwdl_cmd_status = ET.SubElement(output, "fwdl-cmd-status")
fwdl_cmd_status.text = kwargs.pop('fwdl_cmd_status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_output_fwdl_cmd_msg(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
output = ET.SubElement(logical_chassis_fwdl_sanity, "output")
fwdl_cmd_msg = ET.SubElement(output, "fwdl-cmd-msg")
fwdl_cmd_msg.text = kwargs.pop('fwdl_cmd_msg')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_output_cluster_output_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
output = ET.SubElement(logical_chassis_fwdl_sanity, "output")
cluster_output = ET.SubElement(output, "cluster-output")
rbridge_id = ET.SubElement(cluster_output, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_output_cluster_output_fwdl_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
output = ET.SubElement(logical_chassis_fwdl_sanity, "output")
cluster_output = ET.SubElement(output, "cluster-output")
fwdl_status = ET.SubElement(cluster_output, "fwdl-status")
fwdl_status.text = kwargs.pop('fwdl_status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_output_cluster_output_fwdl_msg(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
output = ET.SubElement(logical_chassis_fwdl_sanity, "output")
cluster_output = ET.SubElement(output, "cluster-output")
fwdl_msg = ET.SubElement(cluster_output, "fwdl-msg")
fwdl_msg.text = kwargs.pop('fwdl_msg')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
input = ET.SubElement(logical_chassis_fwdl_status, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_overall_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
overall_status = ET.SubElement(output, "overall-status")
overall_status.text = kwargs.pop('overall_status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
rbridge_id = ET.SubElement(cluster_fwdl_entries, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_state = ET.SubElement(cluster_fwdl_entries, "fwdl-state")
fwdl_state.text = kwargs.pop('fwdl_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
index = ET.SubElement(fwdl_entries, "index")
index.text = kwargs.pop('index')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_message_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
message_id = ET.SubElement(fwdl_entries, "message-id")
message_id.text = kwargs.pop('message_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_date_and_time_info(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
date_and_time_info = ET.SubElement(fwdl_entries, "date-and-time-info")
date_and_time_info.text = kwargs.pop('date_and_time_info')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_message(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
message = ET.SubElement(fwdl_entries, "message")
message.text = kwargs.pop('message')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_slot(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_slot = ET.SubElement(fwdl_entries, "blade-slot")
blade_slot.text = kwargs.pop('blade_slot')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_swbd(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_swbd = ET.SubElement(fwdl_entries, "blade-swbd")
blade_swbd.text = kwargs.pop('blade_swbd')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_name = ET.SubElement(fwdl_entries, "blade-name")
blade_name.text = kwargs.pop('blade_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_state = ET.SubElement(fwdl_entries, "blade-state")
blade_state.text = kwargs.pop('blade_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_app(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_app = ET.SubElement(fwdl_entries, "blade-app")
blade_app.text = kwargs.pop('blade_app')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def dad_status_output_dad_last_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_last_state = ET.SubElement(output, "dad-last-state")
dad_last_state.text = kwargs.pop('dad_last_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def dad_status_output_dad_status_entries_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_status_entries = ET.SubElement(output, "dad-status-entries")
index = ET.SubElement(dad_status_entries, "index")
index.text = kwargs.pop('index')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def dad_status_output_dad_status_entries_date_and_time_info(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_status_entries = ET.SubElement(output, "dad-status-entries")
date_and_time_info = ET.SubElement(dad_status_entries, "date-and-time-info")
date_and_time_info.text = kwargs.pop('date_and_time_info')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def dad_status_output_dad_status_entries_message(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_status_entries = ET.SubElement(output, "dad-status-entries")
message = ET.SubElement(dad_status_entries, "message")
message.text = kwargs.pop('message')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade = ET.SubElement(firmware, "autoupgrade")
enable = ET.SubElement(autoupgrade, "enable")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_params_path(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
path = ET.SubElement(autoupgrade_params, "path")
path.text = kwargs.pop('path')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_params_protocol(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
protocol = ET.SubElement(autoupgrade_params, "protocol")
protocol.text = kwargs.pop('protocol')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_params_ipaddress(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
ipaddress = ET.SubElement(autoupgrade_params, "ipaddress")
ipaddress.text = kwargs.pop('ipaddress')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_params_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
username = ET.SubElement(autoupgrade_params, "username")
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def firmware_autoupgrade_params_pss(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
pss = ET.SubElement(autoupgrade_params, "pass")
pss.text = kwargs.pop('pss')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_input_fwdl_tid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
input = ET.SubElement(fwdl_status, "input")
fwdl_tid = ET.SubElement(input, "fwdl-tid")
fwdl_tid.text = kwargs.pop('fwdl_tid')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_number_of_entries(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
number_of_entries = ET.SubElement(output, "number-of-entries")
number_of_entries.text = kwargs.pop('number_of_entries')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_state = ET.SubElement(output, "fwdl-state")
fwdl_state.text = kwargs.pop('fwdl_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
index = ET.SubElement(fwdl_entries, "index")
index.text = kwargs.pop('index')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_message_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
message_id = ET.SubElement(fwdl_entries, "message-id")
message_id.text = kwargs.pop('message_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_date_and_time_info(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
date_and_time_info = ET.SubElement(fwdl_entries, "date-and-time-info")
date_and_time_info.text = kwargs.pop('date_and_time_info')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_message(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
message = ET.SubElement(fwdl_entries, "message")
message.text = kwargs.pop('message')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_blade_slot(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_slot = ET.SubElement(fwdl_entries, "blade-slot")
blade_slot.text = kwargs.pop('blade_slot')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_blade_swbd(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_swbd = ET.SubElement(fwdl_entries, "blade-swbd")
blade_swbd.text = kwargs.pop('blade_swbd')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_blade_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_name = ET.SubElement(fwdl_entries, "blade-name")
blade_name.text = kwargs.pop('blade_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_blade_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_state = ET.SubElement(fwdl_entries, "blade-state")
blade_state.text = kwargs.pop('blade_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fwdl_status_output_fwdl_entries_blade_app(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_app = ET.SubElement(fwdl_entries, "blade-app")
blade_app.text = kwargs.pop('blade_app')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def activate_status_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
input = ET.SubElement(activate_status, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def activate_status_output_overall_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
output = ET.SubElement(activate_status, "output")
overall_status = ET.SubElement(output, "overall-status")
overall_status.text = kwargs.pop('overall_status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def activate_status_output_overall_error_msg(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
output = ET.SubElement(activate_status, "output")
overall_error_msg = ET.SubElement(output, "overall-error-msg")
overall_error_msg.text = kwargs.pop('overall_error_msg')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def activate_status_output_activate_entries_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
output = ET.SubElement(activate_status, "output")
activate_entries = ET.SubElement(output, "activate-entries")
rbridge_id = ET.SubElement(activate_entries, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def activate_status_output_activate_entries_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
output = ET.SubElement(activate_status, "output")
activate_entries = ET.SubElement(output, "activate-entries")
status = ET.SubElement(activate_entries, "status")
status.text = kwargs.pop('status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_user(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
user = ET.SubElement(input, "user")
user.text = kwargs.pop('user')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
password = ET.SubElement(input, "password")
password.text = kwargs.pop('password')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_host(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
host = ET.SubElement(input, "host")
host.text = kwargs.pop('host')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_directory(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
directory = ET.SubElement(input, "directory")
directory.text = kwargs.pop('directory')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_file(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
file = ET.SubElement(input, "file")
file.text = kwargs.pop('file')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_cluster_options_auto_activate_auto_activate(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
cluster_options = ET.SubElement(input, "cluster-options")
auto_activate = ET.SubElement(cluster_options, "auto-activate")
auto_activate = ET.SubElement(auto_activate, "auto-activate")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_cluster_options_coldboot_coldboot(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
cluster_options = ET.SubElement(input, "cluster-options")
coldboot = ET.SubElement(cluster_options, "coldboot")
coldboot = ET.SubElement(coldboot, "coldboot")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_input_protocol(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
protocol = ET.SubElement(input, "protocol")
protocol.text = kwargs.pop('protocol')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_output_fwdl_cmd_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
output = ET.SubElement(logical_chassis_fwdl_sanity, "output")
fwdl_cmd_status = ET.SubElement(output, "fwdl-cmd-status")
fwdl_cmd_status.text = kwargs.pop('fwdl_cmd_status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_output_fwdl_cmd_msg(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
output = ET.SubElement(logical_chassis_fwdl_sanity, "output")
fwdl_cmd_msg = ET.SubElement(output, "fwdl-cmd-msg")
fwdl_cmd_msg.text = kwargs.pop('fwdl_cmd_msg')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_output_cluster_output_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
output = ET.SubElement(logical_chassis_fwdl_sanity, "output")
cluster_output = ET.SubElement(output, "cluster-output")
rbridge_id = ET.SubElement(cluster_output, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_output_cluster_output_fwdl_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
output = ET.SubElement(logical_chassis_fwdl_sanity, "output")
cluster_output = ET.SubElement(output, "cluster-output")
fwdl_status = ET.SubElement(cluster_output, "fwdl-status")
fwdl_status.text = kwargs.pop('fwdl_status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_sanity_output_cluster_output_fwdl_msg(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
output = ET.SubElement(logical_chassis_fwdl_sanity, "output")
cluster_output = ET.SubElement(output, "cluster-output")
fwdl_msg = ET.SubElement(cluster_output, "fwdl-msg")
fwdl_msg.text = kwargs.pop('fwdl_msg')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
input = ET.SubElement(logical_chassis_fwdl_status, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_overall_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
overall_status = ET.SubElement(output, "overall-status")
overall_status.text = kwargs.pop('overall_status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
rbridge_id = ET.SubElement(cluster_fwdl_entries, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_state = ET.SubElement(cluster_fwdl_entries, "fwdl-state")
fwdl_state.text = kwargs.pop('fwdl_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
index = ET.SubElement(fwdl_entries, "index")
index.text = kwargs.pop('index')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_message_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
message_id = ET.SubElement(fwdl_entries, "message-id")
message_id.text = kwargs.pop('message_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_date_and_time_info(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
date_and_time_info = ET.SubElement(fwdl_entries, "date-and-time-info")
date_and_time_info.text = kwargs.pop('date_and_time_info')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_message(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
message = ET.SubElement(fwdl_entries, "message")
message.text = kwargs.pop('message')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_slot(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_slot = ET.SubElement(fwdl_entries, "blade-slot")
blade_slot.text = kwargs.pop('blade_slot')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_swbd(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_swbd = ET.SubElement(fwdl_entries, "blade-swbd")
blade_swbd.text = kwargs.pop('blade_swbd')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_name = ET.SubElement(fwdl_entries, "blade-name")
blade_name.text = kwargs.pop('blade_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_state = ET.SubElement(fwdl_entries, "blade-state")
blade_state.text = kwargs.pop('blade_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_app(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_app = ET.SubElement(fwdl_entries, "blade-app")
blade_app.text = kwargs.pop('blade_app')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def dad_status_output_dad_last_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_last_state = ET.SubElement(output, "dad-last-state")
dad_last_state.text = kwargs.pop('dad_last_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def dad_status_output_dad_status_entries_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_status_entries = ET.SubElement(output, "dad-status-entries")
index = ET.SubElement(dad_status_entries, "index")
index.text = kwargs.pop('index')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def dad_status_output_dad_status_entries_date_and_time_info(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_status_entries = ET.SubElement(output, "dad-status-entries")
date_and_time_info = ET.SubElement(dad_status_entries, "date-and-time-info")
date_and_time_info.text = kwargs.pop('date_and_time_info')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def dad_status_output_dad_status_entries_message(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_status_entries = ET.SubElement(output, "dad-status-entries")
message = ET.SubElement(dad_status_entries, "message")
message.text = kwargs.pop('message')
callback = kwargs.pop('callback', self._callback)
return callback(config)
| {
"content_hash": "f3a32345dce744a518bd40d11d0060f6",
"timestamp": "",
"source": "github",
"line_count": 1478,
"max_line_length": 112,
"avg_line_length": 43.08592692828146,
"alnum_prop": 0.6287746737645451,
"repo_name": "SivagnanamCiena/pynos",
"id": "c3b4ef40b947aeda86aedc6acc216a2fba9940ca",
"size": "63703",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pynos/versions/base/yang/brocade_firmware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20665905"
}
],
"symlink_target": ""
} |
from .base import *
from .airos import * | {
"content_hash": "c523e5699f2691f5b9620f71f70d6759",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 20,
"avg_line_length": 20,
"alnum_prop": 0.725,
"repo_name": "ninuxorg/netengine",
"id": "bc70f2cb2bcf1ff374a1f7c1f28a59b118a3180d",
"size": "40",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/http/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6783"
},
{
"name": "Python",
"bytes": "901096"
}
],
"symlink_target": ""
} |
from oslo_versionedobjects import fields
from magnum.db import api as dbapi
from magnum.objects import base
# possible status
ERROR = 'Error'
RUNNING = 'Running'
STOPPED = 'Stopped'
PAUSED = 'Paused'
@base.MagnumObjectRegistry.register
class Container(base.MagnumPersistentObject, base.MagnumObject,
base.MagnumObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
dbapi = dbapi.get_instance()
fields = {
'id': fields.IntegerField(),
'uuid': fields.StringField(nullable=True),
'name': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'image': fields.StringField(nullable=True),
'command': fields.StringField(nullable=True),
'bay_uuid': fields.StringField(nullable=True),
'status': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(container, db_container):
"""Converts a database entity to a formal object."""
for field in container.fields:
container[field] = db_container[field]
container.obj_reset_changes()
return container
@staticmethod
def _from_db_object_list(db_objects, cls, context):
"""Converts a list of database entities to a list of formal objects."""
return [Container._from_db_object(cls(context), obj)
for obj in db_objects]
@base.remotable_classmethod
def get_by_id(cls, context, container_id):
"""Find a container based on its integer id and return a Container object.
:param container_id: the id of a container.
:returns: a :class:`Container` object.
"""
db_container = cls.dbapi.get_container_by_id(context, container_id)
container = Container._from_db_object(cls(context), db_container)
return container
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
"""Find a container based on uuid and return a :class:`Container` object.
:param uuid: the uuid of a container.
:param context: Security context
:returns: a :class:`Container` object.
"""
db_container = cls.dbapi.get_container_by_uuid(context, uuid)
container = Container._from_db_object(cls(context), db_container)
return container
@base.remotable_classmethod
def get_by_name(cls, context, name):
"""Find a bay based on name and return a Bay object.
:param name: the logical name of a bay.
:param context: Security context
:returns: a :class:`Bay` object.
"""
db_bay = cls.dbapi.get_container_by_name(context, name)
bay = Container._from_db_object(cls(context), db_bay)
return bay
@base.remotable_classmethod
def list(cls, context, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of Container objects.
:param context: Security context.
:param limit: maximum number of resources to return in a single result.
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
:returns: a list of :class:`Container` object.
"""
db_containers = cls.dbapi.get_container_list(context, limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir)
return Container._from_db_object_list(db_containers, cls, context)
@base.remotable
def create(self, context=None):
"""Create a Container record in the DB.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: Container(context)
"""
values = self.obj_get_changes()
db_container = self.dbapi.create_container(values)
self._from_db_object(self, db_container)
@base.remotable
def destroy(self, context=None):
"""Delete the Container from the DB.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: Container(context)
"""
self.dbapi.destroy_container(self.uuid)
self.obj_reset_changes()
@base.remotable
def save(self, context=None):
"""Save updates to this Container.
Updates will be made column by column based on the result
of self.what_changed().
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: Container(context)
"""
updates = self.obj_get_changes()
self.dbapi.update_container(self.uuid, updates)
self.obj_reset_changes()
@base.remotable
def refresh(self, context=None):
"""Loads updates for this Container.
Loads a container with the same uuid from the database and
checks for updated attributes. Updates are applied from
the loaded container column by column, if there are any updates.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: Container(context)
"""
current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
for field in self.fields:
if self.obj_attr_is_set(field) and self[field] != current[field]:
self[field] = current[field]
| {
"content_hash": "c79214d0dcac2a227c0ec810a8da15b0",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 82,
"avg_line_length": 39.15882352941176,
"alnum_prop": 0.5995193029893345,
"repo_name": "ramielrowe/magnum",
"id": "fefd460f401f5f4ce8e129e4b2b04e38488ba490",
"size": "7249",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "magnum/objects/container.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "1724974"
},
{
"name": "Shell",
"bytes": "22656"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
import threading
_counters = {}
_counter_lock = threading.Lock()
def next_counter(name):
with _counter_lock:
cur = _counters[name] = _counters.get(name, 0) + 1
return cur
def next_name(name):
return "%s-%i" % (name, next_counter(name))
| {
"content_hash": "6525a7eb29f40c2542785bc8da08cb28",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 58,
"avg_line_length": 21.133333333333333,
"alnum_prop": 0.6529968454258676,
"repo_name": "morelab/weblabdeusto",
"id": "b7af0dcf0e182e43ffa1d3afe3d53705901159ce",
"size": "699",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/src/voodoo/counter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP.NET",
"bytes": "4785"
},
{
"name": "ActionScript",
"bytes": "8508"
},
{
"name": "Batchfile",
"bytes": "7753"
},
{
"name": "C",
"bytes": "19456"
},
{
"name": "C#",
"bytes": "315160"
},
{
"name": "C++",
"bytes": "9547"
},
{
"name": "CSS",
"bytes": "202991"
},
{
"name": "CoffeeScript",
"bytes": "39146"
},
{
"name": "Go",
"bytes": "7076"
},
{
"name": "HTML",
"bytes": "620835"
},
{
"name": "Java",
"bytes": "856300"
},
{
"name": "JavaScript",
"bytes": "1606001"
},
{
"name": "Less",
"bytes": "13422"
},
{
"name": "Makefile",
"bytes": "24995"
},
{
"name": "Mako",
"bytes": "1236"
},
{
"name": "PHP",
"bytes": "159985"
},
{
"name": "Python",
"bytes": "3739523"
},
{
"name": "Shell",
"bytes": "7880"
},
{
"name": "Smarty",
"bytes": "42585"
},
{
"name": "VHDL",
"bytes": "5874"
}
],
"symlink_target": ""
} |
from .backends.couch import TendersStorage, ReleasesStorage
from .backends.fs import FSStorage
__all__ = [TendersStorage, ReleasesStorage, FSStorage]
| {
"content_hash": "9b70e0d87a7526310068116187525498",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 59,
"avg_line_length": 37.75,
"alnum_prop": 0.8079470198675497,
"repo_name": "yshalenyk/ocds.storage",
"id": "50deea64753eba9045450fdf6f3f14ab567b408c",
"size": "151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ocds/storage/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12967"
}
],
"symlink_target": ""
} |
"""Workflow for serving static content of Django projects."""
from django_cloud_deploy.cloudlib import storage
from google.auth import credentials
class StaticContentServeWorkflow(object):
"""A class to control the workflow of serving static content."""
# The directory in Google Cloud Storage bucket to save static content
GCS_STATIC_FILE_DIR = 'static'
def __init__(self, credentials: credentials.Credentials):
self._storage_client = (
storage.StorageClient.from_credentials(credentials))
def serve_static_content(self, project_id: str, bucket_name: str,
static_content_dir: str):
"""Do all the work for serving static content of the provided project.
The static content is served with a public Google Cloud Storage Bucket.
Args:
project_id: Id of GCP project.
bucket_name: Name of the bucket to create and serve static content.
static_content_dir: Absolute path of the directory for static
content.
"""
self._storage_client.collect_static_content()
self._storage_client.create_bucket(project_id, bucket_name)
self._storage_client.make_bucket_public(bucket_name)
self._storage_client.upload_content(bucket_name, static_content_dir,
self.GCS_STATIC_FILE_DIR)
def set_cors_policy(self, bucket_name: str, origin: str):
self._storage_client.set_cors_policy(bucket_name, origin)
def serve_secret_content(self, project_id: str, bucket_name: str,
secrec_content_dir: str):
"""Do all the work for serving secret content of the provided project.
The secret content is served with a Google Cloud Storage Bucket.
Args:
project_id: Id of GCP project.
bucket_name: Name of the bucket to create and serve secret content.
secrec_content_dir: Absolute path of the directory for secret
content.
"""
self._storage_client.create_bucket(project_id, bucket_name)
self._storage_client.upload_content(bucket_name, secrec_content_dir,
'secrets')
def update_static_content(self, bucket_name: str, static_content_dir: str):
"""Update GCS bucket after user modified the Django app.
Args:
bucket_name: Name of the bucket to create and serve static content.
static_content_dir: Absolute path of the directory for static
content.
"""
self._storage_client.collect_static_content()
self._storage_client.upload_content(bucket_name, static_content_dir,
self.GCS_STATIC_FILE_DIR)
| {
"content_hash": "27f075c3637212aec547caa94519db1d",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 41.940298507462686,
"alnum_prop": 0.6274021352313167,
"repo_name": "GoogleCloudPlatform/django-cloud-deploy",
"id": "191a59714c4487090ca3380745d220f888484521",
"size": "3385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_cloud_deploy/workflow/_static_content_serve.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "635"
},
{
"name": "Python",
"bytes": "583745"
},
{
"name": "Shell",
"bytes": "997"
}
],
"symlink_target": ""
} |
from google.cloud import datacatalog_v1
def sample_update_taxonomy():
# Create a client
client = datacatalog_v1.PolicyTagManagerClient()
# Initialize request argument(s)
request = datacatalog_v1.UpdateTaxonomyRequest(
)
# Make the request
response = client.update_taxonomy(request=request)
# Handle the response
print(response)
# [END datacatalog_v1_generated_PolicyTagManager_UpdateTaxonomy_sync]
| {
"content_hash": "1695a4d2b27fd07acd56059d373e73cd",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 24.444444444444443,
"alnum_prop": 0.7340909090909091,
"repo_name": "googleapis/python-datacatalog",
"id": "ea3262c0cda22bff8452aa1c00df7239e39da96a",
"size": "1838",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/datacatalog_v1_generated_policy_tag_manager_update_taxonomy_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "3073442"
},
{
"name": "Shell",
"bytes": "30675"
}
],
"symlink_target": ""
} |
"""
monolith is an argparse based command line interface framework
"""
VERSION = (0, 3, 4, 'dev')
__version__ = '.'.join((str(each) for each in VERSION[:4]))
def get_version():
"""
Returns shorter version (digit parts only) as string.
"""
return '.'.join((str(each) for each in VERSION[:4]))
| {
"content_hash": "e1a351721f8f2347228f1ce3115888d1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 23.923076923076923,
"alnum_prop": 0.617363344051447,
"repo_name": "lukaszb/monolith",
"id": "f21d61b04994aa22d23a10063369bc218f10f10b",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monolith/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "49145"
},
{
"name": "Shell",
"bytes": "3592"
}
],
"symlink_target": ""
} |
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
ROOT = r'/usr'
# Thread-enabled libxml2
with_threads = 1
# If this flag is set (windows only),
# a private copy of the dlls are included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(file):
if os.access(file, os.R_OK) == 0:
return 1
return 0
try:
HOME = os.environ['HOME']
except:
HOME="C:"
if WITHDLLS:
# libxml dlls (expected in ROOT/bin)
dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
dlls = [os.path.join(ROOT,'bin',dll) for dll in dlls]
# create __init__.py for the libxmlmods package
if not os.path.exists("libxmlmods"):
os.mkdir("libxmlmods")
open("libxmlmods/__init__.py","w").close()
def altImport(s):
s = s.replace("import libxml2mod","from libxmlmods import libxml2mod")
s = s.replace("import libxsltmod","from libxmlmods import libxsltmod")
return s
if sys.platform.startswith('win'):
libraryPrefix = 'lib'
platformLibs = []
else:
libraryPrefix = ''
platformLibs = ["m","z"]
# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
"/usr/include",
"/usr/local/include",
"/opt/include",
os.path.join(ROOT,'include'),
HOME
];
xml_includes=""
for dir in includes_dir:
if not missing(dir + "/libxml2/libxml/tree.h"):
xml_includes=dir + "/libxml2"
break;
if xml_includes == "":
print("failed to find headers for libxml2: update includes_dir")
sys.exit(1)
iconv_includes=""
for dir in includes_dir:
if not missing(dir + "/iconv.h"):
iconv_includes=dir
break;
if iconv_includes == "":
print("failed to find headers for libiconv: update includes_dir")
sys.exit(1)
# those are added in the linker search path for libraries
libdirs = [
os.path.join(ROOT,'lib'),
]
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
"libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
"xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]
xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
"libxslt.c", "libxsl.py", "libxslt_wrap.h",
"xsltgenerator.py"]
if missing("libxml2-py.c") or missing("libxml2.py"):
try:
try:
import xmlgenerator
except:
import generator
except:
print("failed to find and generate stubs for libxml2, aborting ...")
print(sys.exc_info()[0], sys.exc_info()[1])
sys.exit(1)
head = open("libxml.py", "r")
generated = open("libxml2class.py", "r")
result = open("libxml2.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
print("libxslt stub generator not found, libxslt not built")
else:
try:
import xsltgenerator
except:
print("failed to generate stubs for libxslt, aborting ...")
print(sys.exc_info()[0], sys.exc_info()[1])
else:
head = open("libxsl.py", "r")
generated = open("libxsltclass.py", "r")
result = open("libxslt.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=1
else:
with_xslt=1
if with_xslt == 1:
xslt_includes=""
for dir in includes_dir:
if not missing(dir + "/libxslt/xsltconfig.h"):
xslt_includes=dir + "/libxslt"
break;
if xslt_includes == "":
print("failed to find headers for libxslt: update includes_dir")
with_xslt = 0
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
macros.append(('_REENTRANT','1'))
if with_xslt == 1:
descr = "libxml2 and libxslt package"
if not sys.platform.startswith('win'):
#
# We are gonna build 2 identical shared libs with merge initializing
# both libxml2mod and libxsltmod
#
c_files = c_files + ['libxslt-py.c', 'libxslt.c']
xslt_c_files = c_files
macros.append(('MERGED_MODULES', '1'))
else:
#
# On windows the MERGED_MODULE option is not needed
# (and does not work)
#
xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
libs.insert(0, libraryPrefix + 'exslt')
libs.insert(0, libraryPrefix + 'xslt')
includes.append(xslt_includes)
modules.append('libxslt')
extens=[Extension('libxml2mod', c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros)]
if with_xslt == 1:
extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros))
if missing("MANIFEST"):
manifest = open("MANIFEST", "w")
manifest.write("setup.py\n")
for file in xml_files:
manifest.write(file + "\n")
if with_xslt == 1:
for file in xslt_files:
manifest.write(file + "\n")
manifest.close()
if WITHDLLS:
ext_package = "libxmlmods"
if sys.version >= "2.2":
base = "lib/site-packages/"
else:
base = ""
data_files = [(base+"libxmlmods",dlls)]
else:
ext_package = None
data_files = []
setup (name = "libxml2-python",
# On *nix, the version number is created from setup.py.in
# On windows, it is set by configure.js
version = "2.9.2",
description = descr,
author = "Daniel Veillard",
author_email = "[email protected]",
url = "http://xmlsoft.org/python.html",
licence="MIT Licence",
py_modules=modules,
ext_modules=extens,
ext_package=ext_package,
data_files=data_files,
)
sys.exit(0)
| {
"content_hash": "c727d0c2a09761dace536bf1d80d52f4",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 78,
"avg_line_length": 28.962184873949578,
"alnum_prop": 0.5927752792688235,
"repo_name": "schober2/tc359_helper_methods",
"id": "e5043237c78111e9fa78ae0fe95624a7af9064e2",
"size": "6966",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "path/ruby/2.0.0/gems/nokogiri-1.6.6.2/ext/nokogiri/tmp/x86_64-apple-darwin14/ports/libxml2/2.9.2/libxml2-2.9.2/python/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Bison",
"bytes": "7171"
},
{
"name": "C",
"bytes": "9857703"
},
{
"name": "C++",
"bytes": "133582"
},
{
"name": "CSS",
"bytes": "820"
},
{
"name": "Clean",
"bytes": "6801"
},
{
"name": "Erlang",
"bytes": "169"
},
{
"name": "HTML",
"bytes": "8485754"
},
{
"name": "JavaScript",
"bytes": "32107"
},
{
"name": "Makefile",
"bytes": "1820"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "PHP",
"bytes": "19144"
},
{
"name": "Perl",
"bytes": "3028"
},
{
"name": "Python",
"bytes": "1110469"
},
{
"name": "Ruby",
"bytes": "647022"
},
{
"name": "Shell",
"bytes": "367777"
},
{
"name": "VCL",
"bytes": "4153"
},
{
"name": "XSLT",
"bytes": "174081"
}
],
"symlink_target": ""
} |
"""Example code of learning a large scale convnet from LSVRC2012 dataset
with multiple GPUs using data parallelism.
Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images, scale them to 256x256 and convert them to RGB, and make
two lists of space-separated CSV whose first column is full path to image and
second column is zero-origin label (this format is same as that used by Caffe's
ImageDataLayer).
You need to install chainer with NCCL to run this example.
Please see https://github.com/nvidia/nccl#build--run .
"""
import argparse
import numpy as np
import chainer
from chainer import training
from chainer.training import extensions
from chainer.training import updaters
import alex
import googlenet
import googlenetbn
import nin
import resnet50
import resnext50
import train_imagenet
def main():
archs = {
'alex': alex.Alex,
'googlenet': googlenet.GoogLeNet,
'googlenetbn': googlenetbn.GoogLeNetBN,
'nin': nin.NIN,
'resnet50': resnet50.ResNet50,
'resnext50': resnext50.ResNeXt50,
}
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--arch', '-a', choices=archs.keys(),
default='nin', help='Convnet architecture')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--epoch', '-E', type=int, default=10,
help='Number of epochs to train')
parser.add_argument('--gpus', '-g', type=int, nargs='*',
default=[0, 1, 2, 3])
parser.add_argument('--initmodel',
help='Initialize the model from given file')
parser.add_argument('--loaderjob', '-j', type=int,
help='Number of parallel data loading processes')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Mean file (computed by compute_mean.py)')
parser.add_argument('--resume', '-r', default='',
help='Initialize the trainer from given file')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--root', '-R', default='.',
help='Root directory path of image files')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--test', action='store_true')
parser.set_defaults(test=False)
args = parser.parse_args()
# Initialize the model to train
model = archs[args.arch]()
if args.initmodel:
print('Load model from {}'.format(args.initmodel))
chainer.serializers.load_npz(args.initmodel, model)
# Load the datasets and mean file
mean = np.load(args.mean)
train = train_imagenet.PreprocessedDataset(
args.train, args.root, mean, model.insize)
val = train_imagenet.PreprocessedDataset(
args.val, args.root, mean, model.insize, False)
# These iterators load the images with subprocesses running in parallel to
# the training/validation.
devices = tuple(args.gpus)
train_iters = [
chainer.iterators.MultiprocessIterator(i,
args.batchsize,
n_processes=args.loaderjob)
for i in chainer.datasets.split_dataset_n_random(train, len(devices))]
val_iter = chainer.iterators.MultiprocessIterator(
val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)
# Set up an optimizer
optimizer = chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)
# Set up a trainer
updater = updaters.MultiprocessParallelUpdater(train_iters, optimizer,
devices=devices)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)
if args.test:
val_interval = 5, 'epoch'
log_interval = 1, 'epoch'
else:
val_interval = 100000, 'iteration'
log_interval = 1000, 'iteration'
trainer.extend(extensions.Evaluator(val_iter, model, device=args.gpus[0]),
trigger=val_interval)
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=val_interval)
trainer.extend(extensions.snapshot_object(
model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
# Be careful to pass the interval directly to LogReport
# (it determines when to emit log rather than when to read observations)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.observe_lr(), trigger=log_interval)
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'lr'
]), trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=2))
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
| {
"content_hash": "24e57421f9e5f77774b52278c80c415f",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 79,
"avg_line_length": 40.148148148148145,
"alnum_prop": 0.6452029520295203,
"repo_name": "tkerola/chainer",
"id": "a41c5f50bf54cf5d2ea8b03f5afcbe668f5a3d4f",
"size": "5442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/imagenet/train_imagenet_data_parallel.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3471733"
}
],
"symlink_target": ""
} |
"""Submit one or more try jobs."""
import argparse
import json
import os
import re
import subprocess
import sys
import tempfile
BUCKET_SKIA_PRIMARY = 'skia/skia.primary'
BUCKET_SKIA_INTERNAL = 'skia-internal/skia.internal'
INFRA_BOTS = os.path.join('infra', 'bots')
TASKS_JSON = os.path.join(INFRA_BOTS, 'tasks.json')
REPO_INTERNAL = 'https://skia.googlesource.com/internal_test.git'
TMP_DIR = os.path.join(tempfile.gettempdir(), 'sktry')
SKIA_ROOT = os.path.realpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
SKIA_INFRA_BOTS = os.path.join(SKIA_ROOT, INFRA_BOTS)
sys.path.insert(0, SKIA_INFRA_BOTS)
import utils
def find_repo_root():
"""Find the root directory of the current repository."""
cwd = os.getcwd()
while True:
if os.path.isdir(os.path.join(cwd, '.git')):
return cwd
next_cwd = os.path.dirname(cwd)
if next_cwd == cwd:
raise Exception('Failed to find repo root!')
cwd = next_cwd
def get_jobs(repo):
"""Obtain the list of jobs from the given repo."""
# Maintain a copy of the repo in the temp dir.
if not os.path.isdir(TMP_DIR):
os.mkdir(TMP_DIR)
with utils.chdir(TMP_DIR):
dirname = repo.split('/')[-1]
if not os.path.isdir(dirname):
subprocess.check_call([
utils.GIT, 'clone', '--mirror', repo, dirname])
with utils.chdir(dirname):
subprocess.check_call([utils.GIT, 'remote', 'update'])
jobs = json.loads(subprocess.check_output([
utils.GIT, 'show', 'master:%s' % JOBS_JSON]))
return (BUCKET_SKIA_INTERNAL, jobs)
def main():
# Parse arguments.
d = 'Helper script for triggering try jobs.'
parser = argparse.ArgumentParser(description=d)
parser.add_argument('--list', action='store_true', default=False,
help='Just list the jobs; do not trigger anything.')
parser.add_argument('--internal', action='store_true', default=False,
help=('If set, include internal jobs. You must have '
'permission to view internal repos.'))
parser.add_argument('job', nargs='?', default=None,
help='Job name or regular expression to match job names.')
args = parser.parse_args()
# Load and filter the list of jobs.
jobs = []
tasks_json = os.path.join(find_repo_root(), TASKS_JSON)
with open(tasks_json) as f:
tasks_cfg = json.load(f)
skia_primary_jobs = []
for k, v in tasks_cfg['jobs'].iteritems():
skia_primary_jobs.append(k)
skia_primary_jobs.sort()
# TODO(borenet): This assumes that the current repo is associated with the
# skia.primary bucket. This will work for most repos but it would be better to
# look up the correct bucket to use.
jobs.append((BUCKET_SKIA_PRIMARY, skia_primary_jobs))
if args.internal:
jobs.append(get_jobs(REPO_INTERNAL))
if args.job:
filtered_jobs = []
for bucket, job_list in jobs:
filtered = [j for j in job_list if re.search(args.job, j)]
if len(filtered) > 0:
filtered_jobs.append((bucket, filtered))
jobs = filtered_jobs
# Display the list of jobs.
if len(jobs) == 0:
print 'Found no jobs matching "%s"' % repr(args.job)
sys.exit(1)
count = 0
for bucket, job_list in jobs:
count += len(job_list)
print 'Found %d jobs:' % count
for bucket, job_list in jobs:
print ' %s:' % bucket
for j in job_list:
print ' %s' % j
if args.list:
return
if count > 1:
# Prompt before triggering jobs.
resp = raw_input('\nDo you want to trigger these jobs? (y/n or i for '
'interactive): ')
print ''
if resp != 'y' and resp != 'i':
sys.exit(1)
if resp == 'i':
filtered_jobs = []
for bucket, job_list in jobs:
new_job_list = []
for j in job_list:
incl = raw_input(('Trigger %s? (y/n): ' % j))
if incl == 'y':
new_job_list.append(j)
if len(new_job_list) > 0:
filtered_jobs.append((bucket, new_job_list))
jobs = filtered_jobs
# Trigger the try jobs.
for bucket, job_list in jobs:
cmd = ['git', 'cl', 'try', '-B', bucket]
for j in job_list:
cmd.extend(['-b', j])
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
# Output from the command will fall through, so just exit here rather than
# printing a stack trace.
sys.exit(1)
if __name__ == '__main__':
main()
| {
"content_hash": "26401e142fdcedb39a924742d80e3f48",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 80,
"avg_line_length": 31.316901408450704,
"alnum_prop": 0.6210928715988306,
"repo_name": "HalCanary/skia-hc",
"id": "ccf9c2f09637a9bee7552df5cd1fb8fa33ea1476",
"size": "4605",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/try.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1277297"
},
{
"name": "Batchfile",
"bytes": "865"
},
{
"name": "C",
"bytes": "505166"
},
{
"name": "C#",
"bytes": "4683"
},
{
"name": "C++",
"bytes": "32234337"
},
{
"name": "CMake",
"bytes": "2850"
},
{
"name": "CSS",
"bytes": "3078"
},
{
"name": "Dockerfile",
"bytes": "14764"
},
{
"name": "GLSL",
"bytes": "109164"
},
{
"name": "Go",
"bytes": "135327"
},
{
"name": "HTML",
"bytes": "1321397"
},
{
"name": "Java",
"bytes": "167849"
},
{
"name": "JavaScript",
"bytes": "463920"
},
{
"name": "Lex",
"bytes": "2521"
},
{
"name": "Lua",
"bytes": "70982"
},
{
"name": "Makefile",
"bytes": "13502"
},
{
"name": "Objective-C",
"bytes": "83351"
},
{
"name": "Objective-C++",
"bytes": "366996"
},
{
"name": "PHP",
"bytes": "139510"
},
{
"name": "PowerShell",
"bytes": "1432"
},
{
"name": "Python",
"bytes": "1055437"
},
{
"name": "Shell",
"bytes": "95010"
}
],
"symlink_target": ""
} |
"""Common utility library."""
from __future__ import with_statement
import datetime
import functools
import inspect
import os
import re
import sys
import six
__all__ = [
'Error',
'decode_datetime',
'get_package_for_module',
'positional',
'TimeZoneOffset',
'total_seconds',
]
class Error(Exception):
"""Base class for protorpc exceptions."""
_TIME_ZONE_RE_STRING = r"""
# Examples:
# +01:00
# -05:30
# Z12:00
((?P<z>Z) | (?P<sign>[-+])
(?P<hours>\d\d) :
(?P<minutes>\d\d))$
"""
_TIME_ZONE_RE = re.compile(_TIME_ZONE_RE_STRING, re.IGNORECASE | re.VERBOSE)
def positional(max_positional_args):
"""A decorator to declare that only the first N arguments may be positional.
This decorator makes it easy to support Python 3 style keyword-only
parameters. For example, in Python 3 it is possible to write:
def fn(pos1, *, kwonly1=None, kwonly1=None):
...
All named parameters after * must be a keyword:
fn(10, 'kw1', 'kw2') # Raises exception.
fn(10, kwonly1='kw1') # Ok.
Example:
To define a function like above, do:
@positional(1)
def fn(pos1, kwonly1=None, kwonly2=None):
...
If no default value is provided to a keyword argument, it
becomes a required keyword argument:
@positional(0)
def fn(required_kw):
...
This must be called with the keyword parameter:
fn() # Raises exception.
fn(10) # Raises exception.
fn(required_kw=10) # Ok.
When defining instance or class methods always remember to account for
'self' and 'cls':
class MyClass(object):
@positional(2)
def my_method(self, pos1, kwonly1=None):
...
@classmethod
@positional(2)
def my_method(cls, pos1, kwonly1=None):
...
One can omit the argument to 'positional' altogether, and then no
arguments with default values may be passed positionally. This
would be equivalent to placing a '*' before the first argument
with a default value in Python 3. If there are no arguments with
default values, and no argument is given to 'positional', an error
is raised.
@positional
def fn(arg1, arg2, required_kw1=None, required_kw2=0):
...
fn(1, 3, 5) # Raises exception.
fn(1, 3) # Ok.
fn(1, 3, required_kw1=5) # Ok.
Args:
max_positional_arguments: Maximum number of positional arguments. All
parameters after the this index must be keyword only.
Returns:
A decorator that prevents using arguments after max_positional_args from
being used as positional parameters.
Raises:
TypeError if a keyword-only argument is provided as a positional
parameter.
ValueError if no maximum number of arguments is provided and the function
has no arguments with default values.
"""
def positional_decorator(wrapped):
@functools.wraps(wrapped)
def positional_wrapper(*args, **kwargs):
if len(args) > max_positional_args:
plural_s = ''
if max_positional_args != 1:
plural_s = 's'
raise TypeError('%s() takes at most %d positional argument%s '
'(%d given)' % (wrapped.__name__,
max_positional_args,
plural_s, len(args)))
return wrapped(*args, **kwargs)
return positional_wrapper
if isinstance(max_positional_args, six.integer_types):
return positional_decorator
else:
args, _, _, defaults = inspect.getargspec(max_positional_args)
if defaults is None:
raise ValueError(
'Functions with no keyword arguments must specify '
'max_positional_args')
return positional(len(args) - len(defaults))(max_positional_args)
@positional(1)
def get_package_for_module(module):
"""Get package name for a module.
Helper calculates the package name of a module.
Args:
module: Module to get name for. If module is a string, try to find
module in sys.modules.
Returns:
If module contains 'package' attribute, uses that as package name.
Else, if module is not the '__main__' module, the module __name__.
Else, the base name of the module file name. Else None.
"""
if isinstance(module, six.string_types):
try:
module = sys.modules[module]
except KeyError:
return None
try:
return six.text_type(module.package)
except AttributeError:
if module.__name__ == '__main__':
try:
file_name = module.__file__
except AttributeError:
pass
else:
base_name = os.path.basename(file_name)
split_name = os.path.splitext(base_name)
if len(split_name) == 1:
return six.text_type(base_name)
else:
return u'.'.join(split_name[:-1])
return six.text_type(module.__name__)
def total_seconds(offset):
"""Backport of offset.total_seconds() from python 2.7+."""
seconds = offset.days * 24 * 60 * 60 + offset.seconds
microseconds = seconds * 10**6 + offset.microseconds
return microseconds / (10**6 * 1.0)
class TimeZoneOffset(datetime.tzinfo):
"""Time zone information as encoded/decoded for DateTimeFields."""
def __init__(self, offset):
"""Initialize a time zone offset.
Args:
offset: Integer or timedelta time zone offset, in minutes from UTC.
This can be negative.
"""
super(TimeZoneOffset, self).__init__()
if isinstance(offset, datetime.timedelta):
offset = total_seconds(offset) / 60
self.__offset = offset
def utcoffset(self, _):
"""Get the a timedelta with the time zone's offset from UTC.
Returns:
The time zone offset from UTC, as a timedelta.
"""
return datetime.timedelta(minutes=self.__offset)
def dst(self, _):
"""Get the daylight savings time offset.
The formats that ProtoRPC uses to encode/decode time zone
information don't contain any information about daylight
savings time. So this always returns a timedelta of 0.
Returns:
A timedelta of 0.
"""
return datetime.timedelta(0)
def decode_datetime(encoded_datetime):
"""Decode a DateTimeField parameter from a string to a python datetime.
Args:
encoded_datetime: A string in RFC 3339 format.
Returns:
A datetime object with the date and time specified in encoded_datetime.
Raises:
ValueError: If the string is not in a recognized format.
"""
# Check if the string includes a time zone offset. Break out the
# part that doesn't include time zone info. Convert to uppercase
# because all our comparisons should be case-insensitive.
time_zone_match = _TIME_ZONE_RE.search(encoded_datetime)
if time_zone_match:
time_string = encoded_datetime[:time_zone_match.start(1)].upper()
else:
time_string = encoded_datetime.upper()
if '.' in time_string:
format_string = '%Y-%m-%dT%H:%M:%S.%f'
else:
format_string = '%Y-%m-%dT%H:%M:%S'
decoded_datetime = datetime.datetime.strptime(time_string, format_string)
if not time_zone_match:
return decoded_datetime
# Time zone info was included in the parameter. Add a tzinfo
# object to the datetime. Datetimes can't be changed after they're
# created, so we'll need to create a new one.
if time_zone_match.group('z'):
offset_minutes = 0
else:
sign = time_zone_match.group('sign')
hours, minutes = [int(value) for value in
time_zone_match.group('hours', 'minutes')]
offset_minutes = hours * 60 + minutes
if sign == '-':
offset_minutes *= -1
return datetime.datetime(decoded_datetime.year,
decoded_datetime.month,
decoded_datetime.day,
decoded_datetime.hour,
decoded_datetime.minute,
decoded_datetime.second,
decoded_datetime.microsecond,
TimeZoneOffset(offset_minutes))
| {
"content_hash": "6c75c3b2acba3547b4a26627e1d6d02b",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 80,
"avg_line_length": 31.31159420289855,
"alnum_prop": 0.5889840314741958,
"repo_name": "KaranToor/MA450",
"id": "4df045869d7a8fccfa99de8002009ab6ef81866e",
"size": "9243",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/platform/gsutil/third_party/apitools/apitools/base/protorpclite/util.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.