text (string, 4 to 1.02M chars) | meta (dict) |
---|---|
from matrixbot import utils
class BroadcastPlugin:
def __init__(self, bot, settings):
self.name = "BroadcastPlugin"
self.logger = utils.get_logger()
self.bot = bot
self.settings = settings
self.logger.info("BroadcastPlugin loaded (%(name)s)" % settings)
def dispatch(self, handler):
return
def command(self, sender, room_id, body, handler):
self.logger.debug("BroadcastPlugin command")
plugin_name = self.settings["name"]
command_list = body.split()[1:]
if len(command_list) > 0 and command_list[0] == plugin_name:
if sender not in self.settings["users"]:
self.logger.warning("User %s not autorized to use BroadcastPlugin" % self)
return
announcement = body[body.find(plugin_name) + len(plugin_name) + 1:]
html = "<h3>%s</h3> <pre>%s</pre>" % ('Announcement:', announcement)
for room_id in self.settings["rooms"]:
room_id = self.bot.get_real_room_id(room_id)
self.logger.debug(
"BroadcastPlugin announcement in %s: %s" % (
room_id, announcement
)
)
self.bot.send_html(room_id, html)
def help(self, sender, room_id, handler):
self.logger.debug("BroadcastPlugin help")
if sender in self.settings["users"]:
if self.bot.is_private_room(room_id, self.bot.get_user_id()):
message = "%(name)s Announcement to be sent\n" % self.settings
else:
message = "%(username)s: %(name)s Announcement to be sent\n" % self.settings
handler(room_id, message)
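# Illustrative only (not part of the original file): the settings layout
# below is inferred from the lookups in __init__/command()/help(), and the
# bot/handler objects are hypothetical stand-ins.
#
#   example_settings = {
#       "name": "broadcast",                       # keyword that triggers command()
#       "username": "admin",                       # used in the non-private help text
#       "users": ["@admin:example.org"],           # senders allowed to broadcast
#       "rooms": ["#announcements:example.org"],   # rooms the announcement is sent to
#   }
#   plugin = BroadcastPlugin(bot, example_settings)
#   plugin.command("@admin:example.org", "!room:example.org",
#                  "!bot broadcast Maintenance at 18:00 UTC", handler)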
| {
"content_hash": "f5c2557d5ff2b16a85dd052807e5b6cd",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 92,
"avg_line_length": 40.48837209302326,
"alnum_prop": 0.5577254451464676,
"repo_name": "psaavedra/matrix-bot",
"id": "06e83a7b2e95c7b0b844b90fae8f9800b84574cd",
"size": "1765",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "matrixbot/plugins/broadcast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84552"
},
{
"name": "Shell",
"bytes": "3959"
}
],
"symlink_target": ""
} |
"""Prepares the data used for DeepLab training/evaluation."""
import tensorflow as tf
from deeplab.core import feature_extractor
from deeplab.core import preprocess_utils
# The probability of flipping the images and labels
# left-right during training
_PROB_OF_FLIP = 0.5
def preprocess_image_and_label(image,
label,
crop_height,
crop_width,
min_resize_value=None,
max_resize_value=None,
resize_factor=None,
min_scale_factor=1.,
max_scale_factor=1.,
scale_factor_step_size=0,
ignore_label=255,
is_training=True,
model_variant=None):
"""Preprocesses the image and label.
Args:
image: Input image.
label: Ground truth annotation label.
crop_height: The height value used to crop the image and label.
crop_width: The width value used to crop the image and label.
min_resize_value: Desired size of the smaller image side.
max_resize_value: Maximum allowed size of the larger image side.
resize_factor: Resized dimensions are multiple of factor plus one.
min_scale_factor: Minimum scale factor value.
max_scale_factor: Maximum scale factor value.
scale_factor_step_size: The step size from min scale factor to max scale
factor. The input is randomly scaled based on the value of
(min_scale_factor, max_scale_factor, scale_factor_step_size).
ignore_label: The label value which will be ignored for training and
evaluation.
is_training: If the preprocessing is used for training or not.
model_variant: Model variant (string) for choosing how to mean-subtract the
images. See feature_extractor.network_map for supported model variants.
Returns:
original_image: Original image (could be resized).
processed_image: Preprocessed image.
label: Preprocessed ground truth segmentation label.
Raises:
ValueError: Ground truth label not provided during training.
"""
if is_training and label is None:
raise ValueError('During training, label must be provided.')
if model_variant is None:
tf.logging.warning('Default mean-subtraction is performed. Please specify '
'a model_variant. See feature_extractor.network_map for '
'supported model variants.')
# Keep reference to original image.
original_image = image
processed_image = tf.cast(image, tf.float32)
if label is not None:
label = tf.cast(label, tf.int32)
# Resize image and label to the desired range.
if min_resize_value or max_resize_value:
[processed_image, label] = (
preprocess_utils.resize_to_range(
image=processed_image,
label=label,
min_size=min_resize_value,
max_size=max_resize_value,
factor=resize_factor,
align_corners=True))
# The `original_image` becomes the resized image.
original_image = tf.identity(processed_image)
# Data augmentation by randomly scaling the inputs.
if is_training:
scale = preprocess_utils.get_random_scale(
min_scale_factor, max_scale_factor, scale_factor_step_size)
processed_image, label = preprocess_utils.randomly_scale_image_and_label(
processed_image, label, scale)
processed_image.set_shape([None, None, 3])
# Pad image and label to have dimensions >= [crop_height, crop_width]
image_shape = tf.shape(processed_image)
image_height = image_shape[0]
image_width = image_shape[1]
target_height = image_height + tf.maximum(crop_height - image_height, 0)
target_width = image_width + tf.maximum(crop_width - image_width, 0)
# Pad image with mean pixel value.
mean_pixel = tf.reshape(
feature_extractor.mean_pixel(model_variant), [1, 1, 3])
processed_image = preprocess_utils.pad_to_bounding_box(
processed_image, 0, 0, target_height, target_width, mean_pixel)
if label is not None:
label = preprocess_utils.pad_to_bounding_box(
label, 0, 0, target_height, target_width, ignore_label)
# Randomly crop the image and label.
if is_training and label is not None:
processed_image, label = preprocess_utils.random_crop(
[processed_image, label], crop_height, crop_width)
processed_image.set_shape([crop_height, crop_width, 3])
if label is not None:
label.set_shape([crop_height, crop_width, 1])
if is_training:
# Randomly left-right flip the image and label.
processed_image, label, _ = preprocess_utils.flip_dim(
[processed_image, label], _PROB_OF_FLIP, dim=1)
return original_image, processed_image, label
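# Illustrative usage sketch (not part of the original file); the tensor
# shapes, crop size and model_variant are placeholders and assume the
# TF 1.x graph mode implied by the tf.logging call above.
#
#   image = tf.zeros([500, 375, 3], dtype=tf.uint8)
#   label = tf.zeros([500, 375, 1], dtype=tf.uint8)
#   original, processed, label = preprocess_image_and_label(
#       image, label,
#       crop_height=513, crop_width=513,
#       min_scale_factor=0.5, max_scale_factor=2.0,
#       scale_factor_step_size=0.25,
#       is_training=True, model_variant='mobilenet_v2')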
| {
"content_hash": "3a81d0a9dc5050fc62c833c06641d146",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 80,
"avg_line_length": 39.53658536585366,
"alnum_prop": 0.6512440880115156,
"repo_name": "tombstone/models",
"id": "9ca8bce4eb9104b22469419c4e6af4beaba9406a",
"size": "5580",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "research/deeplab/input_preprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
} |
from __future__ import print_function
# For a flag of bool type, we consider the strings 'False', 'false' and '0'
# as False, and the strings 'True', 'true' and '1' as True.
# We also accept the bool type as its corresponding value!
import logging
import os
import shlex
import sys
import warnings
from six import StringIO
import theano
from theano.compat import configparser as ConfigParser
from six import string_types
_logger = logging.getLogger('theano.configparser')
class TheanoConfigWarning(Warning):
def warn(cls, message, stacklevel=0):
warnings.warn(message, cls, stacklevel=stacklevel + 3)
warn = classmethod(warn)
THEANO_FLAGS = os.getenv("THEANO_FLAGS", "")
# The THEANO_FLAGS environment variable should be a list of comma-separated
# [section.]option=value entries. If the section part is omitted, there should
# be only one section that contains the given option.
def parse_config_string(config_string, issue_warnings=True):
"""
Parses a config string (comma-separated key=value components) into a dict.
"""
config_dict = {}
my_splitter = shlex.shlex(config_string, posix=True)
my_splitter.whitespace = ','
my_splitter.whitespace_split = True
for kv_pair in my_splitter:
kv_pair = kv_pair.strip()
if not kv_pair:
continue
kv_tuple = kv_pair.split('=', 1)
if len(kv_tuple) == 1:
if issue_warnings:
TheanoConfigWarning.warn(
("Config key '%s' has no value, ignoring it"
% kv_tuple[0]),
stacklevel=1)
else:
k, v = kv_tuple
# subsequent values for k will override earlier ones
config_dict[k] = v
return config_dict
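# Example of the accepted format (illustrative, not part of the original
# module); later occurrences of a key override earlier ones, and keys with
# no value trigger a TheanoConfigWarning and are skipped:
#
#   parse_config_string('floatX=float32,device=cpu,floatX=float64')
#   => {'device': 'cpu', 'floatX': 'float64'}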
THEANO_FLAGS_DICT = parse_config_string(THEANO_FLAGS, issue_warnings=True)
# THEANORC can contain a colon-delimited list of config files, like
# THEANORC=~lisa/.theanorc:~/.theanorc
# In that case, definitions in files on the right (here, ~/.theanorc) have
# precedence over those in files on the left.
def config_files_from_theanorc():
rval = [os.path.expanduser(s) for s in
os.getenv('THEANORC', '~/.theanorc').split(os.pathsep)]
if os.getenv('THEANORC') is None and sys.platform == "win32":
# so the file does not need to be renamed and opens easily on Windows
rval.append(os.path.expanduser('~/.theanorc.txt'))
return rval
config_files = config_files_from_theanorc()
theano_cfg = ConfigParser.SafeConfigParser(
{'USER': os.getenv("USER", os.path.split(os.path.expanduser('~'))[-1]),
'LSCRATCH': os.getenv("LSCRATCH", ""),
'TMPDIR': os.getenv("TMPDIR", ""),
'TEMP': os.getenv("TEMP", ""),
'TMP': os.getenv("TMP", ""),
'PID': str(os.getpid()),
}
)
theano_cfg.read(config_files)
# Having a raw version of the config around as well enables us to pass
# through config values that contain format strings.
# The time required to parse the config twice is negligible.
theano_raw_cfg = ConfigParser.RawConfigParser()
theano_raw_cfg.read(config_files)
def change_flags(**kwargs):
"""
Use this as a decorator to change the value of Theano config variable.
Useful during tests.
"""
def change_flags_exec(f):
def inner(*args, **kwargs_):
old_val = {}
for k in kwargs:
l = [v for v in theano.configparser._config_var_list
if v.fullname == k]
assert len(l) == 1
old_val[k] = l[0].__get__(True, None)
try:
for k in kwargs:
l = [v for v in theano.configparser._config_var_list
if v.fullname == k]
assert len(l) == 1
l[0].__set__(None, kwargs[k])
return f(*args, **kwargs_)
finally:
for k in kwargs:
l = [v for v in theano.configparser._config_var_list
if v.fullname == k]
assert len(l) == 1
l[0].__set__(None, old_val[k])
# Make sure that the name of the decorated function remains the same.
inner.__name__ = f.__name__
return inner
return change_flags_exec
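# Illustrative sketch (not part of the original module): change_flags is
# typically applied as a decorator so that a config variable (here assumed
# to be the standard 'floatX' flag) is overridden only for the duration of
# the decorated call.
#
#   @change_flags(floatX='float32')
#   def test_uses_float32():
#       assert theano.config.floatX == 'float32'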
def fetch_val_for_key(key):
"""Return the overriding config value for a key.
A successful search returns a string value.
An unsuccessful search raises a KeyError
The (decreasing) priority order is:
- THEANO_FLAGS
- ~/.theanorc
"""
# first try to find it in the FLAGS
try:
return THEANO_FLAGS_DICT[key]
except KeyError:
pass
# next try to find it in the config file
# config file keys can be of form option, or section.option
key_tokens = key.rsplit('.', 1)
if len(key_tokens) > 2:
raise KeyError(key)
if len(key_tokens) == 2:
section, option = key_tokens
else:
section, option = 'global', key
try:
try:
return theano_cfg.get(section, option)
except ConfigParser.InterpolationError:
return theano_raw_cfg.get(section, option)
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
raise KeyError(key)
_config_var_list = []
def _config_print(thing, buf):
for cv in _config_var_list:
print(cv, file=buf)
print(" Doc: ", cv.doc, file=buf)
print(" Value: ", cv.__get__(True, None), file=buf)
print("", file=buf)
def get_config_md5():
"""
Return a string md5 of the current config options. It should be such that
we can safely assume that two different config setups will lead to two
different strings.
We only take into account config options for which `in_c_key` is True.
"""
all_opts = sorted([c for c in _config_var_list if c.in_c_key],
key=lambda cv: cv.fullname)
return theano.gof.utils.hash_from_code('\n'.join(
['%s = %s' % (cv.fullname, cv.__get__(True, None)) for cv in all_opts]))
class TheanoConfigParser(object):
# properties are installed by AddConfigVar
_i_am_a_config_class = True
def __str__(self):
sio = StringIO()
_config_print(self.__class__, sio)
return sio.getvalue()
# N.B. all instances of TheanoConfigParser give access to the same properties.
config = TheanoConfigParser()
# The data structure at work here is a tree of CLASSES with
# CLASS ATTRIBUTES/PROPERTIES that are either a) INSTANTIATED
# dynamically-generated CLASSES, or b) ConfigParam instances. The root
# of this tree is the TheanoConfigParser CLASS, and the internal nodes
# are the SubObj classes created inside of AddConfigVar().
# Why this design ?
# - The config object is a true singleton. Every instance of
# TheanoConfigParser is an empty instance that looks up attributes/properties
# in the [single] TheanoConfigParser.__dict__
# - The subtrees provide the same interface as the root
# - ConfigParser subclasses control get/set of config properties to guard
# against craziness.
def AddConfigVar(name, doc, configparam, root=config, in_c_key=True):
"""Add a new variable to theano.config
:type name: string of the form "[section0.[section1.[etc]]].option"
:param name: the full name for this configuration variable.
:type doc: string
:param doc: What does this variable specify?
:type configparam: ConfigParam instance
:param configparam: an object for getting and setting this configuration
parameter
:type root: object
:param root: used for recursive calls -- do not provide an argument for
this parameter.
:type in_c_key: boolean
:param in_c_key: If True, then whenever this config option changes, the
key associated to compiled C modules also changes, i.e. it may trigger a
compilation of these modules (this compilation will only be partial if it
turns out that the generated C code is unchanged). Set this option to False
only if you are confident this option should not affect C code compilation.
:returns: None
"""
# This method also performs some of the work of initializing ConfigParam
# instances
if root is config:
# only set the name in the first call, not the recursive ones
configparam.fullname = name
sections = name.split('.')
if len(sections) > 1:
# set up a subobject
if not hasattr(root, sections[0]):
# every internal node in the config tree is an instance of its own
# unique class
class SubObj(object):
_i_am_a_config_class = True
setattr(root.__class__, sections[0], SubObj())
newroot = getattr(root, sections[0])
if (not getattr(newroot, '_i_am_a_config_class', False) or
isinstance(newroot, type)):
raise TypeError(
'Internal config nodes must be config class instances',
newroot)
return AddConfigVar('.'.join(sections[1:]), doc, configparam,
root=newroot, in_c_key=in_c_key)
else:
if hasattr(root, name):
raise AttributeError('This name is already taken',
configparam.fullname)
configparam.doc = doc
configparam.in_c_key = in_c_key
# Trigger a read of the value from config files and env vars
# This allows filtering out wrong values provided by the user.
if not callable(configparam.default):
configparam.__get__(root, type(root))
else:
# We do not want to evaluate now the default value
# when it is a callable.
try:
fetch_val_for_key(configparam.fullname)
# The user provided a value, filter it now.
configparam.__get__(root, type(root))
except KeyError:
pass
setattr(root.__class__, sections[0], configparam)
_config_var_list.append(configparam)
class ConfigParam(object):
def __init__(self, default, filter=None, allow_override=True):
"""
If allow_override is False, the value cannot be changed after Theano
is imported, so it stays the same for the whole execution.
"""
self.default = default
self.filter = filter
self.allow_override = allow_override
self.is_default = True
# N.B. --
# self.fullname # set by AddConfigVar
# self.doc # set by AddConfigVar
# Note that we do not call `self.filter` on the default value: this
# will be done automatically in AddConfigVar, potentially with a
# more appropriate user-provided default value.
# Calling `filter` here may actually be harmful if the default value is
# invalid and causes a crash or has unwanted side effects.
def __get__(self, cls, type_):
if cls is None:
return self
if not hasattr(self, 'val'):
try:
val_str = fetch_val_for_key(self.fullname)
self.is_default = False
except KeyError:
if callable(self.default):
val_str = self.default()
else:
val_str = self.default
self.__set__(cls, val_str)
# print "RVAL", self.val
return self.val
def __set__(self, cls, val):
if not self.allow_override and hasattr(self, 'val'):
raise Exception(
"Can't change the value of this config parameter "
"after initialization!")
# print "SETTING PARAM", self.fullname,(cls), val
if self.filter:
self.val = self.filter(val)
else:
self.val = val
class EnumStr(ConfigParam):
def __init__(self, default, *options, **kwargs):
self.default = default
self.all = (default,) + options
# All options should be strings
for val in self.all:
if not isinstance(val, string_types):
raise ValueError('Valid values for an EnumStr parameter '
'should be strings', val, type(val))
convert = kwargs.get("convert", None)
def filter(val):
if convert:
val = convert(val)
if val in self.all:
return val
else:
raise ValueError((
'Invalid value ("%s") for configuration variable "%s". '
'Valid options are %s'
% (val, self.fullname, self.all)))
over = kwargs.get("allow_override", True)
super(EnumStr, self).__init__(default, filter, over)
def __str__(self):
return '%s (%s) ' % (self.fullname, self.all)
class TypedParam(ConfigParam):
def __init__(self, default, mytype, is_valid=None, allow_override=True):
self.mytype = mytype
def filter(val):
cast_val = mytype(val)
if callable(is_valid):
if is_valid(cast_val):
return cast_val
else:
raise ValueError(
'Invalid value (%s) for configuration variable '
'"%s".'
% (val, self.fullname), val)
return cast_val
super(TypedParam, self).__init__(default, filter,
allow_override=allow_override)
def __str__(self):
return '%s (%s) ' % (self.fullname, self.mytype)
def StrParam(default, is_valid=None, allow_override=True):
return TypedParam(default, str, is_valid, allow_override=allow_override)
def IntParam(default, is_valid=None, allow_override=True):
return TypedParam(default, int, is_valid, allow_override=allow_override)
def FloatParam(default, is_valid=None, allow_override=True):
return TypedParam(default, float, is_valid, allow_override=allow_override)
def BoolParam(default, is_valid=None, allow_override=True):
# see comment at the beginning of this file.
def booltype(s):
if s in ['False', 'false', '0', False]:
return False
elif s in ['True', 'true', '1', True]:
return True
def is_valid_bool(s):
if s in ['False', 'false', '0', 'True', 'true', '1', False, True]:
return True
else:
return False
if is_valid is None:
is_valid = is_valid_bool
return TypedParam(default, booltype, is_valid,
allow_override=allow_override)
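# Illustrative sketch (not part of the original module) of how the pieces
# above are combined elsewhere in Theano: declare a config variable once,
# then read it through the `config` singleton.  The variable name below is
# made up.
#
#   AddConfigVar('example.enable_feature',
#                "Toggle a hypothetical feature.",
#                BoolParam(False),
#                in_c_key=False)
#   if config.example.enable_feature:
#       ...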
| {
"content_hash": "4cd8b3000be37156c9bdb232c6ac72bc",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 80,
"avg_line_length": 34.864285714285714,
"alnum_prop": 0.5983063579867514,
"repo_name": "cmdunkers/DeeperMind",
"id": "196214722ec6faa50065cc6200877e409984284b",
"size": "14643",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PythonEnv/lib/python2.7/site-packages/theano/configparser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "653032"
},
{
"name": "C++",
"bytes": "3354338"
},
{
"name": "Cuda",
"bytes": "538188"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "HTML",
"bytes": "124328"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "22186197"
},
{
"name": "Shell",
"bytes": "4377"
}
],
"symlink_target": ""
} |
import copy
from oslo_log import log
from manila.common import constants
from manila.i18n import _, _LI
from manila import utils
LOG = log.getLogger(__name__)
def locked_access_rules_operation(operation):
"""Lock decorator for access rules operations.
Takes a named lock prior to executing the operation. The lock is
named with the ID of the share instance to which the access rule belongs.
Intended use:
If a database operation to retrieve or update access rules uses this
decorator, it will block actions on all access rules of the share
instance until the named lock is free. This is used to avoid race
conditions while performing access rules updates on a given share instance.
"""
def wrapped(*args, **kwargs):
instance_id = kwargs.get('share_instance_id')
@utils.synchronized(
"locked_access_rules_operation_by_share_instance_%s" % instance_id,
external=True)
def locked_operation(*_args, **_kwargs):
return operation(*_args, **_kwargs)
return locked_operation(*args, **kwargs)
return wrapped
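# Illustrative note (not part of the original module): the decorator reads
# kwargs.get('share_instance_id'), so decorated methods must receive the
# share instance id as a keyword argument for the lock name to be distinct
# per instance, e.g.:
#
#   @locked_access_rules_operation
#   def do_something(self, context, share_instance_id=None):
#       ...
#
#   obj.do_something(context, share_instance_id=instance['id'])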
class ShareInstanceAccessDatabaseMixin(object):
@locked_access_rules_operation
def get_and_update_share_instance_access_rules_status(
self, context, status=None, conditionally_change=None,
share_instance_id=None):
"""Get and update the access_rules_status of a share instance.
:param status: Set this parameter only if you want to
omit the conditionally_change parameter; i.e, if you want to
force a state change on the share instance regardless of the prior
state.
:param conditionally_change: Set this parameter to a dictionary of rule
state transitions to be made. The key is the expected
access_rules_status and the value is the state to transition the
access_rules_status to. If the state is not as expected,
no transition is performed. Default is {}, which means no state
transitions will be made.
:returns share_instance: if an update was made.
"""
if status is not None:
updates = {'access_rules_status': status}
elif conditionally_change:
share_instance = self.db.share_instance_get(
context, share_instance_id)
access_rules_status = share_instance['access_rules_status']
try:
updates = {
'access_rules_status':
conditionally_change[access_rules_status],
}
except KeyError:
updates = {}
else:
updates = {}
if updates:
share_instance = self.db.share_instance_update(
context, share_instance_id, updates, with_share_data=True)
return share_instance
@locked_access_rules_operation
def get_and_update_share_instance_access_rules(self, context,
filters=None, updates=None,
conditionally_change=None,
share_instance_id=None):
"""Get and conditionally update all access rules of a share instance.
:param updates: Set this parameter to a dictionary of key:value
pairs corresponding to the keys in the ShareInstanceAccessMapping
model. Include 'state' in this dictionary only if you want to
omit the conditionally_change parameter; i.e, if you want to
force a state change on all filtered rules regardless of the prior
state. This parameter is always honored, regardless of whether
conditionally_change allows for a state transition as desired.
Example::
{
'access_key': 'bob007680048318f4239dfc1c192d5',
'access_level': 'ro',
}
:param conditionally_change: Set this parameter to a dictionary of rule
state transitions to be made. The key is the expected state of
the access rule, and the value is the state to transition the
access rule to. If the state is not as expected, no transition is
performed. Default is {}, which means no state transitions
will be made.
Example::
{
'queued_to_apply': 'applying',
'queued_to_deny': 'denying',
}
"""
instance_rules = self.db.share_access_get_all_for_instance(
context, share_instance_id, filters=filters)
if instance_rules and (updates or conditionally_change):
if not updates:
updates = {}
if not conditionally_change:
conditionally_change = {}
for rule in instance_rules:
mapping_state = rule['state']
rule_updates = copy.deepcopy(updates)
try:
rule_updates['state'] = conditionally_change[mapping_state]
except KeyError:
pass
if rule_updates:
self.db.share_instance_access_update(
context, rule['access_id'], share_instance_id,
rule_updates)
# Refresh the rules after the updates
rules_to_get = {
'access_id': tuple([i['access_id'] for i in instance_rules]),
}
instance_rules = self.db.share_access_get_all_for_instance(
context, share_instance_id, filters=rules_to_get)
return instance_rules
def get_share_instance_access_rules(self, context, filters=None,
share_instance_id=None):
return self.get_and_update_share_instance_access_rules(
context, filters, None, None, share_instance_id)
@locked_access_rules_operation
def get_and_update_share_instance_access_rule(self, context, rule_id,
updates=None,
share_instance_id=None,
conditionally_change=None):
"""Get and conditionally update a given share instance access rule.
:param updates: Set this parameter to a dictionary of key:value
pairs corresponding to the keys in the ShareInstanceAccessMapping
model. Include 'state' in this dictionary only if you want to
omit the conditionally_change parameter; i.e, if you want to
force a state change regardless of the prior state.
:param conditionally_change: Set this parameter to a dictionary of rule
state transitions to be made. The key is the expected state of
the access rule the value is the state to transition the
access rule to. If the state is not as expected, no transition is
performed. Default is {}, which means no state transitions
will be made.
Example::
{
'queued_to_apply': 'applying',
'queued_to_deny': 'denying',
}
"""
instance_rule_mapping = self.db.share_instance_access_get(
context, rule_id, share_instance_id)
if not updates:
updates = {}
if conditionally_change:
mapping_state = instance_rule_mapping['state']
try:
updated_state = conditionally_change[mapping_state]
updates.update({'state': updated_state})
except KeyError:
msg = ("The state of the access rule %(rule_id)s (allowing "
"access to share instance %(si)s) was not updated "
"because its state was modified by another operation.")
msg_payload = {
'si': share_instance_id,
'rule_id': rule_id,
}
LOG.debug(msg, msg_payload)
if updates:
self.db.share_instance_access_update(
context, rule_id, share_instance_id, updates)
# Refresh the rule after update
instance_rule_mapping = self.db.share_instance_access_get(
context, rule_id, share_instance_id)
return instance_rule_mapping
@locked_access_rules_operation
def delete_share_instance_access_rules(self, context, access_rules,
share_instance_id=None):
for rule in access_rules:
self.db.share_instance_access_delete(context, rule['id'])
class ShareInstanceAccess(ShareInstanceAccessDatabaseMixin):
def __init__(self, db, driver):
self.db = db
self.driver = driver
def update_access_rules(self, context, share_instance_id,
delete_all_rules=False, share_server=None):
"""Update access rules for a given share instance.
:param context: request context
:param share_instance_id: ID of the share instance
:param delete_all_rules: set this parameter to True if all
existing access rules must be denied for a given share instance
:param share_server: Share server model or None
"""
share_instance = self.db.share_instance_get(
context, share_instance_id, with_share_data=True)
msg_payload = {
'si': share_instance_id,
'shr': share_instance['share_id'],
}
if delete_all_rules:
updates = {
'state': constants.ACCESS_STATE_QUEUED_TO_DENY,
}
self.get_and_update_share_instance_access_rules(
context, updates=updates, share_instance_id=share_instance_id)
# Is there a sync in progress? If yes, ignore the incoming request.
rule_filter = {
'state': (constants.ACCESS_STATE_APPLYING,
constants.ACCESS_STATE_DENYING),
}
syncing_rules = self.get_and_update_share_instance_access_rules(
context, filters=rule_filter, share_instance_id=share_instance_id)
if syncing_rules:
msg = ("Access rules are being synced for share instance "
"%(si)s belonging to share %(shr)s, any rule changes will "
"be applied shortly.")
LOG.debug(msg, msg_payload)
else:
rules_to_apply_or_deny = (
self._update_and_get_unsynced_access_rules_from_db(
context, share_instance_id)
)
if rules_to_apply_or_deny:
msg = ("Updating access rules for share instance %(si)s "
"belonging to share %(shr)s.")
LOG.debug(msg, msg_payload)
self._update_access_rules(context, share_instance_id,
share_server=share_server)
else:
msg = ("All access rules have been synced for share instance "
"%(si)s belonging to share %(shr)s.")
LOG.debug(msg, msg_payload)
def _update_access_rules(self, context, share_instance_id,
share_server=None):
# Refresh the share instance model
share_instance = self.db.share_instance_get(
context, share_instance_id, with_share_data=True)
conditionally_change = {
constants.STATUS_ACTIVE: constants.SHARE_INSTANCE_RULES_SYNCING,
}
share_instance = (
self.get_and_update_share_instance_access_rules_status(
context, conditionally_change=conditionally_change,
share_instance_id=share_instance_id) or share_instance
)
rules_to_be_removed_from_db = []
# Populate rules to send to the driver
(access_rules_to_be_on_share, add_rules, delete_rules) = (
self._get_rules_to_send_to_driver(context, share_instance)
)
if share_instance['cast_rules_to_readonly']:
# Ensure read-only semantics for migrating instances
access_rules_to_be_on_share = self._set_rules_to_readonly(
access_rules_to_be_on_share, share_instance)
add_rules = []
rules_to_be_removed_from_db = delete_rules
delete_rules = []
try:
driver_rule_updates = self._update_rules_through_share_driver(
context, share_instance, access_rules_to_be_on_share,
add_rules, delete_rules, rules_to_be_removed_from_db,
share_server)
self._process_driver_rule_updates(
context, driver_rule_updates, share_instance_id)
# Update access rules that are still in 'applying' state
conditionally_change = {
constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ACTIVE,
}
self.get_and_update_share_instance_access_rules(
context, share_instance_id=share_instance_id,
conditionally_change=conditionally_change)
except Exception:
conditionally_change_rule_state = {
constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ERROR,
constants.ACCESS_STATE_DENYING: constants.ACCESS_STATE_ERROR,
}
self.get_and_update_share_instance_access_rules(
context, share_instance_id=share_instance_id,
conditionally_change=conditionally_change_rule_state)
conditionally_change_access_rules_status = {
constants.ACCESS_STATE_ACTIVE: constants.STATUS_ERROR,
constants.SHARE_INSTANCE_RULES_SYNCING: constants.STATUS_ERROR,
}
self.get_and_update_share_instance_access_rules_status(
context, share_instance_id=share_instance_id,
conditionally_change=conditionally_change_access_rules_status)
raise
if rules_to_be_removed_from_db:
delete_rules = rules_to_be_removed_from_db
self.delete_share_instance_access_rules(
context, delete_rules, share_instance_id=share_instance['id'])
self._loop_for_refresh_else_update_access_rules_status(
context, share_instance_id, share_server)
msg = _("Access rules were successfully modified for share instance "
"%(si)s belonging to share %(shr)s.")
msg_payload = {
'si': share_instance['id'],
'shr': share_instance['share_id'],
}
LOG.info(msg, msg_payload)
def _update_rules_through_share_driver(self, context, share_instance,
access_rules_to_be_on_share,
add_rules, delete_rules,
rules_to_be_removed_from_db,
share_server):
driver_rule_updates = {}
try:
driver_rule_updates = self.driver.update_access(
context,
share_instance,
access_rules_to_be_on_share,
add_rules=add_rules,
delete_rules=delete_rules,
share_server=share_server
) or {}
except NotImplementedError:
# NOTE(u_glide): Fallback to legacy allow_access/deny_access
# for drivers without update_access() method support
self._update_access_fallback(context, add_rules, delete_rules,
rules_to_be_removed_from_db,
share_instance,
share_server)
return driver_rule_updates
def _loop_for_refresh_else_update_access_rules_status(self, context,
share_instance_id,
share_server):
# Do we need to re-sync or apply any new changes?
if self._check_needs_refresh(context, share_instance_id):
self._update_access_rules(context, share_instance_id,
share_server=share_server)
else:
# Switch the share instance's access_rules_status to 'active'
# if there are no more rules in 'error' state, else, ensure
# 'error' state.
rule_filter = {'state': constants.STATUS_ERROR}
rules_in_error_state = (
self.get_and_update_share_instance_access_rules(
context, filters=rule_filter,
share_instance_id=share_instance_id)
)
if not rules_in_error_state:
conditionally_change = {
constants.SHARE_INSTANCE_RULES_SYNCING:
constants.STATUS_ACTIVE,
constants.SHARE_INSTANCE_RULES_ERROR:
constants.STATUS_ACTIVE,
}
self.get_and_update_share_instance_access_rules_status(
context, conditionally_change=conditionally_change,
share_instance_id=share_instance_id)
else:
conditionally_change = {
constants.SHARE_INSTANCE_RULES_SYNCING:
constants.SHARE_INSTANCE_RULES_ERROR,
}
self.get_and_update_share_instance_access_rules_status(
context, conditionally_change=conditionally_change,
share_instance_id=share_instance_id)
def _process_driver_rule_updates(self, context, driver_rule_updates,
share_instance_id):
for rule_id, rule_updates in driver_rule_updates.items():
if 'state' in rule_updates:
# We allow updates *only* if the state is unchanged from
# the time this update was initiated. It is possible
# that the access rule was denied at the API prior to
# the driver reporting that the access rule was added
# successfully.
state = rule_updates.pop('state')
conditional_state_updates = {
constants.ACCESS_STATE_APPLYING: state,
constants.ACCESS_STATE_DENYING: state,
constants.ACCESS_STATE_ACTIVE: state,
}
else:
conditional_state_updates = {}
self.get_and_update_share_instance_access_rule(
context, rule_id, updates=rule_updates,
share_instance_id=share_instance_id,
conditionally_change=conditional_state_updates)
@staticmethod
def _set_rules_to_readonly(access_rules_to_be_on_share, share_instance):
LOG.debug("All access rules of share instance %s are being "
"cast to read-only for a migration or because the "
"instance is a readable replica.",
share_instance['id'])
for rule in access_rules_to_be_on_share:
rule['access_level'] = constants.ACCESS_LEVEL_RO
return access_rules_to_be_on_share
def _get_rules_to_send_to_driver(self, context, share_instance):
add_rules = []
delete_rules = []
access_filters = {
'state': (constants.ACCESS_STATE_APPLYING,
constants.ACCESS_STATE_ACTIVE,
constants.ACCESS_STATE_DENYING),
}
existing_rules_in_db = self.get_and_update_share_instance_access_rules(
context, filters=access_filters,
share_instance_id=share_instance['id'])
# Update queued rules to transitional states
for rule in existing_rules_in_db:
if rule['state'] == constants.ACCESS_STATE_APPLYING:
add_rules.append(rule)
elif rule['state'] == constants.ACCESS_STATE_DENYING:
delete_rules.append(rule)
delete_rule_ids = [r['id'] for r in delete_rules]
access_rules_to_be_on_share = [
r for r in existing_rules_in_db if r['id'] not in delete_rule_ids
]
return access_rules_to_be_on_share, add_rules, delete_rules
def _check_needs_refresh(self, context, share_instance_id):
rules_to_apply_or_deny = (
self._update_and_get_unsynced_access_rules_from_db(
context, share_instance_id)
)
return any(rules_to_apply_or_deny)
def _update_access_fallback(self, context, add_rules, delete_rules,
remove_rules, share_instance, share_server):
for rule in add_rules:
LOG.info(
_LI("Applying access rule '%(rule)s' for share "
"instance '%(instance)s'"),
{'rule': rule['id'], 'instance': share_instance['id']}
)
self.driver.allow_access(
context,
share_instance,
rule,
share_server=share_server
)
# NOTE(ganso): Fallback mode temporary compatibility workaround
if remove_rules:
delete_rules.extend(remove_rules)
for rule in delete_rules:
LOG.info(
_LI("Denying access rule '%(rule)s' from share "
"instance '%(instance)s'"),
{'rule': rule['id'], 'instance': share_instance['id']}
)
self.driver.deny_access(
context,
share_instance,
rule,
share_server=share_server
)
def _update_and_get_unsynced_access_rules_from_db(self, context,
share_instance_id):
rule_filter = {
'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY,
constants.ACCESS_STATE_QUEUED_TO_DENY),
}
conditionally_change = {
constants.ACCESS_STATE_QUEUED_TO_APPLY:
constants.ACCESS_STATE_APPLYING,
constants.ACCESS_STATE_QUEUED_TO_DENY:
constants.ACCESS_STATE_DENYING,
}
rules_to_apply_or_deny = (
self.get_and_update_share_instance_access_rules(
context, filters=rule_filter,
share_instance_id=share_instance_id,
conditionally_change=conditionally_change)
)
return rules_to_apply_or_deny
def reset_applying_rules(self, context, share_instance_id):
conditional_updates = {
constants.ACCESS_STATE_APPLYING:
constants.ACCESS_STATE_QUEUED_TO_APPLY,
constants.ACCESS_STATE_DENYING:
constants.ACCESS_STATE_QUEUED_TO_DENY,
}
self.get_and_update_share_instance_access_rules(
context, share_instance_id=share_instance_id,
conditionally_change=conditional_updates)
| {
"content_hash": "212e85e24a8b9d2616fc94e23005247c",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 79,
"avg_line_length": 43.03333333333333,
"alnum_prop": 0.5602031155865392,
"repo_name": "vponomaryov/manila",
"id": "6bca410524ba43a2dd317713c50444df580ab967",
"size": "23871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/share/access.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "9697997"
},
{
"name": "Shell",
"bytes": "103800"
}
],
"symlink_target": ""
} |
def oracle_connect(ip, port, username, passwd, sid=None, service_name=None):
import cx_Oracle
if sid:
# sid connect
dsn = cx_Oracle.makedsn(ip, port, sid)
instance_status = 1
db = cx_Oracle.connect(username, passwd, dsn)
return db
if service_name:
# service_name connect
handle = ip + ":" + port + "/" + service_name
db = cx_Oracle.connect(username, passwd, handle)
return db
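# Illustrative call (not part of the original file); host, credentials and
# SID are placeholders.  Note that if neither sid nor service_name is
# given, the function falls through and returns None.
#
#   db = oracle_connect('127.0.0.1', 1521, 'user', 'password', sid='orcl')
#   cursor = db.cursor()
#   cursor.execute('select 1 from dual')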
| {
"content_hash": "f55401f29c1754de244adb63b4fc13e8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 35.38461538461539,
"alnum_prop": 0.5869565217391305,
"repo_name": "CreditEaseDBA/Themis",
"id": "cde159e8af49cb15d5560e3686d0a9889ad483dc",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webui/utils/oracle_connect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1696258"
},
{
"name": "CoffeeScript",
"bytes": "124205"
},
{
"name": "Go",
"bytes": "7075"
},
{
"name": "HTML",
"bytes": "518220"
},
{
"name": "JavaScript",
"bytes": "1565150"
},
{
"name": "PHP",
"bytes": "52571"
},
{
"name": "Python",
"bytes": "220060"
},
{
"name": "Shell",
"bytes": "1353"
}
],
"symlink_target": ""
} |
import os
import sys
import importlib
import argparse
__dir__ = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(__dir__, ''))
import cv2
import logging
import numpy as np
from pathlib import Path
ppcv = importlib.import_module('.', 'ppcv')
tools = importlib.import_module('.', 'tools')
tests = importlib.import_module('.', 'tests')
VERSION = '0.1.0'
import yaml
from ppcv.model_zoo.model_zoo import TASK_DICT, list_model, get_config_file
from ppcv.engine.pipeline import Pipeline
from ppcv.utils.logger import setup_logger
logger = setup_logger()
class PaddleCV(object):
def __init__(self,
task_name=None,
config_path=None,
output_dir=None,
run_mode='paddle',
device='CPU'):
if task_name is not None:
assert task_name in TASK_DICT, f"task_name must be one of {list(TASK_DICT.keys())} but got {task_name}"
config_path = get_config_file(task_name)
else:
assert config_path is not None, "task_name and config_path can not be None at the same time!!!"
self.cfg_dict = dict(
config=config_path,
output_dir=output_dir,
run_mode=run_mode,
device=device)
cfg = argparse.Namespace(**self.cfg_dict)
self.pipeline = Pipeline(cfg)
@classmethod
def list_all_supported_tasks(self, ):
logger.info(
f"Tasks and recommanded configs that paddlecv supports are : ")
buffer = yaml.dump(TASK_DICT)
print(buffer)
return
@classmethod
def list_all_supported_models(self, filters=[]):
list_model(filters)
return
def __call__(self, input):
res = self.pipeline.run(input)
return res
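# Illustrative usage sketch (not part of the original file); the task name
# and image path are placeholders, and the task name must be a key of
# TASK_DICT.
#
#   PaddleCV.list_all_supported_tasks()
#   ocr = PaddleCV(task_name='PP-OCRv3')
#   results = ocr('path/to/image.jpg')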
| {
"content_hash": "68386c3b2ca990e4b57cfef8b4501e9c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 115,
"avg_line_length": 27.106060606060606,
"alnum_prop": 0.606484069312465,
"repo_name": "PaddlePaddle/models",
"id": "49ba871bcb785e723181da11ca6d85faa542a1ca",
"size": "2400",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/2.3",
"path": "paddlecv/paddlecv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "46835"
},
{
"name": "CMake",
"bytes": "8248"
},
{
"name": "Jupyter Notebook",
"bytes": "1720166"
},
{
"name": "Makefile",
"bytes": "2920"
},
{
"name": "Python",
"bytes": "3099697"
},
{
"name": "Shell",
"bytes": "70177"
}
],
"symlink_target": ""
} |
import urllib
from utils import simplejson, geocoder_factory
# http://www.multimap.com/openapidocs/1.2/web_service/ws_geocoding.htm
def geocode(q, api_key):
base_url = 'http://developer.multimap.com/API/geocode/1.2/%s' % urllib.quote(api_key)
json = simplejson.load(urllib.urlopen(base_url + '?' + urllib.urlencode({
'qs': q,
'output': 'json'
})
))
try:
lon = json['result_set'][0]['point']['lon']
lat = json['result_set'][0]['point']['lat']
except (KeyError, IndexError):
return None, (None, None)
name = json['result_set'][0]['address']['display_name']
return name, (lat, lon)
geocoder = geocoder_factory(geocode)
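# Illustrative call (not part of the original file); the query and API key
# are placeholders:
#
#   name, (lat, lon) = geocode('10 Downing Street, London', 'YOUR_API_KEY')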
| {
"content_hash": "c13adb6747f071208da2c1d68db99bd1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 89,
"avg_line_length": 33.476190476190474,
"alnum_prop": 0.6088193456614509,
"repo_name": "simonw/geocoders",
"id": "d0400afa4ca24198de3a48e04f9b7242ad7f7fde",
"size": "703",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geocoders/multimap.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6885"
}
],
"symlink_target": ""
} |
import operator
import abc
class Expression(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@abc.abstractmethod
def get_value(self):
pass
@abc.abstractmethod
def __str__(self):
pass
@abc.abstractmethod
def copy(self):
pass
@classmethod
def coerce_operand(cls, operand):
if isinstance(operand, Expression):
return operand
else:
return ConstExpression(operand)
def bind(self, owner):
pass
def get_labels(self):
return set()
def __add__(self, other):
return Add(self, other)
def __radd__(self, other):
return Add(other, self)
def __sub__(self, other):
return Sub(self, other)
def __rsub__(self, other):
return Sub(other, self)
def __mul__(self, other):
return Mul(self, other)
def __rmul__(self, other):
return Mul(other, self)
def __div__(self, other):
return Div(self, other)
def __floordiv__(self, other):
return FloorDiv(self, other)
def __truediv__(self, other):
return TrueDiv(self, other)
def __rdiv__(self, other):
return Div(other, self)
def __rfloordiv__(self, other):
return FloorDiv(other, self)
def __rtruediv__(self, other):
return TrueDiv(other, self)
def __mod__(self, other):
return Mod(self, other)
def __pow__(self, other):
return Pow(self, other)
def __eq__(self, other):
return Eq(self, other)
def __ne__(self, other):
return Ne(self, other)
def __gt__(self, other):
return Gt(self, other)
def __ge__(self, other):
return Ge(self, other)
def __lt__(self, other):
return Lt(self, other)
def __le__(self, other):
return Le(self, other)
def __and__(self, other):
"""using bitwise and instead of short-circuit logical and"""
return And(self, other)
def __or__(self, other):
"""using bitwise or instead of short-circuit logical or"""
return Or(self, other)
def __not__(self):
"""logical not"""
return Not(self)
def __invert__(self):
"""using bitwise invert as logical not"""
return self.__not__()
def __pos__(self):
return Pos(self)
def __neg__(self):
return Neq(self)
def __abs__(self):
return Abs(self)
def __getitem__(self, index):
return ItemGetter(self, index)
class ConstExpression(Expression):
def __init__(self, const_value):
super(ConstExpression, self).__init__()
self._const_value = const_value
def get_value(self):
return self._const_value
def __str__(self):
return str(self._const_value)
def copy(self):
return self.__class__(self._const_value)
class Operator(Expression):
def __init__(self, *operands):
self._operands = operands
def bind(self, owner):
for operand in self._operands:
operand.bind(owner)
def get_labels(self):
labels = set()
for operand in self._operands:
labels.update(operand.get_labels())
return labels
class BinaryOperator(Operator):
__symbol__ = '?'
def __init__(self, left_operand, right_operand):
self.left_operand = self.coerce_operand(left_operand)
self.right_operand = self.coerce_operand(right_operand)
super(BinaryOperator, self).__init__(self.left_operand, self.right_operand)
def get_value(self):
return self.binary_compute(self.left_operand.get_value(), self.right_operand.get_value())
@abc.abstractmethod
def binary_compute(self, l, r):
pass
def copy(self):
return self.__class__(self.left_operand.copy(), self.right_operand.copy())
def __str__(self):
return "({0} {1} {2})".format(self.left_operand, self.__symbol__, self.right_operand)
class UnaryOperator(Operator):
__metaclass__ = abc.ABCMeta
__symbol__ = '?'
def __init__(self, operand):
self.operand = self.coerce_operand(operand)
super(UnaryOperator, self).__init__(self.operand)
def get_value(self):
return self.unary_compute(self.operand.get_value())
@abc.abstractmethod
def unary_compute(self, o):
pass
def copy(self):
return self.__class__(self.operand.copy())
def __str__(self):
return "({0}{1})".format(self.__symbol__, self.operand)
class Add(BinaryOperator):
__symbol__ = '+'
def binary_compute(self, l, r):
return l + r
class Sub(BinaryOperator):
__symbol__ = '-'
def binary_compute(self, l, r):
return l - r
class Mul(BinaryOperator):
__symbol__ = '*'
def binary_compute(self, l, r):
return l * r
class Div(BinaryOperator):
__symbol__ = '/'
def binary_compute(self, l, r):
return l / r
class FloorDiv(BinaryOperator):
__symbol__ = '//'
def binary_compute(self, l, r):
return l // r
class TrueDiv(BinaryOperator):
__symbol__ = '/'
def binary_compute(self, l, r):
return operator.__truediv__(l, r)
class Pow(BinaryOperator):
__symbol__ = '**'
def binary_compute(self, l, r):
return l ** r
class Mod(BinaryOperator):
__symbol__ = '%'
def binary_compute(self, l, r):
return l % r
class Eq(BinaryOperator):
__symbol__ = '=='
def binary_compute(self, l, r):
return l == r
class Ne(BinaryOperator):
__symbol__ = '!='
def binary_compute(self, l, r):
return l != r
class Gt(BinaryOperator):
__symbol__ = '>'
def binary_compute(self, l, r):
return l > r
class Ge(BinaryOperator):
__symbol__ = '>='
def binary_compute(self, l, r):
return l >= r
class Lt(BinaryOperator):
__symbol__ = '<'
def binary_compute(self, l, r):
return l < r
class Le(BinaryOperator):
__symbol__ = '<='
def binary_compute(self, l, r):
return l <= r
class And(BinaryOperator):
__symbol__ = '&'
def binary_compute(self, l, r):
return l and r
class Or(BinaryOperator):
__symbol__ = '|'
def binary_compute(self, l, r):
return l or r
class Not(UnaryOperator):
__symbol__ = '~'
def unary_compute(self, o):
return not o
class Pos(UnaryOperator):
__symbol__ = '+'
def unary_compute(self, o):
return o
class Neq(UnaryOperator):
__symbol__ = '-'
def unary_compute(self, o):
return -o
class UnaryFunction(UnaryOperator):
__function_name__ = None
def __str__(self):
return "{0}({1})".format(self.__function_name__, self.operand)
class Abs(UnaryFunction):
__function_name__ = "abs"
def unary_compute(self, o):
return abs(o)
class IntCast(UnaryFunction):
__function_name__ = "int"
def unary_compute(self, o):
return int(o)
class FloatCast(UnaryFunction):
__function_name__ = "float"
def unary_compute(self, o):
return float(o)
class StrCast(UnaryFunction):
__function_name__ = "str"
def unary_compute(self, o):
return str(o)
class BoolCast(UnaryFunction):
__function_name__ = "bool"
def unary_compute(self, o):
return bool(o)
class Len(UnaryFunction):
__function_name__ = "len"
def unary_compute(self, o):
return len(o)
class ItemGetter(BinaryOperator):
def binary_compute(self, l, r):
return l[r]
def __str__(self):
return "{0}({1})".format(self.left_operand, self.right_operand)
class AttributeGetter(Expression):
def __init__(self, label):
self._owner = None
self._label = label
super(AttributeGetter, self).__init__()
def bind(self, owner):
self._owner = owner
def get_value(self):
return getattr(self._owner, self._label) #.value
def copy(self):
instance = self.__class__(self._label)
if self._owner is not None:
instance.bind(self._owner)
return instance
def get_labels(self):
return {self._label}
def __str__(self):
return 'BIND.{0}'.format(self._label)
class Binder(object):
def __getattr__(self, label):
return AttributeGetter(label)
BIND = Binder()
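if __name__ == '__main__':
    # Small illustrative self-check (not part of the original module):
    # build an expression against unbound labels, bind it to an object,
    # then inspect and evaluate it.  The _Point class is made up here.
    class _Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y
    expr = abs(BIND.x - 2 * BIND.y)
    expr.bind(_Point(3, 5))
    print(expr)               # abs((BIND.x - (2 * BIND.y)))
    print(expr.get_value())   # 7
    print(expr.get_labels())  # {'x', 'y'}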
| {
"content_hash": "606790ea1d12563c2a8a73aa68bafffb",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 97,
"avg_line_length": 23.394957983193276,
"alnum_prop": 0.5692049808429118,
"repo_name": "simone-campagna/py-structparser",
"id": "42ad312726fa5222de9735a5dc90f9604a3844f2",
"size": "8375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "structparser/bind.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "194538"
}
],
"symlink_target": ""
} |
"""A play local music files example
To use the script:
* Make sure soco is installed
* Drop this script into a folder that, besides python files, contains
nothing but music files
* Choose which player to use and run the script at the command line as such:
play_local_files.py "Living Room"
NOTE: The script has been changed from the earlier version, where the
settings were written directly into the file. They now have to be
given at the command line instead. But, it should only be necessary to
supply the zone name. The local machine IP should be autodetected.
"""
from __future__ import print_function, unicode_literals
import os
import sys
import time
import socket
from threading import Thread
from random import choice
try:
# Python 3
from urllib.parse import quote
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
print('Running as python 3')
except ImportError:
# Python 2
from urllib import quote
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
print('Running as python 2')
from soco.discovery import by_name, discover
class HttpServer(Thread):
"""A simple HTTP Server in its own thread"""
def __init__(self, port):
super(HttpServer, self).__init__()
self.daemon = True
handler = SimpleHTTPRequestHandler
self.httpd = TCPServer(("", port), handler)
def run(self):
"""Start the server"""
print('Start HTTP server')
self.httpd.serve_forever()
def stop(self):
"""Stop the server"""
print('Stop HTTP server')
self.httpd.socket.close()
def add_random_file_from_present_folder(machine_ip, port, zone):
"""Add a random non-py file from this folder and subfolders to soco"""
# Make a list of music files; right now it is done by collecting all files
# below the current folder whose extension does not start with .py.
# This will probably need to be modified for other purposes.
music_files = []
print('Looking for music files')
for path, dirs, files in os.walk('.'):
for file_ in files:
if not os.path.splitext(file_)[1].startswith('.py'):
music_files.append(os.path.relpath(os.path.join(path, file_)))
print('Found:', music_files[-1])
random_file = choice(music_files)
# urlencode all the path parts (but not the /'s)
random_file = os.path.join(
*[quote(part) for part in os.path.split(random_file)]
)
print('\nPlaying random file:', random_file)
netpath = 'http://{}:{}/{}'.format(machine_ip, port, random_file)
number_in_queue = zone.add_uri_to_queue(netpath)
# play_from_queue indexes are 0-based
zone.play_from_queue(number_in_queue - 1)
def detect_ip_address():
"""Return the local ip-address"""
# Rather hackish way to get the local ip-address, recipe from
# https://stackoverflow.com/a/166589
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip_address = s.getsockname()[0]
s.close()
return ip_address
def parse_args():
"""Parse the command line arguments"""
import argparse
description = 'Play local files with Sonos by running a local web server'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('zone', help='The name of the zone to play from')
parser.add_argument('--port', default=8000,
help='The local machine port to run the webserver on')
parser.add_argument('--ip', default=detect_ip_address(),
help='The local IP address of this machine. By '
'default it will attempt to autodetect it.')
return parser.parse_args()
def main():
# Settings
args = parse_args()
print(" Will use the following settings:\n"
" Zone: {args.zone}\n"
" IP of this machine: {args.ip}\n"
" Use port: {args.port}".format(args=args))
# Get the zone
zone = by_name(args.zone)
# Check if a zone by the given name was found
if zone is None:
zone_names = [zone_.player_name for zone_ in discover()]
print("No Sonos player named '{}'. Player names are {}"\
.format(args.zone, zone_names))
sys.exit(1)
# Check whether the zone is a coordinator (stand alone zone or
# master of a group)
if not zone.is_coordinator:
print("The zone '{}' is not a group master, and therefore cannot "
"play music. Please use '{}' in stead"\
.format(args.zone, zone.group.coordinator.player_name))
sys.exit(2)
# Setup and start the http server
server = HttpServer(args.port)
server.start()
# When the http server is setup you can really add your files in
# any way that is desired. The source code for
# add_random_file_from_present_folder is just an example, but it may be
# helpful in figuring out how to format the urls
try:
add_random_file_from_present_folder(args.ip, args.port, zone)
# Remember the http server runs in its own daemonized thread, so it is
# necessary to keep the main thread alive. So sleep for 3 years.
time.sleep(10**8)
except KeyboardInterrupt:
server.stop()
main()
| {
"content_hash": "30e4b101ed1677867fca69fad95ea41a",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 78,
"avg_line_length": 33.61006289308176,
"alnum_prop": 0.6538173652694611,
"repo_name": "dajobe/SoCo",
"id": "4f43dfccbe656e1631ca06d0bd46c538caf85cf8",
"size": "5344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/play_local_files/play_local_files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "168"
},
{
"name": "Makefile",
"bytes": "368"
},
{
"name": "Python",
"bytes": "640813"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
} |
"""Tests for flag_util.py."""
import unittest
from perfkitbenchmarker import flag_util
class TestIntegerList(unittest.TestCase):
def testSimpleLength(self):
il = flag_util.IntegerList([1, 2, 3])
self.assertEqual(len(il), 3)
def testRangeLength(self):
il = flag_util.IntegerList([1, (2, 5), 9])
self.assertEqual(len(il), 6)
def testSimpleGetItem(self):
il = flag_util.IntegerList([1, 2, 3])
self.assertEqual(il[0], 1)
self.assertEqual(il[1], 2)
self.assertEqual(il[2], 3)
def testOutOfRangeIndexError(self):
il = flag_util.IntegerList([1, 2, 3])
with self.assertRaises(IndexError):
il[4]
def testRangeGetItem(self):
il = flag_util.IntegerList([1, (2, 5), 9])
self.assertEqual(il[1], 2)
self.assertEqual(il[2], 3)
self.assertEqual(il[5], 9)
def testIter(self):
il = flag_util.IntegerList([1, (2, 5), 9])
self.assertEqual(list(il), [1, 2, 3, 4, 5, 9])
class TestParseIntegerList(unittest.TestCase):
def setUp(self):
self.ilp = flag_util.IntegerListParser()
def testOneInteger(self):
self.assertEqual(list(self.ilp.Parse('3')), [3])
def testIntegerRange(self):
self.assertEqual(list(self.ilp.Parse('3-5')), [3, 4, 5])
def testIntegerList(self):
self.assertEqual(list(self.ilp.Parse('3-5,8,10-12')),
[3, 4, 5, 8, 10, 11, 12])
def testNoInteger(self):
with self.assertRaises(ValueError):
self.ilp.Parse('a')
def testBadRange(self):
with self.assertRaises(ValueError):
self.ilp.Parse('3-a')
def testBadList(self):
with self.assertRaises(ValueError):
self.ilp.Parse('3-5,8a')
def testTrailingComma(self):
with self.assertRaises(ValueError):
self.ilp.Parse('3-5,')
def testNonIncreasingEntries(self):
ilp = flag_util.IntegerListParser(
on_nonincreasing=flag_util.IntegerListParser.EXCEPTION)
with self.assertRaises(ValueError):
ilp.Parse('3,2,1')
def testNonIncreasingRange(self):
ilp = flag_util.IntegerListParser(
on_nonincreasing=flag_util.IntegerListParser.EXCEPTION)
with self.assertRaises(ValueError):
ilp.Parse('3-1')
class TestIntegerListSerializer(unittest.TestCase):
def testSerialize(self):
ser = flag_util.IntegerListSerializer()
il = flag_util.IntegerList([1, (2, 5), 9])
self.assertEqual(ser.Serialize(il),
'1,2-5,9')
| {
"content_hash": "7f575a27687e55e7be32757e8ca0dd99",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 63,
"avg_line_length": 27.318181818181817,
"alnum_prop": 0.6580698835274542,
"repo_name": "kivio/PerfKitBenchmarker",
"id": "eebcae05830683773653959a3cf01ce756963c7a",
"size": "3015",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_flag_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1128835"
},
{
"name": "Shell",
"bytes": "25650"
}
],
"symlink_target": ""
} |
from openstack.tests.unit import base
from openstack.orchestration.v1 import stack_environment as se
FAKE = {
'encrypted_param_names': ['n1', 'n2'],
'event_sinks': {
's1': 'v1'
},
'parameters': {
'key_name': {
'type': 'string'
}
},
'parameter_defaults': {
'p1': 'def1'
},
'resource_registry': {
'resources': {
'type1': 'type2'
}
},
}
class TestStackTemplate(base.TestCase):
def test_basic(self):
sot = se.StackEnvironment()
self.assertFalse(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertFalse(sot.allow_commit)
self.assertFalse(sot.allow_delete)
self.assertFalse(sot.allow_list)
def test_make_it(self):
sot = se.StackEnvironment(**FAKE)
self.assertEqual(FAKE['encrypted_param_names'],
sot.encrypted_param_names)
self.assertEqual(FAKE['event_sinks'], sot.event_sinks)
self.assertEqual(FAKE['parameters'], sot.parameters)
self.assertEqual(FAKE['parameter_defaults'], sot.parameter_defaults)
self.assertEqual(FAKE['resource_registry'], sot.resource_registry)
| {
"content_hash": "23f3eb632a2569ceca9a64ae87554243",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 76,
"avg_line_length": 27.636363636363637,
"alnum_prop": 0.5921052631578947,
"repo_name": "dtroyer/python-openstacksdk",
"id": "08737dc7e428cd42efd2ed48eb794205f268644d",
"size": "1762",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "openstack/tests/unit/orchestration/v1/test_stack_environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3803161"
},
{
"name": "Shell",
"bytes": "9027"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, render_to_response, redirect
from django.views.generic.base import View
from decision_engine.settings import DEBUG
from venue.models import Event
from decision.models import Decision, DecisionAnswers
from guest.models import Guest, GuestAnswers
from django.http import HttpResponse
import json
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
class GuestView(View):
def __init__(self):
View.__init__(self)
self.current_request = None
self.profile = None
self.parameter_dict = {}
def dispatch(self, request, *args, **kwargs):
self.parameter_dict = dict()
self.current_request = request
self.parameter_dict['guest_ip'] = get_client_ip(request)
self.parameter_dict['DEBUG'] = DEBUG
return super(GuestView, self).dispatch(request, *args, **kwargs)
class GuestSignUp(GuestView):
def get(self , *args, **kwargs):
eid = kwargs['eid']
#event =
print eid
print self.parameter_dict['guest_ip']
return render_to_response('guest/register.html', self.parameter_dict)
class GuestVote(GuestView):
def get(self , request, eid):
event = Event.objects.get(id=eid)
self.parameter_dict['event'] = event
self.parameter_dict['notification_type'] = 'success'
if not DEBUG:
if event.internal_only:
if event.venue.low_ip_range:
if event.venue.low_ip_range != self.parameter_dict['guest_ip']:
return render_to_response('guest/invalid_ip.html', self.parameter_dict)
if event.requires_credentials:
if 'registered' not in request.session:
return render_to_response('guest/register.html', self.parameter_dict)
try:
decision = Decision.objects.get(event=event, published=True)
self.parameter_dict['decision'] = decision
message = decision.question_to_be_asked
except Decision.DoesNotExist:
decision = None
message = "There is nothing to vote on at the current moment"
self.parameter_dict['notification_type'] = 'danger'
except Decision.MultipleObjectsReturned:
self.parameter_dict['notification_type'] = 'danger'
message = "More than 1 decision is published error...system error"
decision = None
if decision:
decision_answers = DecisionAnswers.objects.filter(decision = decision)
self.parameter_dict['decision_answers'] = decision_answers
if event.venue.low_ip_range == self.parameter_dict['guest_ip']:
                event.number_of_internal_users += 1
else:
                event.number_of_external_users += 1
event.save()
if 'guest_id' not in request.session:
guest = Guest(first_name="anon", ip=self.parameter_dict['guest_ip'])
guest.save()
request.session['guest_id'] = guest.id
else:
self.parameter_dict['decision_answers'] = None
self.parameter_dict['message'] = message
return render_to_response('guest/vote.html', self.parameter_dict)
def vote(request, da_id):
decision_answer = DecisionAnswers.objects.get(id=da_id)
guest = Guest.objects.get(id=request.session['guest_id'])
ga = GuestAnswers(guest=guest,decision_answers = decision_answer )
ga.save()
message = "Success"
return HttpResponse(json.dumps(message), content_type='application/json')
@csrf_exempt
def register(request):
request.session['registered'] = True
if 'guest_id' not in request.session:
guest = Guest(first_name=request.POST.get("first_name"), zip=request.POST.get("zip_code"))
guest.save()
request.session['guest_id'] = guest.id
return redirect('guest.views.GuestVote', eid=request.POST.get("event_id"))
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip | {
"content_hash": "0c02c198492e92e9cf6e7b19076c9cfa",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 98,
"avg_line_length": 37.13274336283186,
"alnum_prop": 0.6341754051477597,
"repo_name": "civiclee/Hack4Cause2017",
"id": "a363272fabcc7864136bd6f91056ce1963bcf388",
"size": "4196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/askeugene/guest/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "387"
},
{
"name": "C#",
"bytes": "93689"
},
{
"name": "CSS",
"bytes": "2985545"
},
{
"name": "GLSL",
"bytes": "1732"
},
{
"name": "HTML",
"bytes": "1332796"
},
{
"name": "Java",
"bytes": "3653"
},
{
"name": "JavaScript",
"bytes": "4022838"
},
{
"name": "Jupyter Notebook",
"bytes": "210359"
},
{
"name": "Objective-C",
"bytes": "5255"
},
{
"name": "PHP",
"bytes": "15253449"
},
{
"name": "PLSQL",
"bytes": "1307152"
},
{
"name": "Python",
"bytes": "90164"
},
{
"name": "Shell",
"bytes": "708"
},
{
"name": "TypeScript",
"bytes": "208784"
}
],
"symlink_target": ""
} |
import copy
import re
from collections import OrderedDict
from collections import defaultdict
from app.data_model.answer_store import natural_order
from app.questionnaire_state.state_answer import StateAnswer
from app.questionnaire_state.state_question import StateQuestion
class RepeatingAnswerStateQuestion(StateQuestion):
def __init__(self, item_id, schema_item):
super().__init__(item_id=item_id, schema_item=schema_item)
def update_state(self, user_input):
self.build_repeating_state(user_input)
self.children = self.answers
for state_answer in self.answers:
state_answer.update_state(user_input)
def build_repeating_state(self, user_input):
for answer_id, answer_index in iterate_over_instance_ids(user_input.keys()):
for answer_schema in self.schema_item.answers:
if answer_schema.id == answer_id and self.is_new_answer_state_required(answer_schema, answer_index):
new_answer_state = self.create_new_answer_state(answer_schema, answer_index)
self.add_new_answer_state(new_answer_state)
break
def is_new_answer_state_required(self, answer_schema, answer_instance):
for answer_state in self.answers:
if answer_schema.id == answer_state.id and answer_instance == answer_state.answer_instance:
return False
return True
def create_new_answer_state(self, answer_schema, answer_instance, group_instance=0):
new_answer_schema = copy.copy(answer_schema)
suffix = '_' + str(answer_instance) if answer_instance > 0 else ''
widget_id = answer_schema.id + suffix
new_answer_schema.widget = type(answer_schema.widget)(widget_id)
new_answer_state = StateAnswer(new_answer_schema.id, new_answer_schema)
new_answer_state.answer_instance = answer_instance
new_answer_state.group_instance = group_instance
new_answer_state.parent = self
return new_answer_state
def add_new_answer_state(self, answer_state):
self.answers.append(answer_state)
def answers_grouped_by_instance(self):
"""
Groups answers by their answer_instance Id.
:return: A list of lists containing the answers grouped by answer_instance.
"""
answer_states_by_id = defaultdict(list)
for answer_state in self.answers:
answer_states_by_id[answer_state.id].append(answer_state)
answer_states_grouped_by_instance = OrderedDict()
for answer_schema in self.schema_item.answers:
answer_states = answer_states_by_id.get(answer_schema.id)
if answer_states:
for answer_state in answer_states:
answer_states_grouped_by_instance.setdefault(answer_state.answer_instance, []).append(answer_state)
return list(answer_states_grouped_by_instance.values())
def iterate_over_instance_ids(answer_instances):
"""
Iterates over a collection of answer instances yielding the answer Id and answer instance Id.
:param answer_instances: A list of raw answer_instance_ids
:return: Tuple containing the answer Id and answer instance Id.
"""
answer_instance_ids = sorted(answer_instances, key=natural_order)
for answer_instance_id in answer_instance_ids:
answer_id, answer_index = extract_answer_instance_id(answer_instance_id)
yield answer_id, answer_index
def extract_answer_instance_id(answer_instance_id):
matches = re.match(r'^(.+?)_(\d+)$', answer_instance_id)
if matches:
answer_id, index = matches.groups()
else:
answer_id = answer_instance_id
index = 0
return answer_id, int(index)
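# --- Illustrative sketch (not part of the original module) ------------------
# A quick demo of the two helpers above, using hypothetical answer ids; run
# the module directly (with the 'app' package importable) to see the mapping.
if __name__ == '__main__':
    # ids without a suffix map to instance 0; '_<n>' suffixes are parsed off
    print(extract_answer_instance_id('age'))    # ('age', 0)
    print(extract_answer_instance_id('age_2'))  # ('age', 2)
    # iterate_over_instance_ids sorts the raw ids with natural_order first,
    # so (given natural ordering) this should yield
    # ('age', 0), ('age', 2), ('age', 10)
    print(list(iterate_over_instance_ids(['age_2', 'age', 'age_10'])))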
| {
"content_hash": "5b683d48dcd7d5ed62e89c39c877af96",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 119,
"avg_line_length": 41.252747252747255,
"alnum_prop": 0.6728822589238146,
"repo_name": "qateam123/eq",
"id": "b73fc12cc6a5b6647b97812936487ba9f64b4c59",
"size": "3754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/questionnaire_state/state_repeating_answer_question.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56444"
},
{
"name": "HTML",
"bytes": "64720"
},
{
"name": "JavaScript",
"bytes": "752517"
},
{
"name": "Python",
"bytes": "735531"
},
{
"name": "Shell",
"bytes": "7685"
}
],
"symlink_target": ""
} |
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils as rutils
from rally import consts
from rally import osclients
from rally.plugins.openstack.cleanup import manager as resource_manager
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.plugins.openstack import types
from rally.task import context
LOG = logging.getLogger(__name__)
@context.configure(name="servers", order=430)
class ServerGenerator(context.Context):
"""Context class for adding temporary servers for benchmarks.
Servers are added for each tenant.
"""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"image": {
"type": "object",
"properties": {
"name": {
"type": "string"
}
}
},
"flavor": {
"type": "object",
"properties": {
"name": {
"type": "string"
}
}
},
"servers_per_tenant": {
"type": "integer",
"minimum": 1
},
"auto_assign_nic": {
"type": "boolean",
}
},
"required": ["image", "flavor"],
"additionalProperties": False
}
DEFAULT_CONFIG = {
"servers_per_tenant": 5,
"auto_assign_nic": False
}
@logging.log_task_wrapper(LOG.info, _("Enter context: `Servers`"))
def setup(self):
image = self.config["image"]
flavor = self.config["flavor"]
auto_nic = self.config["auto_assign_nic"]
servers_per_tenant = self.config["servers_per_tenant"]
clients = osclients.Clients(self.context["users"][0]["credential"])
image_id = types.GlanceImage.transform(clients=clients,
resource_config=image)
flavor_id = types.Flavor.transform(clients=clients,
resource_config=flavor)
for iter_, (user, tenant_id) in enumerate(rutils.iterate_per_tenants(
self.context["users"])):
LOG.debug("Booting servers for user tenant %s "
% (user["tenant_id"]))
tmp_context = {"user": user,
"tenant": self.context["tenants"][tenant_id],
"task": self.context["task"],
"iteration": iter_}
nova_scenario = nova_utils.NovaScenario(tmp_context)
LOG.debug("Calling _boot_servers with image_id=%(image_id)s "
"flavor_id=%(flavor_id)s "
"servers_per_tenant=%(servers_per_tenant)s"
% {"image_id": image_id,
"flavor_id": flavor_id,
"servers_per_tenant": servers_per_tenant})
servers = nova_scenario._boot_servers(image_id, flavor_id,
requests=servers_per_tenant,
auto_assign_nic=auto_nic)
current_servers = [server.id for server in servers]
LOG.debug("Adding booted servers %s to context"
% current_servers)
self.context["tenants"][tenant_id][
"servers"] = current_servers
@logging.log_task_wrapper(LOG.info, _("Exit context: `Servers`"))
def cleanup(self):
resource_manager.cleanup(names=["nova.servers"],
users=self.context.get("users", []))
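# --- Illustrative sketch (not part of the original module) ------------------
# An example of what the "servers" section of a task file could look like,
# matching CONFIG_SCHEMA above; the image and flavor names are placeholders
# that depend on the target cloud.
_EXAMPLE_SERVERS_CONTEXT = {
    "servers": {
        "image": {"name": "cirros-0.3.4-x86_64-uec"},
        "flavor": {"name": "m1.tiny"},
        "servers_per_tenant": 2,
        "auto_assign_nic": True
    }
}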
| {
"content_hash": "0be9473be67df19c21fb68755bf29b1c",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 35.92307692307692,
"alnum_prop": 0.5,
"repo_name": "eayunstack/rally",
"id": "aeb293a2c9a68afe9b63056b7375bb21e1b0e319",
"size": "4302",
"binary": false,
"copies": "2",
"ref": "refs/heads/product",
"path": "rally/plugins/openstack/context/nova/servers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "36716"
},
{
"name": "Mako",
"bytes": "17389"
},
{
"name": "Python",
"bytes": "2988245"
},
{
"name": "Shell",
"bytes": "41128"
}
],
"symlink_target": ""
} |
"""
This module defines base classes for worker, aggregator and stats
processor.
Look at the pyaloha.main module to understand the pipeline sequence.
"""
from __future__ import print_function
import multiprocessing
import shutil
import traceback
from pyaloha.event_factory import EventFactory
from pyaloha.protocol import WorkerResults, to_unicode, SerializableSet
class ShareableData(object):
"""
Base class for both worker and aggregator
to optional access of the same routine of instantiating same data properties.
Can be used like (guaranteed to run in __init__):
def shared_method(instance, *constructor_args, **constructor_kwargs):
instance.mega_storage = collections.defaultdict(list)
class Worker(BaseWorker):
setup_shareable_data = shared_method
...
class Aggregator(BaseAggregator):
setup_shareable_data = shared_method
...
"""
def setup_shareable_data(self):
self.lost_data = SerializableSet()
class DataStreamWorker(ShareableData):
"""
This is a base class representing a worker that preprocesses
given raw events (one by one).
    This worker is not guaranteed to see all of the available data, so do not
    try to implement stats-processing logic here. This is a low-level
    preprocessor/filter/etc.
@method process_unspecified is a generic method called on every event
regardless of its actual type. In general this is the most basic method
    to overload while writing a worker in your script.
    The __events__ field is used in low-level filtering of the events provided
    to a specific worker.
"""
__events__ = tuple()
def __init__(self,
event_factory=None,
*args, **kwargs):
super(DataStreamWorker, self).__init__(*args, **kwargs)
self._event_factory = event_factory or EventFactory(custom_events=[])
self.setup_shareable_data(*args, **kwargs)
def process_unspecified(self, event):
pass
def process_event(self,
key, event_time_p, user_info_p,
str_data_p, str_data_len):
"""
Main callback used in the main event stream processing loop
"""
try:
ev = self._event_factory.make_event(
key, event_time_p[0], user_info_p[0],
str_data_p, str_data_len
)
ev.process_me(self)
except Exception:
logger = multiprocessing.get_logger()
logger.error(traceback.format_exc())
def dumps_results(self):
return WorkerResults.dumps_object(self, debug=False)
def pre_output(self):
pass
class DataAggregator(ShareableData):
"""
This is a 'singleton' class that accumulates results from the workers.
@method aggregate must be overloaded in your script.
It is called every time a worker is done with its events to accumulate results.
@method post_aggregate is an optional method.
It is called after all results are accumulated.
Look for an example in daily_over_fs usage pattern.
"""
def __init__(self,
results_dir=None, *args, **kwargs):
super(DataAggregator, self).__init__(*args, **kwargs)
self.logger = multiprocessing.get_logger()
self.results_dir = results_dir
if self.results_dir:
self.created_dirs = set()
shutil.rmtree(self.results_dir, ignore_errors=True)
self.logger.info(
"Results directory '%s' is set and cleaned" % self.results_dir
)
self.setup_shareable_data(*args, **kwargs)
def aggregate(self):
raise NotImplementedError()
def post_aggregate(self, pool=None):
pass
class StatsProcessor(object):
"""
This is a fully prepared stats data processor and printer.
    It is instantiated with a pointer to an aggregator that has already
    finished with all workers and its own postprocessing.
Class can be used as a business logic processor
(it is guaranteed to have all available data) or just as a sequential printer
(if actual stats processing logic is simple).
@method gen_stats must be overloaded in your script.
It is called at the end of the stats processing pipeline and
yields one or several stats sections - each with a human-readable text header
and a sequence of sequence objects interpreted as a table of values.
@method process_stats is optional.
It is called just before calling gen_stats.
"""
def __init__(self, aggregator):
self.aggregator = aggregator
def process_stats(self):
pass
def gen_stats(self):
"""
Look at @method print_stats for a results format to be generated
"""
raise NotImplementedError()
def print_stats(self):
for header, stats_generator in self.gen_stats():
print('-' * 20)
print(header)
print('-' * 20)
for row in stats_generator:
print(
u'\t'.join(map(to_unicode, row))
)
print()
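# --- Illustrative sketch (not part of the original module) ------------------
# A minimal worker/aggregator/processor trio wired together the way the
# docstrings above describe. The counting logic is hypothetical; real scripts
# inspect concrete event types instead of just counting class names.
import collections
def _setup_counters(instance, *args, **kwargs):
    # shared between worker and aggregator, as ShareableData suggests
    instance.lost_data = SerializableSet()
    instance.event_counts = collections.Counter()
class ExampleWorker(DataStreamWorker):
    setup_shareable_data = _setup_counters
    def process_unspecified(self, event):
        # called for every event regardless of its actual type
        self.event_counts[type(event).__name__] += 1
class ExampleAggregator(DataAggregator):
    setup_shareable_data = _setup_counters
    def aggregate(self, worker_results):
        # NOTE: the argument is an assumption of this sketch -- the base class
        # declares aggregate(self); the real call site lives in pyaloha.main.
        self.event_counts.update(worker_results.event_counts)
class ExampleProcessor(StatsProcessor):
    def gen_stats(self):
        yield 'Events by type', (
            (name, count)
            for name, count in self.aggregator.event_counts.items()
        )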
| {
"content_hash": "ef954135100879910238e11ba90a1c15",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 79,
"avg_line_length": 31.7375,
"alnum_prop": 0.6478928712091374,
"repo_name": "ruilin/RLMap",
"id": "7abf77621d9d2e34259122d0c9b91089f7bf0c54",
"size": "5078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3party/Alohalytics/snippets/pyaloha/base.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "3962"
},
{
"name": "Batchfile",
"bytes": "6451"
},
{
"name": "C",
"bytes": "15370980"
},
{
"name": "C#",
"bytes": "2505826"
},
{
"name": "C++",
"bytes": "146782061"
},
{
"name": "CMake",
"bytes": "270225"
},
{
"name": "CSS",
"bytes": "26798"
},
{
"name": "Common Lisp",
"bytes": "17521"
},
{
"name": "DIGITAL Command Language",
"bytes": "36710"
},
{
"name": "Dockerfile",
"bytes": "13403"
},
{
"name": "Emacs Lisp",
"bytes": "7822"
},
{
"name": "GLSL",
"bytes": "47102"
},
{
"name": "Gherkin",
"bytes": "305230"
},
{
"name": "Go",
"bytes": "21320"
},
{
"name": "HTML",
"bytes": "1170286"
},
{
"name": "IDL",
"bytes": "10263"
},
{
"name": "Inno Setup",
"bytes": "4337"
},
{
"name": "Java",
"bytes": "5581298"
},
{
"name": "JavaScript",
"bytes": "511179"
},
{
"name": "Lua",
"bytes": "57672"
},
{
"name": "M4",
"bytes": "129381"
},
{
"name": "Makefile",
"bytes": "603329"
},
{
"name": "Module Management System",
"bytes": "2080"
},
{
"name": "Objective-C",
"bytes": "4727440"
},
{
"name": "Objective-C++",
"bytes": "1182400"
},
{
"name": "PHP",
"bytes": "467172"
},
{
"name": "Perl",
"bytes": "57807"
},
{
"name": "PowerShell",
"bytes": "1885"
},
{
"name": "Python",
"bytes": "1988301"
},
{
"name": "QMake",
"bytes": "137217"
},
{
"name": "Roff",
"bytes": "13545"
},
{
"name": "Ruby",
"bytes": "282599"
},
{
"name": "Shell",
"bytes": "1455455"
},
{
"name": "Swift",
"bytes": "135818"
},
{
"name": "TSQL",
"bytes": "3530"
},
{
"name": "Vim Script",
"bytes": "3759"
},
{
"name": "sed",
"bytes": "236"
}
],
"symlink_target": ""
} |
"""
build.py
~~~~~~~~
This module builds a bloomfilter from the NSRL Whitelist Database.
:copyright: (c) 2014 by Josh "blacktop" Maine.
:license: GPLv3
"""
import os
import binascii
from pybloom import BloomFilter
nsrl_path = '/nsrl/NSRLFile.txt'
error_rate = 0.01
# reference - http://stackoverflow.com/a/9631635
def blocks(this_file, size=65536):
while True:
b = this_file.read(size)
if not b:
break
yield b
def main():
if os.path.isfile(nsrl_path):
print "BUILDING: Reading in NSRL Database"
with open(nsrl_path) as f_line:
# Strip off header
_ = f_line.readline()
print "BUILDING: Calculating number of hashes in NSRL..."
num_lines = sum(bl.count("\n") for bl in blocks(f_line))
print "BUILDING: There are %s hashes in the NSRL Database" % num_lines
with open(nsrl_path) as f_nsrl:
# Strip off header
_ = f_nsrl.readline()
print "BUILDING: Creating bloomfilter"
bf = BloomFilter(num_lines, error_rate)
print "BUILDING: Inserting hashes into bloomfilter"
for line in f_nsrl:
md5_hash = line.split(",")[1].strip('"')
if md5_hash:
try:
md5 = binascii.unhexlify(md5_hash)
bf.add(md5)
except Exception as e:
print "ERROR: %s" % e
print "BUILDING: NSRL bloomfilter contains {} items.".format(len(bf))
with open('nsrl.bloom', 'wb') as nb:
bf.tofile(nb)
print "BUILDING: Complete"
else:
print("ERROR: No such file or directory: %s", nsrl_path)
return
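# Illustrative sketch (not part of the original script): once main() has
# written 'nsrl.bloom', the filter can be loaded back and queried. The hashes
# were inserted in raw (unhexlified) form, so queries must do the same.
def check_hash(md5_hex, bloom_path='nsrl.bloom'):
    with open(bloom_path, 'rb') as bloom_file:
        bf = BloomFilter.fromfile(bloom_file)
    return binascii.unhexlify(md5_hex) in bf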
if __name__ == "__main__":
main()
| {
"content_hash": "52ad35753e88c18b70b3ecffc1bc8005",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 82,
"avg_line_length": 28.714285714285715,
"alnum_prop": 0.5439469320066335,
"repo_name": "kost/docker-kf",
"id": "a25c29aeb5f78bc93833d844fa8907c0b6575792",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nsrl/scripts/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3251"
},
{
"name": "Shell",
"bytes": "5966"
}
],
"symlink_target": ""
} |
"""
Tests for the public interface of Automat.
"""
from functools import reduce
from unittest import TestCase
from .. import MethodicalMachine, NoTransition
class MethodicalTests(TestCase):
"""
Tests for L{MethodicalMachine}.
"""
def test_oneTransition(self):
"""
L{MethodicalMachine} provides a way for you to declare a state machine
with inputs, outputs, and states as methods. When you have declared an
input, an output, and a state, calling the input method in that state
will produce the specified output.
"""
class Machination(object):
machine = MethodicalMachine()
@machine.input()
def anInput(self):
"an input"
@machine.output()
def anOutput(self):
"an output"
return "an-output-value"
@machine.output()
def anotherOutput(self):
"another output"
return "another-output-value"
@machine.state(initial=True)
def anState(self):
"a state"
@machine.state()
def anotherState(self):
"another state"
anState.upon(anInput, enter=anotherState, outputs=[anOutput])
anotherState.upon(anInput, enter=anotherState,
outputs=[anotherOutput])
m = Machination()
self.assertEqual(m.anInput(), ["an-output-value"])
self.assertEqual(m.anInput(), ["another-output-value"])
def test_machineItselfIsPrivate(self):
"""
L{MethodicalMachine} is an implementation detail. If you attempt to
access it on an instance of your class, you will get an exception.
However, since tools may need to access it for the purposes of, for
example, visualization, you may access it on the class itself.
"""
expectedMachine = MethodicalMachine()
class Machination(object):
machine = expectedMachine
machination = Machination()
with self.assertRaises(AttributeError) as cm:
machination.machine
self.assertIn("MethodicalMachine is an implementation detail",
str(cm.exception))
self.assertIs(Machination.machine, expectedMachine)
def test_outputsArePrivate(self):
"""
One of the benefits of using a state machine is that your output method
implementations don't need to take invalid state transitions into
account - the methods simply won't be called. This property would be
broken if client code called output methods directly, so output methods
are not directly visible under their names.
"""
class Machination(object):
machine = MethodicalMachine()
counter = 0
@machine.input()
def anInput(self):
"an input"
@machine.output()
def anOutput(self):
self.counter += 1
@machine.state(initial=True)
def state(self):
"a machine state"
state.upon(anInput, enter=state, outputs=[anOutput])
mach1 = Machination()
mach1.anInput()
self.assertEqual(mach1.counter, 1)
mach2 = Machination()
with self.assertRaises(AttributeError) as cm:
mach2.anOutput
self.assertEqual(mach2.counter, 0)
self.assertIn(
"Machination.anOutput is a state-machine output method; to "
"produce this output, call an input method instead.",
str(cm.exception)
)
def test_multipleMachines(self):
"""
Two machines may co-exist happily on the same instance; they don't
interfere with each other.
"""
class MultiMach(object):
a = MethodicalMachine()
b = MethodicalMachine()
@a.input()
def inputA(self):
"input A"
@b.input()
def inputB(self):
"input B"
@a.state(initial=True)
def initialA(self):
"initial A"
@b.state(initial=True)
def initialB(self):
"initial B"
@a.output()
def outputA(self):
return "A"
@b.output()
def outputB(self):
return "B"
initialA.upon(inputA, initialA, [outputA])
initialB.upon(inputB, initialB, [outputB])
mm = MultiMach()
self.assertEqual(mm.inputA(), ["A"])
self.assertEqual(mm.inputB(), ["B"])
def test_collectOutputs(self):
"""
Outputs can be combined with the "collector" argument to "upon".
"""
import operator
class Machine(object):
m = MethodicalMachine()
@m.input()
def input(self):
"an input"
@m.output()
def outputA(self):
return "A"
@m.output()
def outputB(self):
return "B"
@m.state(initial=True)
def state(self):
"a state"
state.upon(input, state, [outputA, outputB],
collector=lambda x: reduce(operator.add, x))
m = Machine()
self.assertEqual(m.input(), "AB")
def test_methodName(self):
"""
Input methods preserve their declared names.
"""
class Mech(object):
m = MethodicalMachine()
@m.input()
def declaredInputName(self):
"an input"
@m.state(initial=True)
def aState(self):
"state"
m = Mech()
with self.assertRaises(TypeError) as cm:
m.declaredInputName("too", "many", "arguments")
self.assertIn("declaredInputName", str(cm.exception))
def test_inputWithArguments(self):
"""
If an input takes an argument, it will pass that along to its output.
"""
class Mechanism(object):
m = MethodicalMachine()
@m.input()
def input(self, x, y=1):
"an input"
@m.state(initial=True)
def state(self):
"a state"
@m.output()
def output(self, x, y=1):
self._x = x
return x + y
state.upon(input, state, [output])
m = Mechanism()
self.assertEqual(m.input(3), [4])
self.assertEqual(m._x, 3)
def test_inputOutputMismatch(self):
"""
All the argument lists of the outputs for a given input must match; if
        one does not, the call to C{upon} will raise a C{TypeError}.
"""
class Mechanism(object):
m = MethodicalMachine()
@m.input()
def nameOfInput(self, a):
"an input"
@m.output()
def outputThatMatches(self, a):
"an output that matches"
@m.output()
def outputThatDoesntMatch(self, b):
"an output that doesn't match"
@m.state()
def state(self):
"a state"
with self.assertRaises(TypeError) as cm:
state.upon(nameOfInput, state, [outputThatMatches,
outputThatDoesntMatch])
self.assertIn("nameOfInput", str(cm.exception))
self.assertIn("outputThatDoesntMatch", str(cm.exception))
def test_multipleInitialStatesFailure(self):
"""
A L{MethodicalMachine} can only have one initial state.
"""
class WillFail(object):
m = MethodicalMachine()
@m.state(initial=True)
def firstInitialState(self):
"The first initial state -- this is OK."
with self.assertRaises(ValueError):
@m.state(initial=True)
def secondInitialState(self):
"The second initial state -- results in a ValueError."
def test_badTransitionForCurrentState(self):
"""
Calling any input method that lacks a transition for the machine's
current state raises an informative L{NoTransition}.
"""
class OnlyOnePath(object):
m = MethodicalMachine()
@m.state(initial=True)
def start(self):
"Start state."
@m.state()
def end(self):
"End state."
@m.input()
def advance(self):
"Move from start to end."
@m.input()
def deadEnd(self):
"A transition from nowhere to nowhere."
start.upon(advance, end, [])
machine = OnlyOnePath()
with self.assertRaises(NoTransition) as cm:
machine.deadEnd()
self.assertIn("deadEnd", str(cm.exception))
self.assertIn("start", str(cm.exception))
machine.advance()
with self.assertRaises(NoTransition) as cm:
machine.deadEnd()
self.assertIn("deadEnd", str(cm.exception))
self.assertIn("end", str(cm.exception))
def test_saveState(self):
"""
L{MethodicalMachine.serializer} is a decorator that modifies its
decoratee's signature to take a "state" object as its first argument,
which is the "serialized" argument to the L{MethodicalMachine.state}
decorator.
"""
class Mechanism(object):
m = MethodicalMachine()
def __init__(self):
self.value = 1
@m.state(serialized="first-state", initial=True)
def first(self):
"First state."
@m.state(serialized="second-state")
def second(self):
"Second state."
@m.serializer()
def save(self, state):
return {
'machine-state': state,
'some-value': self.value,
}
self.assertEqual(
Mechanism().save(),
{
"machine-state": "first-state",
"some-value": 1,
}
)
def test_restoreState(self):
"""
L{MethodicalMachine.unserializer} decorates a function that becomes a
machine-state unserializer; its return value is mapped to the
C{serialized} parameter to C{state}, and the L{MethodicalMachine}
associated with that instance's state is updated to that state.
"""
class Mechanism(object):
m = MethodicalMachine()
def __init__(self):
self.value = 1
self.ranOutput = False
@m.state(serialized="first-state", initial=True)
def first(self):
"First state."
@m.state(serialized="second-state")
def second(self):
"Second state."
@m.input()
def input(self):
"an input"
@m.output()
def output(self):
self.value = 2
self.ranOutput = True
return 1
@m.output()
def output2(self):
return 2
first.upon(input, second, [output],
collector=lambda x: list(x)[0])
second.upon(input, second, [output2],
collector=lambda x: list(x)[0])
@m.serializer()
def save(self, state):
return {
'machine-state': state,
'some-value': self.value,
}
@m.unserializer()
def _restore(self, blob):
self.value = blob['some-value']
return blob['machine-state']
@classmethod
def fromBlob(cls, blob):
self = cls()
self._restore(blob)
return self
m1 = Mechanism()
m1.input()
blob = m1.save()
m2 = Mechanism.fromBlob(blob)
self.assertEqual(m2.ranOutput, False)
self.assertEqual(m2.input(), 2)
self.assertEqual(
m2.save(),
{
'machine-state': 'second-state',
'some-value': 2,
}
)
# FIXME: error for wrong types on any call to _oneTransition
# FIXME: better public API for .upon; maybe a context manager?
# FIXME: when transitions are defined, validate that we can always get to
# terminal? do we care about this?
# FIXME: implementation (and use-case/example) for passing args from in to out
# FIXME: possibly these need some kind of support from core
# FIXME: wildcard state (in all states, when input X, emit Y and go to Z)
# FIXME: wildcard input (in state X, when any input, emit Y and go to Z)
# FIXME: combined wildcards (in any state for any input, emit Y go to Z)
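# --- Illustrative sketch (not part of the original test module) -------------
# The same API the tests above exercise, outside the unittest harness: a
# two-state machine with one input, one output and an initial state.
class _DocExampleTurnstile(object):
    machine = MethodicalMachine()
    @machine.input()
    def coin(self):
        "A coin was inserted."
    @machine.output()
    def unlock(self):
        return "unlocked"
    @machine.state(initial=True)
    def locked(self):
        "The turnstile is locked."
    @machine.state()
    def unlocked(self):
        "The turnstile is unlocked."
    locked.upon(coin, enter=unlocked, outputs=[unlock])
if __name__ == '__main__':
    # calling the input in the initial state produces the declared output
    print(_DocExampleTurnstile().coin())  # ['unlocked']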
| {
"content_hash": "7ea95099f5a0fdc1d2a4004afae2ccb7",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 79,
"avg_line_length": 32.849624060150376,
"alnum_prop": 0.5269703212024109,
"repo_name": "ntuecon/server",
"id": "c94ee7e3130df72cb0338937802b30376ace4aee",
"size": "13108",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pyenv/Lib/site-packages/automat/_test/test_methodical.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2209"
},
{
"name": "Batchfile",
"bytes": "1509"
},
{
"name": "C",
"bytes": "504013"
},
{
"name": "C++",
"bytes": "96440"
},
{
"name": "CSS",
"bytes": "133288"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "150026"
},
{
"name": "JavaScript",
"bytes": "243314"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "27048260"
},
{
"name": "Shell",
"bytes": "47820"
},
{
"name": "Tcl",
"bytes": "1237796"
},
{
"name": "Visual Basic",
"bytes": "949"
},
{
"name": "XSLT",
"bytes": "2113"
}
],
"symlink_target": ""
} |
import sys, os, logging
from hyperopt import hp, fmin, tpe
from hyperopt import STATUS_OK, STATUS_FAIL
from ghosh import GhoshModel, MODEL_FILE
LEARN = 'a'
KERNEL = 'kernel'
BATCH = 'batch'
log = logging.getLogger('OPTIMIZER')
class Optimizer:
def __init__(self, model, output_dir, train_path, validation_path):
self.model = model
self.output_dir = output_dir
self.train_path = train_path
self.validation_path = validation_path
def objective(self, args):
learn = args[LEARN]
kernel = args[KERNEL]
batch = args[BATCH]
log.info("Running objective: %s, %s, %s" % (str(batch), str(kernel), str(learn)))
log.info(" Batch size: %s" % str(batch))
log.info(" Kernel size: %s" % str(kernel))
log.info(" Learning rate: %s" % str(learn))
if learn is None or kernel is None or batch is None:
return {'status': STATUS_FAIL}
out_path = self.get_output_dir(learn, kernel, batch)
log.info("Outputting to %s" % out_path)
if not os.path.isdir(out_path):
os.mkdir(out_path)
log.info("Creating model")
model = self.model(self.train_path,
self.validation_path,
learning_rate=learn,
kernel_size=kernel,
batch_size=batch,
output_dir=out_path,
retrain=True)
log.info("Running model")
f1_score, accuracy = model.run()
result = {
'status': STATUS_OK,
'loss': 1 - (sum(f1_score) / float(len(f1_score))),
'attachments': {
'model': model.get_file_path(MODEL_FILE),
'dir': out_path,
'f1_score': f1_score,
'accuracy': accuracy
}
}
log.info(result)
return result
def get_output_dir(self, a, b, c):
return os.path.join(self.output_dir, "optimize_%s_%s_%s" % (a,b,c))
def main():
#Setup log
dir_path = os.path.dirname(os.path.realpath(__file__))
fmt = "%(levelname) -10s %(asctime)s %(module)s:%(lineno)s %(funcName)s %(message)s"
handler = logging.FileHandler(os.path.join(dir_path, 'optimizer.log'), mode='w')
handler.setFormatter(logging.Formatter(fmt))
log.addHandler(handler)
log.setLevel(logging.DEBUG)
try:
optimizer = Optimizer(GhoshModel, sys.argv[3], sys.argv[1], sys.argv[2])
except Exception as e:
log.error(e)
space = {
LEARN: hp.uniform(LEARN, 0.0000001, 0.0001),
        KERNEL: hp.quniform(KERNEL, 3, 8, 1),
        BATCH: hp.quniform(BATCH, 4, 128, 1)
}
log.info("Space:")
log.info(space)
best = fmin(optimizer.objective,
space=space,
algo=tpe.suggest,
max_evals=100)
print(best)
log.info(str(best))
if __name__ == '__main__':
main()
| {
"content_hash": "fbf50680b89e449cd6ef7d85c8e03e94",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 89,
"avg_line_length": 28.596153846153847,
"alnum_prop": 0.5457296570275723,
"repo_name": "cosanlab/emote",
"id": "1f11d89f93bc15a2b8711557ee5f2b9f92e2dc9f",
"size": "2974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v2/optimize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Protocol Buffer",
"bytes": "124"
},
{
"name": "Python",
"bytes": "93872"
},
{
"name": "Shell",
"bytes": "2788"
}
],
"symlink_target": ""
} |
"""Dropout operations and handling."""
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
import tensorflow as tf
# Key to collect dropout probabilities.
DROPOUTS = "dropouts"
def dropout(tensor_in, prob, name=None):
"""Adds dropout node and stores probability tensor into graph collection.
Args:
tensor_in: Input tensor.
prob: Float or Tensor.
Returns:
    Tensor of the same shape as `tensor_in`.
  Raises:
      ValueError: If `prob` is not in `(0, 1]`.
"""
with tf.op_scope([tensor_in], name, "dropout") as name:
if isinstance(prob, float):
prob = tf.get_variable("prob", [],
initializer=tf.constant_initializer(prob),
trainable=False)
tf.add_to_collection(DROPOUTS, prob)
return tf.nn.dropout(tensor_in, prob)
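# Illustrative usage sketch (not part of the original module), assuming the
# TF 0.x-era API used above: apply dropout to a placeholder and fetch the
# probability variable back from the DROPOUTS collection (for instance to
# drive it towards 1.0 when predicting).
if __name__ == '__main__':
    inputs = tf.placeholder(tf.float32, [None, 10])
    dropped = dropout(inputs, 0.5)
    keep_prob_var = tf.get_collection(DROPOUTS)[0]
    print(dropped.get_shape(), keep_prob_var.name)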
| {
"content_hash": "733251783d9592d931342ffcc351d74b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 33.644444444444446,
"alnum_prop": 0.6651254953764861,
"repo_name": "dansbecker/skflow",
"id": "b0c5836632ff0bea713e560e75bdee0e5223a059",
"size": "1514",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "skflow/ops/dropout_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "178200"
},
{
"name": "Shell",
"bytes": "2075"
}
],
"symlink_target": ""
} |
import sys
import unicodedata
#reload(sys)
#sys.setdefaultencoding("utf-8")
class StopwordFilter:
def __init__(self):
self.list = []
def add_stopword(self, word):
self.list.append(word)
def get_stopword_list(self):
return self.list
def filter(self, sentence):
tmp_sentence = []
for word in sentence:
word = self.remove_accents(word).lower()
if word not in self.list:
tmp_sentence.append(word)
return tmp_sentence
def remove_accents(self, string):
nkfd_form = unicodedata.normalize('NFKD', string)
return u"".join([c for c in nkfd_form if not unicodedata.combining(c)])
def load(self, lang):
with open('cognitiveSQL/stopwords/' + lang + '.txt', encoding='utf8') as f:
lines = f.read().split('\n')
for word in lines:
stopword = self.remove_accents(word).lower()
self.list.append(stopword)
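# Illustrative sketch (not part of the original class): minimal usage without
# the bundled stopword files. Words are lower-cased and accent-stripped
# before the membership test, so casing does not matter.
if __name__ == '__main__':
    sf = StopwordFilter()
    sf.add_stopword('the')
    print(sf.filter(['The', 'quick', 'Brown', 'fox']))  # ['quick', 'brown', 'fox']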
| {
"content_hash": "7a4e27b745862f09fcc30eb00ec5b8d9",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 83,
"avg_line_length": 28.542857142857144,
"alnum_prop": 0.5785785785785785,
"repo_name": "dhmodi/virtual_patient_assistant",
"id": "be79a0e461d140e450b00bff34e7cd67c42e3ac3",
"size": "1023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cognitiveSQL/StopwordFilter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "372684"
},
{
"name": "HTML",
"bytes": "17847"
},
{
"name": "JavaScript",
"bytes": "753996"
},
{
"name": "Python",
"bytes": "81149"
}
],
"symlink_target": ""
} |
"""
Integer factorization
"""
from __future__ import print_function, division
import random
import math
from .primetest import isprime
from .generate import sieve, primerange, nextprime
from sympy.core import sympify
from sympy.core.evalf import bitcount
from sympy.core.logic import fuzzy_and
from sympy.core.numbers import igcd, Rational
from sympy.core.power import integer_nthroot, Pow
from sympy.core.mul import Mul
from sympy.core.compatibility import as_int, SYMPY_INTS, range
from sympy.core.singleton import S
from sympy.core.function import Function
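# small_trailing[i] is the number of trailing zero bits in the binary
# representation of i for 0 <= i < 256 (with small_trailing[0] == 0); it is
# used by trailing() below as a byte-wise lookup table.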
small_trailing = [i and max(int(not i % 2**j) and j for j in range(1, 8))
for i in range(256)]
def smoothness(n):
"""
Return the B-smooth and B-power smooth values of n.
    The smoothness of n is the largest prime factor of n; the power-
    smoothness is the largest prime power that divides n exactly (the
    maximum of p**m over the prime factorization of n).
>>> from sympy.ntheory.factor_ import smoothness
>>> smoothness(2**7*3**2)
(3, 128)
>>> smoothness(2**4*13)
(13, 16)
>>> smoothness(2)
(2, 2)
See Also
========
factorint, smoothness_p
"""
if n == 1:
return (1, 1) # not prime, but otherwise this causes headaches
facs = factorint(n)
return max(facs), max(m**facs[m] for m in facs)
def smoothness_p(n, m=-1, power=0, visual=None):
"""
Return a list of [m, (p, (M, sm(p + m), psm(p + m)))...]
where:
1. p**M is the base-p divisor of n
2. sm(p + m) is the smoothness of p + m (m = -1 by default)
3. psm(p + m) is the power smoothness of p + m
The list is sorted according to smoothness (default) or by power smoothness
if power=1.
The smoothness of the numbers to the left (m = -1) or right (m = 1) of a
    factor governs the results that are obtained from the p +/- 1 type factoring
methods.
>>> from sympy.ntheory.factor_ import smoothness_p, factorint
>>> smoothness_p(10431, m=1)
(1, [(3, (2, 2, 4)), (19, (1, 5, 5)), (61, (1, 31, 31))])
>>> smoothness_p(10431)
(-1, [(3, (2, 2, 2)), (19, (1, 3, 9)), (61, (1, 5, 5))])
>>> smoothness_p(10431, power=1)
(-1, [(3, (2, 2, 2)), (61, (1, 5, 5)), (19, (1, 3, 9))])
If visual=True then an annotated string will be returned:
>>> print(smoothness_p(21477639576571, visual=1))
p**i=4410317**1 has p-1 B=1787, B-pow=1787
p**i=4869863**1 has p-1 B=2434931, B-pow=2434931
This string can also be generated directly from a factorization dictionary
and vice versa:
>>> factorint(17*9)
{3: 2, 17: 1}
>>> smoothness_p(_)
'p**i=3**2 has p-1 B=2, B-pow=2\\np**i=17**1 has p-1 B=2, B-pow=16'
>>> smoothness_p(_)
{3: 2, 17: 1}
The table of the output logic is:
====== ====== ======= =======
| Visual
------ ----------------------
Input True False other
====== ====== ======= =======
dict str tuple str
str str tuple dict
tuple str tuple str
n str tuple tuple
mul str tuple tuple
====== ====== ======= =======
See Also
========
factorint, smoothness
"""
from sympy.utilities import flatten
# visual must be True, False or other (stored as None)
if visual in (1, 0):
visual = bool(visual)
elif visual not in (True, False):
visual = None
if type(n) is str:
if visual:
return n
d = {}
for li in n.splitlines():
k, v = [int(i) for i in
li.split('has')[0].split('=')[1].split('**')]
d[k] = v
if visual is not True and visual is not False:
return d
return smoothness_p(d, visual=False)
elif type(n) is not tuple:
facs = factorint(n, visual=False)
if power:
k = -1
else:
k = 1
if type(n) is not tuple:
rv = (m, sorted([(f,
tuple([M] + list(smoothness(f + m))))
for f, M in [i for i in facs.items()]],
key=lambda x: (x[1][k], x[0])))
else:
rv = n
if visual is False or (visual is not True) and (type(n) in [int, Mul]):
return rv
lines = []
for dat in rv[1]:
dat = flatten(dat)
dat.insert(2, m)
lines.append('p**i=%i**%i has p%+i B=%i, B-pow=%i' % tuple(dat))
return '\n'.join(lines)
def trailing(n):
"""Count the number of trailing zero digits in the binary
representation of n, i.e. determine the largest power of 2
that divides n.
Examples
========
>>> from sympy import trailing
>>> trailing(128)
7
>>> trailing(63)
0
"""
n = int(n)
if not n:
return 0
low_byte = n & 0xff
if low_byte:
return small_trailing[low_byte]
# 2**m is quick for z up through 2**30
z = bitcount(n) - 1
if isinstance(z, SYMPY_INTS):
if n == 1 << z:
return z
t = 0
p = 8
while not n & 1:
while not n & ((1 << p) - 1):
n >>= p
t += p
p *= 2
p //= 2
return t
def multiplicity(p, n):
"""
Find the greatest integer m such that p**m divides n.
Examples
========
>>> from sympy.ntheory import multiplicity
>>> from sympy.core.numbers import Rational as R
>>> [multiplicity(5, n) for n in [8, 5, 25, 125, 250]]
[0, 1, 2, 3, 3]
>>> multiplicity(3, R(1, 9))
-2
"""
try:
p, n = as_int(p), as_int(n)
except ValueError:
if all(isinstance(i, (SYMPY_INTS, Rational)) for i in (p, n)):
try:
p = Rational(p)
n = Rational(n)
if p.q == 1:
if n.p == 1:
return -multiplicity(p.p, n.q)
return S.Zero
elif p.p == 1:
return multiplicity(p.q, n.q)
else:
like = min(
multiplicity(p.p, n.p),
multiplicity(p.q, n.q))
cross = min(
multiplicity(p.q, n.p),
multiplicity(p.p, n.q))
return like - cross
except AttributeError:
pass
raise ValueError('expecting ints or fractions, got %s and %s' % (p, n))
if p == 2:
return trailing(n)
if p < 2:
raise ValueError('p must be an integer, 2 or larger, but got %s' % p)
if p == n:
return 1
m = 0
n, rem = divmod(n, p)
while not rem:
m += 1
if m > 5:
# The multiplicity could be very large. Better
# to increment in powers of two
e = 2
while 1:
ppow = p**e
if ppow < n:
nnew, rem = divmod(n, ppow)
if not rem:
m += e
e *= 2
n = nnew
continue
return m + multiplicity(p, n)
n, rem = divmod(n, p)
return m
def perfect_power(n, candidates=None, big=True, factor=True):
"""
Return ``(b, e)`` such that ``n`` == ``b**e`` if ``n`` is a
perfect power; otherwise return ``False``.
By default, the base is recursively decomposed and the exponents
collected so the largest possible ``e`` is sought. If ``big=False``
then the smallest possible ``e`` (thus prime) will be chosen.
If ``candidates`` for exponents are given, they are assumed to be sorted
and the first one that is larger than the computed maximum will signal
failure for the routine.
If ``factor=True`` then simultaneous factorization of n is attempted
since finding a factor indicates the only possible root for n. This
is True by default since only a few small factors will be tested in
the course of searching for the perfect power.
Examples
========
>>> from sympy import perfect_power
>>> perfect_power(16)
(2, 4)
>>> perfect_power(16, big = False)
(4, 2)
"""
n = int(n)
if n < 3:
return False
logn = math.log(n, 2)
max_possible = int(logn) + 2 # only check values less than this
not_square = n % 10 in [2, 3, 7, 8] # squares cannot end in 2, 3, 7, 8
if not candidates:
candidates = primerange(2 + not_square, max_possible)
afactor = 2 + n % 2
for e in candidates:
if e < 3:
if e == 1 or e == 2 and not_square:
continue
if e > max_possible:
return False
# see if there is a factor present
if factor:
if n % afactor == 0:
# find what the potential power is
if afactor == 2:
e = trailing(n)
else:
e = multiplicity(afactor, n)
# if it's a trivial power we are done
if e == 1:
return False
# maybe the bth root of n is exact
r, exact = integer_nthroot(n, e)
if not exact:
# then remove this factor and check to see if
# any of e's factors are a common exponent; if
# not then it's not a perfect power
n //= afactor**e
m = perfect_power(n, candidates=primefactors(e), big=big)
if m is False:
return False
else:
r, m = m
# adjust the two exponents so the bases can
# be combined
g = igcd(m, e)
if g == 1:
return False
m //= g
e //= g
r, e = r**m*afactor**e, g
if not big:
e0 = primefactors(e)
if len(e0) > 1 or e0[0] != e:
e0 = e0[0]
r, e = r**(e//e0), e0
return r, e
else:
# get the next factor ready for the next pass through the loop
afactor = nextprime(afactor)
# Weed out downright impossible candidates
if logn/e < 40:
b = 2.0**(logn/e)
if abs(int(b + 0.5) - b) > 0.01:
continue
# now see if the plausible e makes a perfect power
r, exact = integer_nthroot(n, e)
if exact:
if big:
m = perfect_power(r, big=big, factor=factor)
if m is not False:
r, e = m[0], e*m[1]
return int(r), e
else:
return False
def pollard_rho(n, s=2, a=1, retries=5, seed=1234, max_steps=None, F=None):
r"""
Use Pollard's rho method to try to extract a nontrivial factor
of ``n``. The returned factor may be a composite number. If no
factor is found, ``None`` is returned.
The algorithm generates pseudo-random values of x with a generator
function, replacing x with F(x). If F is not supplied then the
function x**2 + ``a`` is used. The first value supplied to F(x) is ``s``.
Upon failure (if ``retries`` is > 0) a new ``a`` and ``s`` will be
supplied; the ``a`` will be ignored if F was supplied.
The sequence of numbers generated by such functions generally have a
a lead-up to some number and then loop around back to that number and
begin to repeat the sequence, e.g. 1, 2, 3, 4, 5, 3, 4, 5 -- this leader
and loop look a bit like the Greek letter rho, and thus the name, 'rho'.
For a given function, very different leader-loop values can be obtained
so it is a good idea to allow for retries:
>>> from sympy.ntheory.generate import cycle_length
>>> n = 16843009
>>> F = lambda x:(2048*pow(x, 2, n) + 32767) % n
>>> for s in range(5):
... print('loop length = %4i; leader length = %3i' % next(cycle_length(F, s)))
...
loop length = 2489; leader length = 42
loop length = 78; leader length = 120
loop length = 1482; leader length = 99
loop length = 1482; leader length = 285
loop length = 1482; leader length = 100
Here is an explicit example where there is a two element leadup to
a sequence of 3 numbers (11, 14, 4) that then repeat:
>>> x=2
>>> for i in range(9):
... x=(x**2+12)%17
... print(x)
...
16
13
11
14
4
11
14
4
11
>>> next(cycle_length(lambda x: (x**2+12)%17, 2))
(3, 2)
>>> list(cycle_length(lambda x: (x**2+12)%17, 2, values=True))
[16, 13, 11, 14, 4]
Instead of checking the differences of all generated values for a gcd
with n, only the kth and 2*kth numbers are checked, e.g. 1st and 2nd,
2nd and 4th, 3rd and 6th until it has been detected that the loop has been
traversed. Loops may be many thousands of steps long before rho finds a
factor or reports failure. If ``max_steps`` is specified, the iteration
is cancelled with a failure after the specified number of steps.
Examples
========
>>> from sympy import pollard_rho
>>> n=16843009
>>> F=lambda x:(2048*pow(x,2,n) + 32767) % n
>>> pollard_rho(n, F=F)
257
Use the default setting with a bad value of ``a`` and no retries:
>>> pollard_rho(n, a=n-2, retries=0)
If retries is > 0 then perhaps the problem will correct itself when
new values are generated for a:
>>> pollard_rho(n, a=n-2, retries=1)
257
References
==========
- Richard Crandall & Carl Pomerance (2005), "Prime Numbers:
A Computational Perspective", Springer, 2nd edition, 229-231
"""
n = int(n)
if n < 5:
raise ValueError('pollard_rho should receive n > 4')
prng = random.Random(seed + retries)
V = s
for i in range(retries + 1):
U = V
if not F:
F = lambda x: (pow(x, 2, n) + a) % n
j = 0
while 1:
if max_steps and (j > max_steps):
break
j += 1
U = F(U)
V = F(F(V)) # V is 2x further along than U
g = igcd(U - V, n)
if g == 1:
continue
if g == n:
break
return int(g)
V = prng.randint(0, n - 1)
a = prng.randint(1, n - 3) # for x**2 + a, a%n should not be 0 or -2
F = None
return None
def pollard_pm1(n, B=10, a=2, retries=0, seed=1234):
"""
Use Pollard's p-1 method to try to extract a nontrivial factor
of ``n``. Either a divisor (perhaps composite) or ``None`` is returned.
The value of ``a`` is the base that is used in the test gcd(a**M - 1, n).
The default is 2. If ``retries`` > 0 then if no factor is found after the
first attempt, a new ``a`` will be generated randomly (using the ``seed``)
and the process repeated.
Note: the value of M is lcm(1..B) = reduce(ilcm, range(2, B + 1)).
A search is made for factors next to even numbers having a power smoothness
less than ``B``. Choosing a larger B increases the likelihood of finding a
larger factor but takes longer. Whether a factor of n is found or not
    depends on ``a`` and the power smoothness of the even number just less than
the factor p (hence the name p - 1).
    Although there is some discussion of what constitutes a good ``a``, the
    descriptions are hard to interpret. At the modular.math site referenced
below it is stated that if gcd(a**M - 1, n) = N then a**M % q**r is 1
for every prime power divisor of N. But consider the following:
>>> from sympy.ntheory.factor_ import smoothness_p, pollard_pm1
>>> n=257*1009
>>> smoothness_p(n)
(-1, [(257, (1, 2, 256)), (1009, (1, 7, 16))])
So we should (and can) find a root with B=16:
>>> pollard_pm1(n, B=16, a=3)
1009
If we attempt to increase B to 256 we find that it doesn't work:
>>> pollard_pm1(n, B=256)
>>>
But if the value of ``a`` is changed we find that only multiples of
257 work, e.g.:
>>> pollard_pm1(n, B=256, a=257)
1009
Checking different ``a`` values shows that all the ones that didn't
work had a gcd value not equal to ``n`` but equal to one of the
factors:
>>> from sympy.core.numbers import ilcm, igcd
>>> from sympy import factorint, Pow
>>> M = 1
>>> for i in range(2, 256):
... M = ilcm(M, i)
...
>>> set([igcd(pow(a, M, n) - 1, n) for a in range(2, 256) if
... igcd(pow(a, M, n) - 1, n) != n])
set([1009])
But does aM % d for every divisor of n give 1?
>>> aM = pow(255, M, n)
>>> [(d, aM%Pow(*d.args)) for d in factorint(n, visual=True).args]
[(257**1, 1), (1009**1, 1)]
No, only one of them. So perhaps the principle is that a root will
be found for a given value of B provided that:
1) the power smoothness of the p - 1 value next to the root
does not exceed B
2) a**M % p != 1 for any of the divisors of n.
By trying more than one ``a`` it is possible that one of them
will yield a factor.
Examples
========
With the default smoothness bound, this number can't be cracked:
>>> from sympy.ntheory import pollard_pm1, primefactors
>>> pollard_pm1(21477639576571)
Increasing the smoothness bound helps:
>>> pollard_pm1(21477639576571, B=2000)
4410317
Looking at the smoothness of the factors of this number we find:
>>> from sympy.utilities import flatten
>>> from sympy.ntheory.factor_ import smoothness_p, factorint
>>> print(smoothness_p(21477639576571, visual=1))
p**i=4410317**1 has p-1 B=1787, B-pow=1787
p**i=4869863**1 has p-1 B=2434931, B-pow=2434931
The B and B-pow are the same for the p - 1 factorizations of the divisors
because those factorizations had a very large prime factor:
>>> factorint(4410317 - 1)
{2: 2, 617: 1, 1787: 1}
>>> factorint(4869863-1)
{2: 1, 2434931: 1}
Note that until B reaches the B-pow value of 1787, the number is not cracked;
>>> pollard_pm1(21477639576571, B=1786)
>>> pollard_pm1(21477639576571, B=1787)
4410317
The B value has to do with the factors of the number next to the divisor,
not the divisors themselves. A worst case scenario is that the number next
    to the factor p has a large prime divisor or is a perfect power. If these
conditions apply then the power-smoothness will be about p/2 or p. The more
    realistic scenario is that there will be a large prime factor next to p, requiring
a B value on the order of p/2. Although primes may have been searched for
up to this level, the p/2 is a factor of p - 1, something that we don't
know. The modular.math reference below states that 15% of numbers in the
range of 10**15 to 15**15 + 10**4 are 10**6 power smooth so a B of 10**6
will fail 85% of the time in that range. From 10**8 to 10**8 + 10**3 the
percentages are nearly reversed...but in that range the simple trial
division is quite fast.
References
==========
- Richard Crandall & Carl Pomerance (2005), "Prime Numbers:
A Computational Perspective", Springer, 2nd edition, 236-238
- http://modular.math.washington.edu/edu/2007/spring/ent/ent-html/node81.html
- http://www.cs.toronto.edu/~yuvalf/Factorization.pdf
"""
n = int(n)
if n < 4 or B < 3:
raise ValueError('pollard_pm1 should receive n > 3 and B > 2')
prng = random.Random(seed + B)
# computing a**lcm(1,2,3,..B) % n for B > 2
# it looks weird, but it's right: primes run [2, B]
# and the answer's not right until the loop is done.
for i in range(retries + 1):
aM = a
for p in sieve.primerange(2, B + 1):
e = int(math.log(B, p))
aM = pow(aM, pow(p, e), n)
g = igcd(aM - 1, n)
if 1 < g < n:
return int(g)
# get a new a:
# since the exponent, lcm(1..B), is even, if we allow 'a' to be 'n-1'
# then (n - 1)**even % n will be 1 which will give a g of 0 and 1 will
# give a zero, too, so we set the range as [2, n-2]. Some references
# say 'a' should be coprime to n, but either will detect factors.
a = prng.randint(2, n - 2)
def _trial(factors, n, candidates, verbose=False):
"""
    Helper function for integer factorization. Trial factors ``n``
against all integers given in the sequence ``candidates``
and updates the dict ``factors`` in-place. Returns the reduced
value of ``n`` and a flag indicating whether any factors were found.
"""
if verbose:
factors0 = list(factors.keys())
nfactors = len(factors)
for d in candidates:
if n % d == 0:
m = multiplicity(d, n)
n //= d**m
factors[d] = m
if verbose:
for k in sorted(set(factors).difference(set(factors0))):
print(factor_msg % (k, factors[k]))
return int(n), len(factors) != nfactors
def _check_termination(factors, n, limitp1, use_trial, use_rho, use_pm1,
verbose):
"""
Helper function for integer factorization. Checks if ``n``
is a prime or a perfect power, and in those cases updates
the factorization and raises ``StopIteration``.
"""
if verbose:
print('Check for termination')
# since we've already been factoring there is no need to do
# simultaneous factoring with the power check
p = perfect_power(n, factor=False)
if p is not False:
base, exp = p
if limitp1:
limit = limitp1 - 1
else:
limit = limitp1
facs = factorint(base, limit, use_trial, use_rho, use_pm1,
verbose=False)
for b, e in facs.items():
if verbose:
print(factor_msg % (b, e))
factors[b] = exp*e
raise StopIteration
if isprime(n):
factors[int(n)] = 1
raise StopIteration
if n == 1:
raise StopIteration
trial_int_msg = "Trial division with ints [%i ... %i] and fail_max=%i"
trial_msg = "Trial division with primes [%i ... %i]"
rho_msg = "Pollard's rho with retries %i, max_steps %i and seed %i"
pm1_msg = "Pollard's p-1 with smoothness bound %i and seed %i"
factor_msg = '\t%i ** %i'
fermat_msg = 'Close factors satisfying Fermat condition found.'
complete_msg = 'Factorization is complete.'
def _factorint_small(factors, n, limit, fail_max):
"""
Return the value of n and either a 0 (indicating that factorization up
to the limit was complete) or else the next near-prime that would have
been tested.
Factoring stops if there are fail_max unsuccessful tests in a row.
If factors of n were found they will be in the factors dictionary as
{factor: multiplicity} and the returned value of n will have had those
factors removed. The factors dictionary is modified in-place.
"""
def done(n, d):
"""return n, d if the sqrt(n) wasn't reached yet, else
n, 0 indicating that factoring is done.
"""
if d*d <= n:
return n, d
return n, 0
d = 2
m = trailing(n)
if m:
factors[d] = m
n >>= m
d = 3
if limit < d:
if n > 1:
factors[n] = 1
return done(n, d)
# reduce
m = 0
while n % d == 0:
n //= d
m += 1
if m == 20:
mm = multiplicity(d, n)
m += mm
n //= d**mm
break
if m:
factors[d] = m
# when d*d exceeds maxx or n we are done; if limit**2 is greater
# than n then maxx is set to zero so the value of n will flag the finish
if limit*limit > n:
maxx = 0
else:
maxx = limit*limit
dd = maxx or n
d = 5
fails = 0
while fails < fail_max:
if d*d > dd:
break
# d = 6*i - 1
# reduce
m = 0
while n % d == 0:
n //= d
m += 1
if m == 20:
mm = multiplicity(d, n)
m += mm
n //= d**mm
break
if m:
factors[d] = m
dd = maxx or n
fails = 0
else:
fails += 1
d += 2
if d*d > dd:
break
        # d = 6*i + 1
# reduce
m = 0
while n % d == 0:
n //= d
m += 1
if m == 20:
mm = multiplicity(d, n)
m += mm
n //= d**mm
break
if m:
factors[d] = m
dd = maxx or n
fails = 0
else:
fails += 1
# d = 6*(i+1) - 1
d += 4
return done(n, d)
def factorint(n, limit=None, use_trial=True, use_rho=True, use_pm1=True,
verbose=False, visual=None):
r"""
Given a positive integer ``n``, ``factorint(n)`` returns a dict containing
the prime factors of ``n`` as keys and their respective multiplicities
as values. For example:
>>> from sympy.ntheory import factorint
>>> factorint(2000) # 2000 = (2**4) * (5**3)
{2: 4, 5: 3}
>>> factorint(65537) # This number is prime
{65537: 1}
For input less than 2, factorint behaves as follows:
- ``factorint(1)`` returns the empty factorization, ``{}``
- ``factorint(0)`` returns ``{0:1}``
- ``factorint(-n)`` adds ``-1:1`` to the factors and then factors ``n``
Partial Factorization:
If ``limit`` (> 3) is specified, the search is stopped after performing
trial division up to (and including) the limit (or taking a
corresponding number of rho/p-1 steps). This is useful if one has
a large number and only is interested in finding small factors (if
any). Note that setting a limit does not prevent larger factors
from being found early; it simply means that the largest factor may
be composite. Since checking for perfect power is relatively cheap, it is
done regardless of the limit setting.
This number, for example, has two small factors and a huge
semi-prime factor that cannot be reduced easily:
>>> from sympy.ntheory import isprime
>>> from sympy.core.compatibility import long
>>> a = 1407633717262338957430697921446883
>>> f = factorint(a, limit=10000)
>>> f == {991: 1, long(202916782076162456022877024859): 1, 7: 1}
True
>>> isprime(max(f))
False
This number has a small factor and a residual perfect power whose
base is greater than the limit:
>>> factorint(3*101**7, limit=5)
{3: 1, 101: 7}
Visual Factorization:
If ``visual`` is set to ``True``, then it will return a visual
factorization of the integer. For example:
>>> from sympy import pprint
>>> pprint(factorint(4200, visual=True))
3 1 2 1
2 *3 *5 *7
Note that this is achieved by using the evaluate=False flag in Mul
and Pow. If you do other manipulations with an expression where
evaluate=False, it may evaluate. Therefore, you should use the
visual option only for visualization, and use the normal dictionary
returned by visual=False if you want to perform operations on the
factors.
You can easily switch between the two forms by sending them back to
factorint:
>>> from sympy import Mul, Pow
>>> regular = factorint(1764); regular
{2: 2, 3: 2, 7: 2}
>>> pprint(factorint(regular))
2 2 2
2 *3 *7
>>> visual = factorint(1764, visual=True); pprint(visual)
2 2 2
2 *3 *7
>>> print(factorint(visual))
{2: 2, 3: 2, 7: 2}
If you want to send a number to be factored in a partially factored form
you can do so with a dictionary or unevaluated expression:
>>> factorint(factorint({4: 2, 12: 3})) # twice to toggle to dict form
{2: 10, 3: 3}
>>> factorint(Mul(4, 12, evaluate=False))
{2: 4, 3: 1}
The table of the output logic is:
====== ====== ======= =======
Visual
------ ----------------------
Input True False other
====== ====== ======= =======
dict mul dict mul
n mul dict dict
mul mul dict dict
====== ====== ======= =======
Notes
=====
Algorithm:
The function switches between multiple algorithms. Trial division
quickly finds small factors (of the order 1-5 digits), and finds
all large factors if given enough time. The Pollard rho and p-1
algorithms are used to find large factors ahead of time; they
will often find factors of the order of 10 digits within a few
seconds:
>>> factors = factorint(12345678910111213141516)
>>> for base, exp in sorted(factors.items()):
... print('%s %s' % (base, exp))
...
2 2
2507191691 1
1231026625769 1
Any of these methods can optionally be disabled with the following
boolean parameters:
- ``use_trial``: Toggle use of trial division
- ``use_rho``: Toggle use of Pollard's rho method
- ``use_pm1``: Toggle use of Pollard's p-1 method
``factorint`` also periodically checks if the remaining part is
a prime number or a perfect power, and in those cases stops.
If ``verbose`` is set to ``True``, detailed progress is printed.
See Also
========
smoothness, smoothness_p, divisors
"""
factordict = {}
if visual and not isinstance(n, Mul) and not isinstance(n, dict):
factordict = factorint(n, limit=limit, use_trial=use_trial,
use_rho=use_rho, use_pm1=use_pm1,
verbose=verbose, visual=False)
elif isinstance(n, Mul):
factordict = dict([(int(k), int(v)) for k, v in
list(n.as_powers_dict().items())])
elif isinstance(n, dict):
factordict = n
if factordict and (isinstance(n, Mul) or isinstance(n, dict)):
# check it
for k in list(factordict.keys()):
if isprime(k):
continue
e = factordict.pop(k)
d = factorint(k, limit=limit, use_trial=use_trial, use_rho=use_rho,
use_pm1=use_pm1, verbose=verbose, visual=False)
for k, v in d.items():
if k in factordict:
factordict[k] += v*e
else:
factordict[k] = v*e
if visual or (type(n) is dict and
visual is not True and
visual is not False):
if factordict == {}:
return S.One
if -1 in factordict:
factordict.pop(-1)
args = [S.NegativeOne]
else:
args = []
args.extend([Pow(*i, evaluate=False)
for i in sorted(factordict.items())])
return Mul(*args, evaluate=False)
elif isinstance(n, dict) or isinstance(n, Mul):
return factordict
assert use_trial or use_rho or use_pm1
n = as_int(n)
if limit:
limit = int(limit)
# special cases
if n < 0:
factors = factorint(
-n, limit=limit, use_trial=use_trial, use_rho=use_rho,
use_pm1=use_pm1, verbose=verbose, visual=False)
factors[-1] = 1
return factors
if limit and limit < 2:
if n == 1:
return {}
return {n: 1}
elif n < 10:
# doing this we are assured of getting a limit > 2
# when we have to compute it later
return [{0: 1}, {}, {2: 1}, {3: 1}, {2: 2}, {5: 1},
{2: 1, 3: 1}, {7: 1}, {2: 3}, {3: 2}][n]
factors = {}
# do simplistic factorization
if verbose:
sn = str(n)
if len(sn) > 50:
print('Factoring %s' % sn[:5] + \
'..(%i other digits)..' % (len(sn) - 10) + sn[-5:])
else:
print('Factoring', n)
if use_trial:
# this is the preliminary factorization for small factors
small = 2**15
fail_max = 600
small = min(small, limit or small)
if verbose:
print(trial_int_msg % (2, small, fail_max))
n, next_p = _factorint_small(factors, n, small, fail_max)
else:
next_p = 2
if factors and verbose:
for k in sorted(factors):
print(factor_msg % (k, factors[k]))
if next_p == 0:
if n > 1:
factors[int(n)] = 1
if verbose:
print(complete_msg)
return factors
# continue with more advanced factorization methods
# first check if the simplistic run didn't finish
# because of the limit and check for a perfect
# power before exiting
try:
if limit and next_p > limit:
if verbose:
print('Exceeded limit:', limit)
_check_termination(factors, n, limit, use_trial, use_rho, use_pm1,
verbose)
if n > 1:
factors[int(n)] = 1
return factors
else:
# Before quitting (or continuing on)...
# ...do a Fermat test since it's so easy and we need the
# square root anyway. Finding 2 factors is easy if they are
# "close enough." This is the big root equivalent of dividing by
# 2, 3, 5.
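            # Fermat's method: if n == a*a - b*b then n == (a - b)*(a + b),
            # so walk a upward from just above sqrt(n) and check whether
            # a*a - n is a perfect square b*b; if it is, a - b and a + b
            # are factors of n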
sqrt_n = integer_nthroot(n, 2)[0]
a = sqrt_n + 1
a2 = a**2
b2 = a2 - n
for i in range(3):
b, fermat = integer_nthroot(b2, 2)
if fermat:
break
b2 += 2*a + 1 # equiv to (a+1)**2 - n
a += 1
if fermat:
if verbose:
print(fermat_msg)
if limit:
limit -= 1
for r in [a - b, a + b]:
facs = factorint(r, limit=limit, use_trial=use_trial,
use_rho=use_rho, use_pm1=use_pm1,
verbose=verbose)
factors.update(facs)
raise StopIteration
# ...see if factorization can be terminated
_check_termination(factors, n, limit, use_trial, use_rho, use_pm1,
verbose)
except StopIteration:
if verbose:
print(complete_msg)
return factors
# these are the limits for trial division which will
# be attempted in parallel with pollard methods
low, high = next_p, 2*next_p
limit = limit or sqrt_n
# add 1 to make sure limit is reached in primerange calls
limit += 1
while 1:
try:
high_ = high
if limit < high_:
high_ = limit
# Trial division
if use_trial:
if verbose:
print(trial_msg % (low, high_))
ps = sieve.primerange(low, high_)
n, found_trial = _trial(factors, n, ps, verbose)
if found_trial:
_check_termination(factors, n, limit, use_trial, use_rho,
use_pm1, verbose)
else:
found_trial = False
if high > limit:
if verbose:
print('Exceeded limit:', limit)
if n > 1:
factors[int(n)] = 1
raise StopIteration
            # Only use advanced methods when no small factors were found
if not found_trial:
if (use_pm1 or use_rho):
high_root = max(int(math.log(high_**0.7)), low, 3)
# Pollard p-1
if use_pm1:
if verbose:
print(pm1_msg % (high_root, high_))
c = pollard_pm1(n, B=high_root, seed=high_)
if c:
# factor it and let _trial do the update
ps = factorint(c, limit=limit - 1,
use_trial=use_trial,
use_rho=use_rho,
use_pm1=use_pm1,
verbose=verbose)
n, _ = _trial(factors, n, ps, verbose=False)
_check_termination(factors, n, limit, use_trial,
use_rho, use_pm1, verbose)
# Pollard rho
if use_rho:
max_steps = high_root
if verbose:
print(rho_msg % (1, max_steps, high_))
c = pollard_rho(n, retries=1, max_steps=max_steps,
seed=high_)
if c:
# factor it and let _trial do the update
ps = factorint(c, limit=limit - 1,
use_trial=use_trial,
use_rho=use_rho,
use_pm1=use_pm1,
verbose=verbose)
n, _ = _trial(factors, n, ps, verbose=False)
_check_termination(factors, n, limit, use_trial,
use_rho, use_pm1, verbose)
except StopIteration:
if verbose:
print(complete_msg)
return factors
low, high = high, high*2
def primefactors(n, limit=None, verbose=False):
"""Return a sorted list of n's prime factors, ignoring multiplicity
and any composite factor that remains if the limit was set too low
for complete factorization. Unlike factorint(), primefactors() does
not return -1 or 0.
Examples
========
>>> from sympy.ntheory import primefactors, factorint, isprime
>>> primefactors(6)
[2, 3]
>>> primefactors(-5)
[5]
>>> sorted(factorint(123456).items())
[(2, 6), (3, 1), (643, 1)]
>>> primefactors(123456)
[2, 3, 643]
>>> sorted(factorint(10000000001, limit=200).items())
[(101, 1), (99009901, 1)]
>>> isprime(99009901)
False
>>> primefactors(10000000001, limit=300)
[101]
See Also
========
divisors
"""
n = int(n)
factors = sorted(factorint(n, limit=limit, verbose=verbose).keys())
s = [f for f in factors[:-1:] if f not in [-1, 0, 1]]
if factors and isprime(factors[-1]):
s += [factors[-1]]
return s
def _divisors(n):
"""Helper function for divisors which generates the divisors."""
factordict = factorint(n)
ps = sorted(factordict.keys())
def rec_gen(n=0):
if n == len(ps):
yield 1
else:
pows = [1]
for j in range(factordict[ps[n]]):
pows.append(pows[-1] * ps[n])
for q in rec_gen(n + 1):
for p in pows:
yield p * q
for p in rec_gen():
yield p
def divisors(n, generator=False):
r"""
Return all divisors of n sorted from 1..n by default.
If generator is True an unordered generator is returned.
The number of divisors of n can be quite large if there are many
prime factors (counting repeated factors). If only the number of
factors is desired use divisor_count(n).
Examples
========
>>> from sympy import divisors, divisor_count
>>> divisors(24)
[1, 2, 3, 4, 6, 8, 12, 24]
>>> divisor_count(24)
8
>>> list(divisors(120, generator=True))
[1, 2, 4, 8, 3, 6, 12, 24, 5, 10, 20, 40, 15, 30, 60, 120]
This is a slightly modified version of Tim Peters referenced at:
http://stackoverflow.com/questions/1010381/python-factorization
See Also
========
primefactors, factorint, divisor_count
"""
n = as_int(abs(n))
if isprime(n):
return [1, n]
if n == 1:
return [1]
if n == 0:
return []
rv = _divisors(n)
if not generator:
return sorted(rv)
return rv
def divisor_count(n, modulus=1):
"""
Return the number of divisors of ``n``. If ``modulus`` is not 1 then only
those that are divisible by ``modulus`` are counted.
References
==========
- http://www.mayer.dial.pipex.com/maths/formulae.htm
>>> from sympy import divisor_count
>>> divisor_count(6)
4
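    If a ``modulus`` is given, only divisors that are multiples of it are
    counted; for n = 6 and modulus = 2 those are 2 and 6:
    >>> divisor_count(6, 2)
    2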
See Also
========
factorint, divisors, totient
"""
if not modulus:
return 0
elif modulus != 1:
n, r = divmod(n, modulus)
if r:
return 0
if n == 0:
return 0
return Mul(*[v + 1 for k, v in factorint(n).items() if k > 1])
def _antidivisors(n):
"""Helper function for antidivisors which generates the antidivisors."""
for d in _divisors(n):
y = 2*d
if n > y and n % y:
yield y
for d in _divisors(2*n-1):
if n > d >= 2 and n % d:
yield d
for d in _divisors(2*n+1):
if n > d >= 2 and n % d:
yield d
def antidivisors(n, generator=False):
r"""
Return all antidivisors of n sorted from 1..n by default.
Antidivisors [1]_ of n are numbers that do not divide n by the largest
possible margin. If generator is True an unordered generator is returned.
References
==========
.. [1] definition is described in http://oeis.org/A066272/a066272a.html
Examples
========
>>> from sympy.ntheory.factor_ import antidivisors
>>> antidivisors(24)
[7, 16]
>>> sorted(antidivisors(128, generator=True))
[3, 5, 15, 17, 51, 85]
See Also
========
primefactors, factorint, divisors, divisor_count, antidivisor_count
"""
n = as_int(abs(n))
if n <= 2:
return []
rv = _antidivisors(n)
if not generator:
return sorted(rv)
return rv
def antidivisor_count(n):
"""
Return the number of antidivisors [1]_ of ``n``.
References
==========
.. [1] formula from https://oeis.org/A066272
Examples
========
>>> from sympy.ntheory.factor_ import antidivisor_count
>>> antidivisor_count(13)
4
>>> antidivisor_count(27)
5
See Also
========
factorint, divisors, antidivisors, divisor_count, totient
"""
n = as_int(abs(n))
if n <= 2:
return 0
return divisor_count(2*n-1) + divisor_count(2*n+1) + \
divisor_count(n) - divisor_count(n, 2) - 5
class totient(Function):
"""
Calculate the Euler totient function phi(n)
>>> from sympy.ntheory import totient
>>> totient(1)
1
>>> totient(25)
20
See Also
========
divisor_count
"""
@classmethod
def eval(cls, n):
n = sympify(n)
if n.is_Integer:
if n < 1:
raise ValueError("n must be a positive integer")
factors = factorint(n)
t = 1
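            # phi is multiplicative with phi(p**k) == p**(k - 1)*(p - 1),
            # so multiply that contribution over the prime factorization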
for p, k in factors.items():
t *= (p - 1) * p**(k - 1)
return t
def _eval_is_integer(self):
return fuzzy_and([self.args[0].is_integer, self.args[0].is_positive])
class divisor_sigma(Function):
"""
Calculate the divisor function `\sigma_k(n)` for positive integer n
``divisor_sigma(n, k)`` is equal to ``sum([x**k for x in divisors(n)])``
If n's prime factorization is:
.. math ::
n = \prod_{i=1}^\omega p_i^{m_i},
then
.. math ::
\sigma_k(n) = \prod_{i=1}^\omega (1+p_i^k+p_i^{2k}+\cdots
+ p_i^{m_ik}).
Parameters
==========
k : power of divisors in the sum
for k = 0, 1:
``divisor_sigma(n, 0)`` is equal to ``divisor_count(n)``
``divisor_sigma(n, 1)`` is equal to ``sum(divisors(n))``
Default for k is 1.
References
==========
.. [1] http://en.wikipedia.org/wiki/Divisor_function
Examples
========
>>> from sympy.ntheory import divisor_sigma
>>> divisor_sigma(18, 0)
6
>>> divisor_sigma(39, 1)
56
>>> divisor_sigma(12, 2)
210
>>> divisor_sigma(37)
38
See Also
========
divisor_count, totient, divisors, factorint
"""
@classmethod
def eval(cls, n, k=1):
n = sympify(n)
k = sympify(k)
if n.is_prime:
return 1 + n**k
if n.is_Integer:
if n <= 0:
raise ValueError("n must be a positive integer")
else:
return Mul(*[(p**(k*(e + 1)) - 1)/(p**k - 1) if k != 0
else e + 1 for p, e in factorint(n).items()])
def core(n, t=2):
"""
Calculate core(n,t) = `core_t(n)` of a positive integer n
``core_2(n)`` is equal to the squarefree part of n
If n's prime factorization is:
.. math ::
n = \prod_{i=1}^\omega p_i^{m_i},
then
.. math ::
core_t(n) = \prod_{i=1}^\omega p_i^{m_i \mod t}.
Parameters
==========
t : core(n,t) calculates the t-th power free part of n
``core(n, 2)`` is the squarefree part of ``n``
``core(n, 3)`` is the cubefree part of ``n``
Default for t is 2.
References
==========
.. [1] http://en.wikipedia.org/wiki/Square-free_integer#Squarefree_core
Examples
========
>>> from sympy.ntheory.factor_ import core
>>> core(24, 2)
6
>>> core(9424, 3)
1178
>>> core(379238)
379238
>>> core(15**11, 10)
15
See Also
========
factorint
"""
n = as_int(n)
t = as_int(t)
if n <= 0:
raise ValueError("n must be a positive integer")
elif t <= 1:
raise ValueError("t must be >= 2")
else:
y = 1
for p, e in factorint(n).items():
y *= p**(e % t)
return y
def digits(n, b=10):
"""
Return a list of the digits of n in base b. The first element in the list
is b (or -b if n is negative).
Examples
========
>>> from sympy.ntheory.factor_ import digits
>>> digits(35)
[10, 3, 5]
>>> digits(27, 2)
[2, 1, 1, 0, 1, 1]
>>> digits(65536, 256)
[256, 1, 0, 0]
>>> digits(-3958, 27)
[-27, 5, 11, 16]
"""
b = as_int(b)
n = as_int(n)
if b <= 1:
raise ValueError("b must be >= 2")
else:
x, y = abs(n), []
while x >= b:
x, r = divmod(x, b)
y.append(r)
y.append(x)
y.append(-b if n < 0 else b)
y.reverse()
return y
| {
"content_hash": "44eec6ea0b7dee2cd888efc72a8cc3b9",
"timestamp": "",
"source": "github",
"line_count": 1581,
"max_line_length": 86,
"avg_line_length": 29.838077166350413,
"alnum_prop": 0.523169542544622,
"repo_name": "liangjiaxing/sympy",
"id": "2d850a6c4990551e6eff220ded4ed2325542dd5a",
"size": "47174",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sympy/ntheory/factor_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13597754"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
import pycurl
import io
import json
jsonStr = '{"port":8099, "fileName":"plugin.xml", "line":5, "col":5, "offsetline":4}'
jsonStr1 = '{"port":8091, "fileName":"plugin.xml", "line":2, "col":0, "offsetline":4}'
jsonStr2 = '{"port":8091, "fileName":"plugin.xml", "line":2, "col":5, "offsetline":0}'
jsonStr3 = '{"port":8091, "fileName":"plugin.xml", "line":2, "col":0, "offsetline":0}'
cj = json.loads(jsonStr)
buf = io.BytesIO()  # pycurl's WRITEFUNCTION is handed bytes, so a bytes buffer is needed
curl = pycurl.Curl()
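# the query string packs the target location as "<fileName>:<line>:<col>:<offsetline>"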
curl.setopt(curl.URL, 'http://localhost:%s?message=%s:%d:%d:%d' % (cj['port'], cj['fileName'], cj['line'], cj['col'], cj['offsetline']))
curl.setopt(curl.WRITEFUNCTION, buf.write)
try:
curl.perform()
# todo: process return values
except pycurl.error as error:
pass
print(buf.getvalue().decode('utf-8', errors='replace'))
buf.close()
| {
"content_hash": "c4861c2b5949693ace11f6f2b9ce4399",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 136,
"avg_line_length": 34.68181818181818,
"alnum_prop": 0.6435124508519003,
"repo_name": "luoluyao/AppetizerRemoteCall",
"id": "fea9667d27e63aaea9de313a64bc732508187b26",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample/openFileExample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "21962"
},
{
"name": "Python",
"bytes": "763"
}
],
"symlink_target": ""
} |
"""Paddings."""
from sonnet.src.pad import causal
from sonnet.src.pad import create
from sonnet.src.pad import full
from sonnet.src.pad import reverse_causal
from sonnet.src.pad import same
from sonnet.src.pad import valid
__all__ = (
"causal",
"create",
"full",
"reverse_causal",
"same",
"valid",
)
| {
"content_hash": "69742d8cb27d1757334e93a50ab5f0fb",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 41,
"avg_line_length": 19.176470588235293,
"alnum_prop": 0.6717791411042945,
"repo_name": "deepmind/sonnet",
"id": "b8fb9c9666ac524cb8e153e77b476a0f1aa937e4",
"size": "1009",
"binary": false,
"copies": "1",
"ref": "refs/heads/v2",
"path": "sonnet/pad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "794977"
},
{
"name": "Shell",
"bytes": "1844"
},
{
"name": "Starlark",
"bytes": "31841"
}
],
"symlink_target": ""
} |
from __future__ import division
'''
This file provides an active learning environment for the demo interface.
Needs the file 'Topic.xlsx' as input.
Description of functionality
---
data model:
COMMENT KASPAR: I interpreted 'score' as the class of the noun phrase, i.e. 0 or 1.
datapoint = { str(noun phrase) : { 'score': float(controversy score), 'confidence':float(confidence)}}
estimates = list(datapoint1, ... , datapointN)
labelled = { str(noun phrase) : { 'label' : 'controversial' OR 'noncontroversial') , 'ip' : str(ip address of user) } }
---
Controversy is labelled on the noun-phrase (here considered topic) level.
Timestamps should be implemented on the backend side.
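For illustration only (hypothetical values, not actual data), one labelled
entry could look like:
labelled = {'side effects': {'label': 'controversial', 'ip': '127.0.0.1'}}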
'''
import random # while real data lacks
import json
import sys
import os
# KB: Added modules
import numpy as np
import pandas as pd
import random
# libact classes
from libact.base.dataset import Dataset
from libact.models import LogisticRegression
from libact.query_strategies import UncertaintySampling
from elasticsearch import Elasticsearch
from esengine import Payload, Query, Filter
from models.anchor import *
es = Elasticsearch(
['http://controcurator.org/ess/'],
port=80)
def load_anchors():
#query = es.search(index="anchors", doc_type="vaccination")#, query=body)
#topTopics = top['aggregations']['topics']['buckets']
query = {"query": {
"bool": {
"must": [{
"range": {
"features.instances": {
"gt": "0"
}
}
}],
}
}
}
anchors = Anchor.search(query, size=10)
data = pd.DataFrame([a.features for a in anchors if a.features['entities'] > 0], index=[a.label for a in anchors if a.features['entities'] > 0])
'''
We need to keep track of the original topic name. This information is needed
when asking the user whether the topic is controversial
'''
names = data.index
'''
As features we currently only look at # of 'positive' words (col 3),
# of 'negative' words (col 4), and'intensity' (col 5).
'''
X = np.asarray(data.ix[:,2:3])
'''
The active learning environment used here (libact) needs a few coded observation.
Otherwise search new data points won't work
Since the existing spreadsheet already ranked topics according to their controversy scores,
I made a best guess, and assigned the first five to class 1 (controversial) and the last five
to class 0 (not controversial)
'''
y = np.asarray([1,1,1,1,1] + [None]*(X.shape[0]-10)+[0,0,0,0,0])
return X,y,names
def loadDocuments():
documents = Article.all()
return documents
def load_data(path_to_file):
'''
Okay, let's load the Excel spreadsheet in which topics,
here understood as noun phrases, are given controversy scores.
'''
data = pd.read_excel(path_to_file, header=None,skiprows=1)
'''
We need to keep track of the original topic name. This information is needed
when asking the user whether the topic is controversial
'''
names = list(data.ix[:,0])
'''
As features we currently only look at # of 'positive' words (col 3),
# of 'negative' words (col 4), and'intensity' (col 5).
'''
X = np.asarray(data.ix[:,3:5])
'''
The active learning environment used here (libact) needs a few coded observation.
Otherwise search new data points won't work
Since the existing spreadsheet already ranked topics according to their controversy scores,
I made a best guess, and assigned the first five to class 1 (controversial) and the last five
to class 0 (not controversial)
'''
y = np.asarray([1,1,1,1,1] + [None]*(X.shape[0]-10)+[0,0,0,0,0])
return X,y,names
def initialize_model(X,y):
'''
Convert feature matrix and target vector to a format that is
easy to digest for the libact model and searchers
'''
trn_ds = Dataset(X,y)
'''
Define model. We start with a simple Logistic Regression.
More refined models can be implemented later.
There is a possibility to integrate Sklearn classifiers.
'''
model=LogisticRegression()
'''
Before looking for new datapoins the model needs to fitted using
the scarce observation we have given, see the construction of the 'y'
target vector in the load_data function
'''
model.train(trn_ds)
return model,trn_ds
def convert_label(label):
'''
Function that converts manually given labels to a binary class.
'''
if label == 'controversial':
return 1
return 0
def unsure(data=None):
'''
    implements the /unsure endpoint.
    parameters
    ----
    data: an optional labelled datapoint (see `labelled` in the module docstring)
    returns
    ---
    { 'unsure' : str(noun phrase to label next) } as a JSON string when no
    data is given; nothing when a labelled datapoint is supplied.
CHANGED CODE HERE: We'll use active learning to search for new datapoints.
Two scenarios are possible:
1) If a data point _and_ label are given, it will update the training set and retrain the model.
2) If no data point is given, it will search for a new data point to code and return this.
'''
if data:
''' expects an object like: {'nounphrase': {'label':'controversial'/'noncrontroversial','ip':'127.0.01'}}'''
data = json.loads(data)
'''get the topic name'''
## Python 2.7 3.5 difference here!
# Python 3.5 use
name = list(data.keys())[0]
# Python 2.7 use
# name = data.keys()[0]
'''get the label'''
label = convert_label(data[name]['label'])
'''get the position of the topic in the training set'''
ask_id = names.index(name)
'''update training set with new label'''
trn_ds.update(ask_id, label)
'''retrain model'''
model.train(trn_ds)
else:
'''
When asked for a new data point, we call the UncertaintySampling method
and write the name of this topic to JSON file
'''
ask_id = qs.make_query()
results = { 'unsure' : names[ask_id] }
return json.dumps(results)
def controversial(trn_ds):
'''
implements the /controversial endpoint.
parameters
----
none
returns
---
{
controversial : estimates,
noncontroversial : estimates
}
This function returns the ten most controversial and non controversial topic based on the current model.
'''
labeled_features = trn_ds.get_labeled_entries()
positions = [i for i,a in enumerate(trn_ds.data) if trn_ds.data[i][1] != None]
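    # score every entry that already carries a label with the current model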
datapoints = []
for p in positions:
datapoints.append(
{
'anchor' : names[p],
'score':model.predict(X[p])[0],
# Get confidence for class one, i.e. the topic being controversial
'confidence':model.predict_real(X[p])[0][1]
}
)
datapoints_sorted = sorted(datapoints, key=lambda x: (x['confidence']),reverse=True)
controversial = datapoints_sorted[:10]
noncontroversial = datapoints_sorted[-10:]
keys = {label:label.replace(' ','_').lower() for label in names}
results = {'controversial':controversial, 'noncontroversial':noncontroversial, 'keys':keys}
return results
if __name__=='__main__':
input = sys.argv[1:]
task = input[0]
if task == 'anchor' and len(input) == 2:
# return anchor
anchor = input[1]
try:
anchor = Anchor.get(id=anchor)
instances = anchor.getInstances()
for i in instances:
i['_source']['sentences'] = i['_source']['sentences'][0:2]
            print(json.dumps(instances))
except:
            print(json.dumps([]))
if task == 'article' and len(input) == 2:
# return article
article = input[1]
try:
article = Article.get(id=article)
            print(json.dumps(vars(article), default=str))
except:
            print(json.dumps([]))
if task == 'controversial':
## DEMO ##
# Initialize the model
#X,y,names = load_anchors()
#X,y,names = load_anchors()
#model,trn_ds = initialize_model(X,y)
#qs = UncertaintySampling(trn_ds, method='lc', model=LogisticRegression())
# Cell used for simulation, we randomly annotate words as being controversial or not
# During each iteration we update the model.
# Lastly we call the 'controversial' function and sort all topics as controversial
# or not based on the confidence score returned by the logistic regression
#import warnings
#warnings.filterwarnings('ignore')
#n_turns = 10
#answers = ['noncontroversial','controversial']*int(n_turns/2)
#random.shuffle(answers)
# for t in range(n_turns):
# result = unsure()
# labeled = {json.loads(result)['unsure']:{'label':answers[t],'ip':'127.0.01'}}
# unsure(json.dumps(labeled))
#controversies = controversial(trn_ds)
        documents = loadDocuments()
        # the active-learning pipeline above is commented out for the demo,
        # so serialize the loaded documents rather than the (otherwise
        # undefined) controversies
        print(json.dumps([vars(d) for d in documents], default=str))
| {
"content_hash": "276d15f0c8c342d44cbb10f664ff2c00",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 148,
"avg_line_length": 28.70440251572327,
"alnum_prop": 0.6214943032427696,
"repo_name": "ControCurator/controcurator",
"id": "b3e136d100edc1edfd02948f1398efd3ec83adb1",
"size": "9142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_code/endpoints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "349"
},
{
"name": "HTML",
"bytes": "37291"
},
{
"name": "JavaScript",
"bytes": "32468"
},
{
"name": "Jupyter Notebook",
"bytes": "24678"
},
{
"name": "Python",
"bytes": "208868"
}
],
"symlink_target": ""
} |
import mock
import unittest
from airflow import DAG, configuration
from airflow.contrib.operators.druid_operator import DruidOperator
from airflow.utils import timezone
from airflow.models import TaskInstance
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestDruidOperator(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': timezone.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_read_spec_from_file(self):
m = mock.mock_open(read_data='{"some": "json"}')
with mock.patch('airflow.contrib.operators.druid_operator.open', m, create=True) as m:
druid = DruidOperator(
task_id='druid_indexing_job',
json_index_file='index_spec.json',
dag=self.dag
)
m.assert_called_once_with('index_spec.json')
self.assertEqual(druid.index_spec_str, '{\n "some": "json"\n}')
def test_render_template(self):
json_str = '''
{
"type": "{{ params.index_type }}",
"datasource": "{{ params.datasource }}",
"spec": {
"dataSchema": {
"granularitySpec": {
"intervals": ["{{ ds }}/{{ macros.ds_add(ds, 1) }}"]
}
}
}
}
'''
m = mock.mock_open(read_data=json_str)
with mock.patch('airflow.contrib.operators.druid_operator.open', m, create=True) as m:
operator = DruidOperator(
task_id='spark_submit_job',
json_index_file='index_spec.json',
params={
'index_type': 'index_hadoop',
'datasource': 'datasource_prd'
},
dag=self.dag
)
ti = TaskInstance(operator, DEFAULT_DATE)
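            # render_templates() expands the Jinja placeholders in the loaded
            # spec ({{ params.* }}, {{ ds }} and the ds_add macro); the
            # expected string below is the fully rendered result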
ti.render_templates()
m.assert_called_once_with('index_spec.json')
expected = '''{
"datasource": "datasource_prd",
"spec": {
"dataSchema": {
"granularitySpec": {
"intervals": [
"2017-01-01/2017-01-02"
]
}
}
},
"type": "index_hadoop"
}'''
self.assertEqual(expected, getattr(operator, 'index_spec_str'))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "98aac7ae225ed556b9d40e8287bf1786",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 94,
"avg_line_length": 32,
"alnum_prop": 0.4964398734177215,
"repo_name": "zack3241/incubator-airflow",
"id": "c8f92f5958c0cd7ff941a2aea890cd5a4dd43df6",
"size": "3097",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/contrib/operators/test_druid_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57054"
},
{
"name": "HTML",
"bytes": "152247"
},
{
"name": "JavaScript",
"bytes": "1364571"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2777082"
},
{
"name": "Shell",
"bytes": "28198"
}
],
"symlink_target": ""
} |
"""This example fetches data from PQL tables and creates match table files."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import tempfile
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize a report downloader.
report_downloader = client.GetDataDownloader(version='v201408')
line_items_file = tempfile.NamedTemporaryFile(
prefix='line_items_', suffix='.csv', mode='w', delete=False)
ad_units_file = tempfile.NamedTemporaryFile(
prefix='ad_units_', suffix='.csv', mode='w', delete=False)
line_items_pql_query = ('SELECT Name, Id, Status FROM Line_Item ORDER BY Id '
'ASC')
ad_units_pql_query = 'SELECT Name, Id FROM Ad_Unit ORDER BY Id ASC'
# Downloads the response from PQL select statement to the specified file
report_downloader.DownloadPqlResultToCsv(
line_items_pql_query, line_items_file)
report_downloader.DownloadPqlResultToCsv(
ad_units_pql_query, ad_units_file)
line_items_file.close()
ad_units_file.close()
print ('Saved line items to... %s' % line_items_file.name)
print ('Saved ad units to... %s' % ad_units_file.name)
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| {
"content_hash": "e5d392e5dd84baf7ac9bc6ba3c468d3d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 32.21951219512195,
"alnum_prop": 0.6866010598031794,
"repo_name": "coxmediagroup/googleads-python-lib",
"id": "18f2080632b18c06a787db536fb72ad23abbe308",
"size": "1939",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201408/publisher_query_language_service/fetch_match_tables.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2535137"
}
],
"symlink_target": ""
} |
import os
from som.wordfish.structures import (
structure_compressed,
structure_folder
)
# If you want to control the debug level, change MESSAGELEVEL in your
# environment
# Eg, only print critical information.
os.environ['MESSAGELEVEL'] = "CRITICAL"
# File
compressed_file = "data.zip"
structure = structure_compressed(compressed_file)
# Folder
folder_path = os.path.abspath("data")
structure = structure_folder(folder_path)
| {
"content_hash": "8002a43d4548561ccef4861adf6a011e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 22.1,
"alnum_prop": 0.751131221719457,
"repo_name": "radinformatics/som-tools",
"id": "de4f937b35078f4b7dce9bd810d5d956f918e193",
"size": "695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/wordfish/structures/structure_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16452"
},
{
"name": "HTML",
"bytes": "780"
},
{
"name": "JavaScript",
"bytes": "163002"
},
{
"name": "Python",
"bytes": "115696"
},
{
"name": "Shell",
"bytes": "1092"
}
],
"symlink_target": ""
} |
"""CompactBlocksTest -- test compact blocks (BIP 152, without segwit support, version 1)
"""
import random
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, HeaderAndShortIDs, msg_block, msg_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ToHex, NODE_HEADERS_COMPRESSED
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript, OP_TRUE, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, wait_until
# TestP2PConn: A peer we use to send messages to dashd, and store responses.
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
def on_sendcmpct(self, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
        # both nodes have the same version
self.num_nodes = 2
self.extra_args = [[
"-txindex",
"-acceptnonstdtxn=1",
]] * 2
self.utxos = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def build_block_on_tip(self, node):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.solve()
return block
    # Create 10 more anyone-can-spend utxos for testing.
def make_utxos(self):
# Doesn't matter which node we use, just use node0.
block = self.build_block_on_tip(self.nodes[0])
self.test_node.send_and_ping(msg_block(block))
assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256
self.nodes[0].generate(100)
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.test_node.send_and_ping(msg_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
return
# Test "sendcmpct" (between peers with the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert peer.block_announced
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version+1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
        # This code should be enabled after increasing the cmpctblock version
#if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
# sendcmpct.version = preferred_version-1
# sendcmpct.announce = True
# old_node.send_and_ping(sendcmpct)
# Header sync
# old_node.request_headers_and_sync(locator=[tip])
# check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes dashd to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# dashd's choice of nonce.
def test_compactblock_construction(self, node, test_node, version):
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node, node, version)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
        block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))  # %064x keeps leading zeros so the hash is a full 64-char hex string
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(20, block_hash) # 20 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert len(header_and_shortids.prefilled_txn) >= 1
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
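        # per BIP 152, k0/k1 are derived from sha256(block header || nonce)
        # and each shortid is the low 6 bytes of SipHash-2-4 of the tx hash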
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
# Test that dashd requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
def test_compactblock_requests(self, node, test_node):
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node)
with mininode_lock:
test_node.last_message.pop("getdata", None)
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
getheaders_key = "getheaders2" if test_node.nServices & NODE_HEADERS_COMPRESSED else "getheaders"
wait_until(lambda: getheaders_key in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert_equal(test_node.last_message["getdata"].inv[0].type, 20)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
comp_block.shortids = [
calculate_shortid(k0, k1, block.vtx[0].sha256) ]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
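                # BIP 152 sends the requested indexes differentially encoded;
                # to_absolute() converts them back to plain positions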
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
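        # initialize_from_block prefills only the coinbase by default and none
        # of the 5 spends are in the mempool yet, so indexes 1..5 get requested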
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_blocktxn()
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5])
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert block.vtx[1].hash in node.getrawmempool()
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4])
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
# Clear out last request.
with mininode_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0])
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
# Shouldn't have gotten a request for any transaction
assert "getblocktxn" not in test_node.last_message
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
if (len(self.utxos) == 0):
self.make_utxos()
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert tx.hash in mempool
# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0])
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
# Now give an incorrect response.
# Note that it's possible for dashd to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change was made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert test_node.last_message["getdata"].inv[0].type == 2
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Deliver the block
test_node.send_and_ping(msg_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
# dashd will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
test_node.last_message.pop("blocktxn", None)
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(20, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(20, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert found
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
assert "blocktxn" not in test_node.last_message
def test_end_to_end_block_relay(self, node, listeners):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: "cmpctblock" in l.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node):
assert len(self.utxos)
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4])
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
        assert int(node.getbestblockhash(), 16) != block.sha256
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
msg = msg_sendcmpct()
msg.version = version
msg.announce = True
peer.send_and_ping(msg)
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
assert len(self.utxos)
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now test that delivering an invalid compact block won't break relay
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
cmpct_block.prefilled_txn[0].tx = CTxIn()
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert int(node.getbestblockhash(), 16) != block.sha256
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
self.second_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_HEADERS_COMPRESSED)
self.old_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_HEADERS_COMPRESSED)
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
self.log.info("Running tests:")
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.nodes[0], self.test_node, 1)
self.sync_blocks()
self.test_sendcmpct(self.nodes[1], self.second_node, 1)
self.sync_blocks()
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.nodes[0], self.test_node, 1)
self.sync_blocks()
self.test_compactblock_construction(self.nodes[1], self.second_node, 1)
self.sync_blocks()
self.log.info("Testing compactblock requests... ")
self.test_compactblock_requests(self.nodes[0], self.test_node)
self.sync_blocks()
self.test_compactblock_requests(self.nodes[1], self.second_node)
self.sync_blocks()
self.log.info("Testing getblocktxn requests...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
self.sync_blocks()
self.test_getblocktxn_requests(self.nodes[1], self.second_node, 1)
self.sync_blocks()
self.log.info("Testing getblocktxn handler...")
self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
self.sync_blocks()
self.test_getblocktxn_handler(self.nodes[1], self.second_node, 1)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
self.sync_blocks()
self.log.info("Testing compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
self.sync_blocks()
self.test_compactblocks_not_at_tip(self.nodes[1], self.second_node)
self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
self.sync_blocks()
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
self.sync_blocks()
self.test_incorrect_blocktxn_response(self.nodes[1], self.second_node, 1)
self.sync_blocks()
# End-to-end block relay tests
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.second_node, self.nodes[1], 1)
self.test_end_to_end_block_relay(self.nodes[0], [self.second_node, self.test_node, self.old_node])
self.test_end_to_end_block_relay(self.nodes[1], [self.second_node, self.test_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.second_node)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node)
self.log.info("Testing reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.second_node, self.old_node)
self.sync_blocks()
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
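# Recap (a descriptive sketch only, using message classes this test already
# imports) of the compact block round-trip exercised above:
#   comp_block = HeaderAndShortIDs()
#   comp_block.initialize_from_block(block, prefill_list=[0])
#   peer.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))  # announce block
#   # ... the node replies with getblocktxn for the indexes it is missing ...
#   resp = msg_blocktxn()
#   resp.block_transactions = BlockTransactions(block.sha256, missing_txs)
#   peer.send_and_ping(resp)  # missing_txs is a placeholder for those txs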
| {
"content_hash": "6a9dbb12e4b831f16fb50ec3057963fc",
"timestamp": "",
"source": "github",
"line_count": 794,
"max_line_length": 433,
"avg_line_length": 46.44458438287154,
"alnum_prop": 0.643788811454294,
"repo_name": "dashpay/dash",
"id": "a3c750872f8cb57d5c64e5f00e20ac037c8db9ae",
"size": "37086",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/functional/p2p_compactblocks.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1866352"
},
{
"name": "C++",
"bytes": "9729795"
},
{
"name": "CMake",
"bytes": "32255"
},
{
"name": "CSS",
"bytes": "113028"
},
{
"name": "Dockerfile",
"bytes": "6344"
},
{
"name": "GDB",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "235904"
},
{
"name": "Makefile",
"bytes": "128711"
},
{
"name": "Objective-C++",
"bytes": "5478"
},
{
"name": "Python",
"bytes": "1899906"
},
{
"name": "QMake",
"bytes": "1389"
},
{
"name": "Sage",
"bytes": "39795"
},
{
"name": "Shell",
"bytes": "134642"
}
],
"symlink_target": ""
} |
"""
WSGI config for beauty_and_pics project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "beauty_and_pics.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
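# Usage note (an assumption, not part of the original file): a WSGI server such
# as gunicorn would typically serve this callable with
#   gunicorn beauty_and_pics.wsgi:application
# while Django's development server locates it via the WSGI_APPLICATION setting.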
| {
"content_hash": "30ee92b995badae618275daa998fd6bf",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.928571428571427,
"alnum_prop": 0.7728395061728395,
"repo_name": "entpy/beauty-and-pics",
"id": "10d7b48ef3393e4fb71a7a53dc263a1beab9b5df",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beauty_and_pics/beauty_and_pics/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43305"
},
{
"name": "HTML",
"bytes": "318159"
},
{
"name": "JavaScript",
"bytes": "98290"
},
{
"name": "Python",
"bytes": "540131"
}
],
"symlink_target": ""
} |
import creole
from .utils import binders_map, object_slug
class PinaxWikiHtmlEmitter(creole.HtmlEmitter):
def __init__(self, wiki, root, link_rules=None):
self.wiki = wiki
super().__init__(root, link_rules)
def link_emit(self, node):
target = node.content
if node.children:
inside = self.emit_children(node)
else:
inside = self.html_escape(target)
m = self.link_rules.addr_re.match(target)
if m:
if m.group("extern_addr"):
return '<a href="{}">{}</a>'.format(self.attr_escape(target), inside)
elif m.group("inter_wiki"):
raise NotImplementedError
slug = object_slug(self.wiki)
page_url = binders_map()[slug].page_url(self.wiki, target)
return '<a href="{}">{}</a>'.format(self.attr_escape(page_url), inside)
def creole_parse(wiki, text):
document = creole.CreoleParser(text, blog_line_breaks=True).parse()
return PinaxWikiHtmlEmitter(wiki, document).emit()
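# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): the same parse/emit pipeline without the wiki-specific link
# resolution that PinaxWikiHtmlEmitter adds above.
def plain_creole_to_html(text):
    # Parse creole markup into a document tree, then emit HTML with the default
    # emitter (external links only, no wiki page URL resolution).
    document = creole.CreoleParser(text, blog_line_breaks=True).parse()
    return creole.HtmlEmitter(document).emit()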
| {
"content_hash": "0ac2a82da0fdb08cafcdb918df85bd9b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 85,
"avg_line_length": 33.54838709677419,
"alnum_prop": 0.6019230769230769,
"repo_name": "pinax/pinax-wiki",
"id": "4d4923ba51d75b1be6a38767501cb15f4e060e3e",
"size": "1040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinax/wiki/parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "23785"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("djasana", "0011_modifies_choices"),
]
operations = [
migrations.AlterField(
model_name="project",
name="team",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djasana.Team",
to_field="remote_id",
),
),
]
| {
"content_hash": "7cf071f7021dea231a8634d65bc796ac",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 60,
"avg_line_length": 23.75,
"alnum_prop": 0.5473684210526316,
"repo_name": "sbywater/django-asana",
"id": "6f049cf80cae005e34d0961c62e6321a07f8d03d",
"size": "643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djasana/migrations/0012_project_team_allows_null.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "199690"
}
],
"symlink_target": ""
} |
"""Defun decorator for defining graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
import sys
import threading
import types as types_lib
import weakref
import numpy as np
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# This is to avoid a circular dependency with gradients_impl
gradients_impl._function = sys.modules[__name__] # pylint: disable=protected-access
FORWARD_FUNCTION_ATTRIBUTE_NAME = "forward_function_name"
BACKWARD_FUNCTION_ATTRIBUTE_NAME = "backward_function_name"
# TODO(scottzhu): Update this to allow arbitrary attribute names in future.
WHITELIST_FUNCTION_ATTRIBUTE_REGEX = [
"experimental_.*",
FORWARD_FUNCTION_ATTRIBUTE_NAME,
BACKWARD_FUNCTION_ATTRIBUTE_NAME
]
CacheKey = collections.namedtuple("CacheKey", [
"input_signature", "parent_graph", "device_functions", "colocation_stack",
"uses_xla"
])
def _parse_func_attrs(attributes):
"""Convert the keyword arguments into function_def attributes.
Currently only support primitive types: bool, int, float and string.
Args:
attributes: the dictionary of attributes.
Returns:
A dict of attributes where the key is the name of attribute and the value
is the AttrValue proto.
Raises:
ValueError: If the kwargs contains unwhitelisted name or unsupported value
types.
"""
attrs = {}
for key, value in attributes.items():
if not any(re.match(reg, key)
for reg in WHITELIST_FUNCTION_ATTRIBUTE_REGEX):
raise ValueError("Attribute name is not whitelisted. "
"Whitelisted: prefix %s, got: %s" %
(WHITELIST_FUNCTION_ATTRIBUTE_REGEX, key))
if isinstance(value, attr_value_pb2.AttrValue):
attrs[key] = value
# bool type check has to happen before int since bool is a subclass of int.
elif isinstance(value, bool):
attrs[key] = attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
attrs[key] = attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
attrs[key] = attr_value_pb2.AttrValue(f=value)
elif isinstance(value, (str, bytes)):
attrs[key] = attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(key, type(value)))
return attrs
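# Illustrative sketch (not part of the original module; the helper below is
# hypothetical): how whitelisted primitive attributes are converted into
# AttrValue protos by _parse_func_attrs above.
def _example_parse_func_attrs():
  """Demonstrates the bool/int/float/string conversions performed above."""
  attrs = _parse_func_attrs({
      "experimental_flag": True,    # stored as AttrValue(b=True)
      "experimental_count": 3,      # stored as AttrValue(i=3)
      "experimental_scale": 0.5,    # stored as AttrValue(f=0.5)
      "experimental_label": "tag",  # stored as AttrValue(s=b"tag")
  })
  assert attrs["experimental_count"].i == 3
  assert attrs["experimental_label"].s == b"tag"
  return attrs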
def _forward_name(n):
"""The name of a generated forward defun named n."""
return "__forward_%s_%s" % (n, ops.uid())
def _backward_name(n):
"""The name of a generated backward defun named n."""
return "__backward_%s_%s" % (n, ops.uid())
def _inference_name(n):
"""The name of a forward-but-no-gradient defun named n."""
return "__inference_%s_%s" % (n, ops.uid())
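# For example (illustrative only): _inference_name("f") yields a name of the
# form "__inference_f_<uid>", where ops.uid() supplies a process-unique integer
# so that repeated tracings of the same Python function get distinct names.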
# TODO(apassos) get rid of this by splitting framework.function._DefinedFunction
# so it doesn't have the definition-generating logic and is just a container for
# an already-defined function.
class _EagerDefinedFunction(object):
"""Callable with the interface of `framework.function._DefinedFunction.`
`_EagerDefinedFunction` encapsulates a function definition and its properties,
and it provides a method for calling the encapsulated function. Some Ops
take functions as attributes, which have type `func`; an instance of this
class may be provided as the value of these `func` attributes.
"""
def __init__(self, name, graph, inputs, outputs, attrs):
"""Initializes an eager defined function.
Args:
name: str, the name for the created function.
graph: Graph, the graph containing the operations in the function
inputs: the tensors in the graph to be used as inputs to the function
outputs: the tensors in the graph which will be outputs to the function
attrs: dict mapping names of attributes to their AttrValue values
"""
input_ops = set(arg.op for arg in inputs)
operations = [op for op in graph.get_operations() if op not in input_ops]
fn = pywrap_tensorflow.TF_GraphToFunction_wrapper(
graph._c_graph, # pylint: disable=protected-access
compat.as_str(name),
False,
[o._c_op for o in operations], # pylint: disable=protected-access
[t._as_tf_output() for t in inputs], # pylint: disable=protected-access
[t._as_tf_output() for t in outputs], # pylint: disable=protected-access
[],
None,
compat.as_str(""))
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(iga): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use status.
pywrap_tensorflow.TF_FunctionSetAttrValueProto(
fn, compat.as_str(name), serialized)
    # TODO(apassos) avoid creating a FunctionDef (especially to grab the
    # signature), but in general it's nice not to depend on it.
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TF_FunctionToFunctionDef(fn, buffer_)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
function_def = function_pb2.FunctionDef()
function_def.ParseFromString(compat.as_bytes(proto_data))
with ops.init_scope():
if context.executing_eagerly():
context.add_function(fn)
self.definition = function_def
self.name = compat.as_bytes(function_def.signature.name)
self.signature = function_def.signature
self._num_outputs = len(self.signature.output_arg)
self._output_types = [o.type for o in self.signature.output_arg]
self._output_shapes = [o.shape for o in outputs]
self._func_graph_outputs = outputs
self.grad_func_name = None
self.python_grad_func = None
self._c_func = c_api_util.ScopedTFFunction(fn)
self._grad_func = None
self._graph = graph
self._stateful_ops = tuple(op for op in operations if op.op_def.is_stateful)
def add_to_graph(self, g):
# pylint: disable=protected-access
if self.name not in g._functions:
g._add_function(self)
for f in self._graph._functions.values():
if f.name not in g._functions:
g._add_function(f)
# pylint: enable=protected-access
@property
def stateful_ops(self):
return self._stateful_ops
def call(self, ctx, args):
"""Calls this function with `args` as inputs.
Function execution respects device annotations only if the function won't
be compiled with xla.
Args:
ctx: a Context object
args: a list of arguments to supply this function with.
Returns:
The outputs of the function call.
Raises:
ValueError: if the number of arguments is incorrect.
"""
executing_eagerly = ctx.executing_eagerly()
if self._graph._xla_compile: # pylint: disable=protected-access
# XLA compilation relies upon a custom kernel creator to run functions.
signature = self.signature
if executing_eagerly:
outputs = execute.execute(
str(signature.name),
num_outputs=self._num_outputs,
inputs=args,
attrs=None,
ctx=ctx)
else:
g = ops.get_default_graph()
self.add_to_graph(g)
op = g.create_op(
signature.name,
[ops.internal_convert_to_tensor(x, ctx=ctx) for x in args],
tuple(dtypes_module.DType(x.type) for x in signature.output_arg),
op_def=signature,
name="FunctionCall",
compute_shapes=False)
outputs = op.outputs
if not outputs:
return op
outputs = [outputs] if isinstance(
outputs, (ops.Tensor, type(None))) else list(outputs)
else:
# TODO(akshayka): Either remove this if the FunctionLibraryRuntime
# creates `PartitionedCallOp` kernels by default, or remove the previous
# branch if a TPU kernel is registered for `PartitionedCall`.
if len(args) != len(self.signature.input_arg):
raise ValueError(
"Arguments and signature arguments do not match: %s %s " %
(len(args), len(list(self.signature.input_arg))))
function_call_options = ctx.get_function_call_options()
outputs = functional_ops.partitioned_call(
args=args,
f=self,
tout=self._output_types,
executing_eagerly=executing_eagerly,
config=function_call_options.config_proto_serialized,
executor_type=function_call_options.executor_type)
if executing_eagerly:
return outputs
else:
for i, shape in enumerate(self._output_shapes):
outputs[i].set_shape(shape)
for i, func_graph_output in enumerate(self._func_graph_outputs):
custom_gradient.copy_handle_data(func_graph_output, outputs[i])
return outputs
class Function(object):
"""Callable object encapsulating a function definition and its gradient.
`Function` is a callable that encapsulates a function definition and
is differentiable under `tf.GradientTape` objects.
"""
def __init__(self, func_graph, attrs=None, signature=None):
"""Initialize a Function.
Args:
func_graph: An instance of FuncGraph: the function body to wrap.
attrs: (optional) dict mapping names of attributes to their AttrValue
values. Attributes in `attrs` will be included in this function's
definition.
signature: a nested sequence of `TensorSpec` objects specifying the input
signature of this function.
Raises:
ValueError: If number of input_placeholders is not equal to the number
of function inputs.
"""
self._arg_keywords = None
self._num_positional_args = None
self._func_graph = func_graph
self._captured_inputs = list(self._func_graph.captures.keys())
self._num_outputs = len(self._func_graph.outputs)
self._output_shapes = tuple(
output.shape for output in self._func_graph.outputs)
self._attrs = _parse_func_attrs(attrs or {})
self._inference_function = _EagerDefinedFunction(
_inference_name(self._func_graph.name), self._func_graph,
self._func_graph.inputs, self._func_graph.outputs, self._attrs)
self._backward_graph_function = None
self._signature = signature
self._gradient_name = None
def __call__(self, *args, **kwargs):
"""Executes the wrapped function.
Args:
*args: Tensors or Variables. Positional arguments are only accepted when
they correspond one-to-one with arguments of the traced Python function.
**kwargs: Tensors or Variables specified by name. When
`get_concrete_function` was called to create this `Function`, each
Tensor input was given a name, defaulting to the name of the Python
function's argument but possibly overridden by the `name=` argument to
`tf.TensorSpec`. These names become the argument names for the concrete
function.
Returns:
The result of applying the TF function on the given Tensors.
Raises:
AssertionError: If this `Function` was not created through
`get_concrete_function`.
ValueError: If arguments contains anything other than Tensors or
Variables.
TypeError: For invalid positional/keyword argument combinations.
"""
if self._arg_keywords is None or self._num_positional_args is None:
if self._signature is not None:
if kwargs:
raise NotImplementedError(
"Keyword arguments not supported when calling a "
"wrap_function-decorated function.")
return self._call_flat(args)
raise AssertionError(
"Tried to call a concrete function obtained from an internal API "
"through the public interface. Use get_concrete_function instead.")
if len(args) > self._num_positional_args:
raise TypeError(
("Expected at most {} positional arguments ({}), got {}. When "
"calling a concrete function, positional arguments may not be bound "
"to Tensors within nested structures.").format(
self._num_positional_args,
self._arg_keywords[:self._num_positional_args],
args))
args = list(args)
for keyword in self._arg_keywords[len(args):]:
args.append(kwargs.pop(compat.as_str(keyword)))
if kwargs:
positional_arg_keywords = set(self._arg_keywords[:len(args)])
for unused_key in kwargs:
if unused_key in positional_arg_keywords:
raise TypeError("Got two values for keyword '{}'.".format(unused_key))
raise TypeError("Keyword arguments {} unknown.".format(kwargs.keys()))
return self._call_flat(args)
def _filtered_call(self, args, kwargs):
"""Executes the function, filtering arguments from the Python function.
Objects aside from Tensors and Variables are ignored.
Args:
args: Canonicalized positional arguments of the Python function.
kwargs: Canonicalized keyword arguments of the Python function.
Returns:
The result of applying the function on the Tensors/Variables contained in
`args` and `kwargs`.
"""
return self._call_flat(
(t for t in nest.flatten((args, kwargs))
if isinstance(
t, (ops.Tensor, resource_variable_ops.ResourceVariable))))
def _call_flat(self, args):
"""Executes the wrapped function.
Args:
args: a list of Tensors or Variables.
Returns:
The result of applying the TF function to `args`.
Raises:
ValueError: If `args` contains anything other than Tensors or Variables.
"""
ctx = context.context()
for v in self._func_graph.variables:
if v.trainable:
tape.variable_accessed(v)
tensor_inputs = []
for i, arg in enumerate(args):
if isinstance(arg, resource_variable_ops.ResourceVariable):
if arg.trainable:
tape.variable_accessed(arg)
tensor_inputs.append(arg.handle)
elif isinstance(arg, ops.Tensor):
tensor_inputs.append(arg)
elif (self._signature is not None and
isinstance(self._signature[i], tensor_spec.TensorSpec)):
tensor_inputs.append(
ops.convert_to_tensor(arg, self._signature[i].dtype))
else:
raise ValueError("All inputs to `Function`s must be Tensors; "
"on invocation of %s, the %d-th input (%s) was not a "
"Tensor." % (self._func_graph.name, i, str(arg)))
args = tensor_inputs + self._captured_inputs
if (tape.should_record(tensor_inputs) or
tape.should_record(self._captured_inputs)):
if context.executing_eagerly():
return self._eager_backprop_call(args)
else:
return self._backprop_call_with_delayed_rewrite(args)
# Only need to override the gradient in graph mode and when we have outputs.
if context.executing_eagerly() or not self.outputs:
outputs = self._inference_function.call(ctx, args)
else:
if not self._gradient_name:
self._gradient_name = "PartitionedCall-%s" % ops.uid()
self._register_gradient(self._gradient_name)
with ops.get_default_graph().gradient_override_map(
{"PartitionedCall": self._gradient_name,
"StatefulPartitionedCall": self._gradient_name}):
outputs = self._inference_function.call(ctx, args)
return self._build_call_outputs(outputs)
def _register_gradient(self, name):
"""Registers the gradient for the current Function under the given name.
The gradient rewrites an inference call op to a forward call op, but does
not modify a pre-existing forward call op. It then computes the gradient
from the output's gradients and the side outputs of the forward op.
Args:
name: The name to register the gradient as.
"""
@ops.RegisterGradient(name)
def _registered_grad_fn(op, *doutputs): # pylint: disable=unused-variable
return self._grad_fn(op, *doutputs)
def _grad_fn(self, op, *doutputs):
"""Gradients of this function."""
if self._backward_graph_function is None:
self._construct_backprop_function()
# pylint: disable=protected-access
self._forward_function.add_to_graph(op.graph)
num_inference_outputs = self._inference_function._num_outputs
# Rewrite an inference call op to be a forward call op
if op.get_attr("f").name.encode() == self._inference_function.name:
op._set_func_attr("f", self._forward_function.name)
op._set_type_list_attr("Tout", self._forward_function._output_types)
op._add_outputs(
self._forward_function._output_types[num_inference_outputs:],
self._forward_function._output_shapes[num_inference_outputs:])
for i in range(num_inference_outputs, len(op.outputs)):
func_graph_output = self._forward_function._func_graph_outputs[i]
custom_gradient.copy_handle_data(func_graph_output, op.outputs[i])
# pylint: enable=protected-access
# Compute the gradients using the side outputs
side_outputs = op.outputs[num_inference_outputs:]
args = list(doutputs[:num_inference_outputs]) + list(side_outputs)
return self._backward_graph_function._call_flat( # pylint: disable=protected-access
(a for a in args if a is not None))
@property
def name(self):
"""Function name."""
return self._inference_function.name
@property
def graph(self):
"""Returns the graph from which this function was constructed."""
return self._func_graph
@property
def inputs(self):
"""Returns tensors in `self.graph` corresponding to arguments."""
return self._func_graph.inputs
@property
def outputs(self):
"""Returns tensors in `self.graph` corresponding to return values."""
return self._func_graph.outputs
@property
def captured_inputs(self):
"""Returns external Tensors captured by this function.
self.__call__(*args) passes `args + self.captured_inputs` to the function.
"""
return self._captured_inputs
@property
def function_def(self):
"""Returns a `FunctionDef` object representing this function."""
return self._inference_function.definition
@property
def output_shapes(self):
"""The function's output shapes."""
# TODO(ebrevdo): Should we only keep the output shapes associated
# with len(self._python_returns) outputs?
# TODO(akshayka): Consider removing this.
outputs_list = nest.flatten(self._func_graph.structured_outputs)
j = 0
for i, o in enumerate(outputs_list):
if o is not None:
if isinstance(o, ops.IndexedSlices):
# Extract the shape of the `IndexedSlices` object's `values` field.
outputs_list[i] = self._output_shapes[j] # the `values` shape
if o.dense_shape is not None:
j += 3 # skip over shapes for `values`, `indices`, `dense_shape`
else:
j += 2 # skip over shapes for `values`, `indices`
else:
outputs_list[i] = self._output_shapes[j]
j += 1
return nest.pack_sequence_as(self._func_graph.structured_outputs,
outputs_list)
@property
def output_dtypes(self):
# TODO(akshayka): Consider removing this.
return nest.map_structure(lambda x: x.dtype if x is not None else None,
self._func_graph.structured_outputs)
def add_to_graph(self, g=None, register_gradient_functions=False):
"""Registers the function, adds it to the graph g or default graph."""
# If we are not executing eagerly, adds the function to default graph if no
# graph is specified.
# In case of eager execution, function definition gets added to context
# during construction itself.
    # TODO(allenl/shivaniagrawal): rename this to register to reflect the
    # method's functionality better. Remove the register_gradient_functions
    # argument and figure out whether these gradient functions need to be
    # registered.
if not context.executing_eagerly() or g:
if not g:
g = ops.get_default_graph()
self._inference_function.add_to_graph(g) # pylint: disable=protected-access
# pylint: disable=protected-access
if register_gradient_functions:
# There are two situations for the actual call of a defun:
        # 1. If none of the input args are resource variables or watched by
        #   any tape, the _inference_function of concrete_func runs the
        #   forward pass and the gradient is generated by the standard
        #   mechanism.
# 2. Otherwise, defun will create two functions, one for forward pass,
# and the backward pass will be created via tape.
# When registering the function, we register both cases.
if self._backward_graph_function is None:
self._construct_backprop_function()
forward_function = self._forward_function
backward_function = self._backward_graph_function._inference_function
# pylint: enable=protected-access
forward_function.add_to_graph(g)
backward_function.add_to_graph(g)
def _construct_backprop_function(self):
"""Constructs the backprop function object for this function."""
backwards_graph = func_graph_module.FuncGraph(
_backward_name(self._func_graph.name))
forward_function_name = _forward_name(self._func_graph.name)
outputs = [x for x in self._func_graph.outputs
if gradients_impl.IsTrainable(x)]
with backwards_graph.as_default():
gradients_wrt_outputs = [
graph_placeholder(x.dtype, x.shape) for x in outputs
]
gradients_wrt_inputs = gradients_impl._GradientsHelper( # pylint: disable=protected-access
outputs,
self._func_graph.inputs,
grad_ys=gradients_wrt_outputs,
src_graph=self._func_graph)
backwards_graph_captures = list(backwards_graph.captures.keys())
backward_function_attr = _parse_func_attrs(
{FORWARD_FUNCTION_ATTRIBUTE_NAME: forward_function_name})
backward_function_attr.update(self._attrs)
# The ordering of `backwards_graph.inputs` is important: inputs of
# `self._backward_graph_function` correspond to outputs of
# `self._forward_function`.
backwards_graph.inputs = gradients_wrt_outputs + list(
backwards_graph.captures.values())
# Clear captures, since we pass them in as inputs.
backwards_graph.captures = {}
backwards_graph.outputs.extend(
grad for grad in func_graph_module.flatten(gradients_wrt_inputs)
if grad is not None)
backwards_graph.structured_outputs = gradients_wrt_inputs
self._backward_graph_function = Function(
backwards_graph, attrs=backward_function_attr)
forward_function_attr = _parse_func_attrs({
BACKWARD_FUNCTION_ATTRIBUTE_NAME:
self._backward_graph_function._inference_function.name}) # pylint: disable=protected-access
forward_function_attr.update(self._attrs)
self._forward_function = _EagerDefinedFunction(
forward_function_name, self._func_graph, self._func_graph.inputs,
self._func_graph.outputs + backwards_graph_captures,
forward_function_attr)
def _eager_backprop_call(self, args):
"""Calls the forward function and records the result on a tape.
This method fully constructs the forward and backward functions before
calling the function and recording them on the tape.
(Only records results on a tape if the function has outputs).
Args:
args: All inputs to the function, including resolved captured inputs
Returns:
The call output.
"""
if self._backward_graph_function is None:
self._construct_backprop_function()
ctx = context.context()
if not self._gradient_name:
self._gradient_name = "PartitionedCall-%s" % ops.uid()
self._register_gradient(self._gradient_name)
with ops.get_default_graph().gradient_override_map(
{"PartitionedCall": self._gradient_name,
"StatefulPartitionedCall": self._gradient_name}):
outputs = self._forward_function.call(ctx, args)
if isinstance(outputs, ops.Operation) or outputs is None:
return outputs
# `real_outputs` are the actual outputs of the inference graph function;
# `side_outputs` are the intermediate Tensors that were added as outputs to
# the forward graph function so that we can compute its gradient.
real_outputs = outputs[:self._num_outputs]
skip_positions = [i for i, t in enumerate(real_outputs)
if not gradients_impl.IsTrainable(t)]
side_outputs = outputs[self._num_outputs:]
def backward_function(*args):
args = [a for i, a in enumerate(args)
if a is not None and i not in skip_positions]
return self._backward_graph_function._call_flat( # pylint: disable=protected-access
list(args) + side_outputs)
tape.record_operation(self._forward_function.signature.name, real_outputs,
args, backward_function)
return self._build_call_outputs(real_outputs)
def _backprop_call_with_delayed_rewrite(self, args):
"""Calls the inference function and records the result on a tape.
The recorded backwards function will construct the backwards graph and
rewrite the inference function to the forward function. This only happens
if the recorded backwards function ends up being used to compute gradients.
This approach avoids constructing unnecessary graphs, but it only works if
we are calling this function when not executing eagerly.
(Only records results on a tape if the function has outputs)
Args:
args: All inputs to the function, including resolved captured inputs
Returns:
The call output.
"""
ctx = context.context()
if not self._gradient_name:
self._gradient_name = "PartitionedCall-%s" % ops.uid()
self._register_gradient(self._gradient_name)
with ops.get_default_graph().gradient_override_map(
{"PartitionedCall": self._gradient_name,
"StatefulPartitionedCall": self._gradient_name}):
outputs = self._inference_function.call(ctx, args)
if isinstance(outputs, ops.Operation) or outputs is None:
return outputs
call_op = outputs[0].op
def backward_function(*args):
return self._grad_fn(call_op, *args)
tape.record_operation(self._inference_function.signature.name, outputs,
args, backward_function)
return self._build_call_outputs(outputs)
def _build_call_outputs(self, result):
"""Maps the fdef output list to actual output structure.
Args:
result: Output lists defined by FunctionDef.
Returns:
The actual call output.
"""
if self._func_graph.structured_outputs is None:
return result
# Use `nest.flatten` instead of `func_graph_module.flatten` in order to
# preserve any IndexedSlices in `self._func_graph.structured_outputs`.
outputs_list = nest.flatten(self._func_graph.structured_outputs)
j = 0
for i, o in enumerate(outputs_list):
if o is not None:
if isinstance(o, ops.IndexedSlices):
# Repack Tensors for IndexedSlices.
if o.dense_shape is not None:
outputs_list[i] = ops.IndexedSlices(
values=result[j],
indices=result[j + 1],
dense_shape=result[j + 2])
j += 3
else:
outputs_list[i] = ops.IndexedSlices(
values=result[j], indices=result[j + 1])
j += 2
else:
outputs_list[i] = result[j]
j += 1
ret = nest.pack_sequence_as(self._func_graph.structured_outputs,
outputs_list)
return ret
class UnknownArgument(object):
"""Signifies an argument which is not currently handled."""
pass
def _encode_arg_for_serialization(arg):
"""A representation for this argument, for serializing signatures."""
if isinstance(arg, ops.Tensor):
return tensor_spec.TensorSpec(arg.shape, arg.dtype)
else:
return UnknownArgument()
pywrap_tensorflow.RegisterType("Tensor", ops.Tensor)
pywrap_tensorflow.RegisterType("IndexedSlices", ops.IndexedSlices)
def _deterministic_dict_values(dictionary):
return tuple(dictionary[key] for key in sorted(dictionary))
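# Illustrative sketch (not part of the original module; the helper below is
# hypothetical): behavior of the two small utilities above.
def _example_serialization_helpers():
  """Shows _encode_arg_for_serialization and _deterministic_dict_values."""
  # Tensors are summarized as a TensorSpec; anything else as UnknownArgument.
  spec = _encode_arg_for_serialization(constant_op.constant([1.0, 2.0]))
  assert isinstance(spec, tensor_spec.TensorSpec)
  assert isinstance(_encode_arg_for_serialization("not a tensor"),
                    UnknownArgument)
  # Dictionary values come back as a tuple in key-sorted order.
  assert _deterministic_dict_values({"b": 2, "a": 1, "c": 3}) == (1, 2, 3)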
class PolymorphicFunction(object):
"""Wrapper class for the graph functions defined for a Python function.
See the documentation for `defun` for more information on the semantics of
defined functions.
  PolymorphicFunction class is thread-compatible, meaning that minimal
usage of defuns (defining and calling) is thread-safe, but if users call other
methods or invoke the base `python_function` themselves, external
synchronization is necessary.
"""
def __init__(self,
python_function,
name,
input_signature=None,
attributes=None,
autograph=True):
"""Initializes a polymorphic function.
Args:
python_function: the function to be wrapped.
name: the name given to it.
input_signature: a possibly nested sequence of `TensorSpec` objects
specifying the input signature of this function. If `None`, a separate
function is instantiated for each inferred input signature.
attributes: dict, extra keyword arguments that will be added as attribute
of the function.
autograph: whether to use autograph to compile
`python_function`. See https://www.tensorflow.org/guide/autograph for
more information.
Raises:
ValueError: if `input_signature` is not None and the `python_function`'s
argspec has keyword arguments.
"""
if isinstance(python_function, functools.partial):
self._python_function = python_function.func
self._args_to_prepend = python_function.args or tuple()
self._kwargs_to_include = python_function.keywords or {}
else:
self._python_function = python_function
self._args_to_prepend = tuple()
self._kwargs_to_include = {}
self._name = name
self._autograph = autograph
self._function_cache = collections.OrderedDict()
self._function_attributes = attributes or {}
self._lock = threading.Lock()
    # _descriptor_cache maps an instance of a class to an instance-specific
# PolymorphicFunction, used to make sure defun-decorated methods create
# different functions for each instance.
self._descriptor_cache = weakref.WeakKeyDictionary()
fullargspec = tf_inspect.getfullargspec(self._python_function)
if tf_inspect.ismethod(self._python_function):
# Remove `self`: default arguments shouldn't be matched to it.
args = fullargspec.args[1:]
else:
args = fullargspec.args
# A cache mapping from argument name to index, for canonicalizing
# arguments that are called in a keyword-like fashion.
self._args_to_indices = {arg: i for i, arg in enumerate(args)}
self._arg_names = args
self._vararg_name = fullargspec.varargs
# A cache mapping from arg index to default value, for canonicalization.
offset = len(args) - len(fullargspec.defaults or [])
self._arg_indices_to_default_values = {
offset + index: default
for index, default in enumerate(fullargspec.defaults or [])
}
self._default_values = fullargspec.defaults
self._default_values_start_index = offset
if input_signature is None:
self._input_signature = None
else:
if fullargspec.varkw is not None or fullargspec.kwonlyargs:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
if not isinstance(input_signature, (tuple, list)):
raise TypeError("input_signature must be either a tuple or a "
"list, received " + str(type(input_signature)))
self._input_signature = tuple(input_signature)
self._flat_input_signature = tuple(nest.flatten(input_signature))
def __call__(self, *args, **kwargs):
"""Calls a graph function specialized to the inputs."""
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
@property
def python_function(self):
"""Returns the wrapped Python function."""
return self._python_function
def _get_concrete_function_internal(self, *args, **kwargs):
"""Bypasses error checking when getting a graph function."""
if self._input_signature:
args, kwargs = None, None
graph_function, _, _ = self._maybe_define_function(args, kwargs)
return graph_function
def get_concrete_function(self, *args, **kwargs):
"""Returns a `Function` object specialized to inputs and execution context.
Args:
*args: inputs to specialize on.
**kwargs: inputs to specialize on.
"""
if self._input_signature:
if kwargs:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
if args:
# If args are provided, they must match the input signature.
try:
nest.assert_same_structure(self._input_signature, args)
except (ValueError, TypeError):
raise ValueError("Structure of Python function inputs does not match "
"input_signature.")
flat_inputs = nest.flatten(args)
if any(not isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec))
for arg in flat_inputs):
raise ValueError("When input_signature is provided, all inputs to "
"the Python function must be Tensors or "
"tf.TensorSpec objects.")
if any(not spec.is_compatible_with(other)
for spec, other in zip(self._flat_input_signature, flat_inputs)):
raise ValueError("Python inputs incompatible with input_signature: "
"inputs (%s), input_signature (%s)" %
(str(args), str(self._input_signature)))
args, kwargs = None, None
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
if self._input_signature:
args = self._input_signature
kwargs = {}
seen_names = set()
captured = frozenset(graph_function.graph.internal_captures)
allowed_positional = 0
if args:
for outer_arg in args:
# TODO(allenl): Consider allowing arguments with defaults in the Python
# function's signature to be passed as positional arguments to the
# concrete function.
if not isinstance(
outer_arg,
(ops.Tensor, resource_variable_ops.ResourceVariable,
tensor_spec.TensorSpec)):
break
allowed_positional += 1
# pylint: disable=protected-access
graph_function._num_positional_args = allowed_positional
graph_function._arg_keywords = []
# pylint: enable=protected-access
for arg in graph_function.graph.inputs:
if arg in captured:
break
user_arg_name = arg.op.get_attr("_user_specified_name")
if user_arg_name in seen_names:
raise ValueError(
("Unable to construct a concrete function for {} since some "
"arguments do not have unique names. Got two arguments named "
"'{}'. When constructing a concrete TensorFlow function from a "
"Python function which takes nested structures or variadic "
"positional arguments, pass unique names to tf.TensorSpec objects "
"used to identify these Tensor inputs. These names may then be "
"used as keyword arguments to the concrete function.")
.format(
self._python_function,
compat.as_str(arg.op.get_attr("_user_specified_name"))))
seen_names.add(user_arg_name)
graph_function._arg_keywords.append(user_arg_name) # pylint: disable=protected-access
return graph_function
def __get__(self, instance, owner):
"""Makes it possible to defun instance methods."""
del owner
# `instance` here is the instance that this `PolymorphicFunction` was
# accessed through; e.g., for
#
# class Foo(object):
#
# @function.defun
# def bar(self):
# ...
#
# foo = Foo()
# foo.bar() # `foo.bar` is a `PolymorphicFunction` instance
#
# then `instance` will be `foo` (and `owner` will be `Foo`). We create a
# new instance of PolymorphicFunction here to allow different instances each
# to create variables once, thereby allowing methods to be decorated with
# defun. Keeps a cache to avoid retracing the function every time the
# descriptor is accessed.
if instance not in self._descriptor_cache:
if instance is None:
return self
# If there is no instance-specific polymorphic func in the cache,
# we construct an instance-specific polymorphic function
# that uses a weak reference to the instance (so that the instance will
# be correctly gc'd).
      # And finally add the wrapped function to the descriptor cache
self._descriptor_cache[instance] = class_method_to_instance_method(
self, instance)
# Return the cached polymorphic function for the instance
return self._descriptor_cache[instance]
def _cache_key(self, args, kwargs):
"""Computes the cache key given inputs and execution context."""
if self._input_signature is None:
inputs = (args, kwargs) if kwargs else args
input_signature = pywrap_tensorflow.TFE_Py_EncodeArg(inputs)
else:
del args, kwargs
input_signature = self._flat_input_signature
ctx = context.context()
# Don't need to open an init_scope if the _cache_key call is in eager mode
# already.
executing_eagerly = ctx.executing_eagerly()
parent_graph = None
if not executing_eagerly:
with ops.init_scope():
# The graph, or whether we're executing eagerly, should be a part of the
# cache key so we don't improperly capture tensors such as variables.
executing_eagerly = ctx.executing_eagerly()
parent_graph = None if executing_eagerly else ops.get_default_graph()
# pylint: disable=protected-access
default_graph = ops.get_default_graph()
# TODO(b/117617952): The current distribution strategy will affect graph
# building (e.g. accessing different variables from different devices) and
# so requires retracing for each device.
uses_distribution_strategy = bool(
default_graph._distribution_strategy_stack)
if executing_eagerly:
colocation_stack = ()
uses_xla = ctx.device_spec.device_type == "TPU"
if uses_distribution_strategy or uses_xla:
device_functions = (pydev.merge_device(ctx.device_name),)
else:
device_functions = ()
else:
colocation_stack = tuple(default_graph._colocation_stack.peek_objs())
uses_xla = getattr(default_graph, "_xla_compile", False)
if (uses_distribution_strategy
or uses_xla
or func_graph_module.device_stack_has_callable(
default_graph._device_function_stack)):
# Putting the device in the cache key ensures that call-site device
# annotations are respected.
device_functions = tuple(default_graph._device_functions_outer_to_inner)
else:
device_functions = ()
# pylint: enable=protected-access
return CacheKey(input_signature, parent_graph, device_functions,
colocation_stack, uses_xla)
def _canonicalize_function_inputs(self, *args, **kwargs):
"""Canonicalizes `args` and `kwargs`.
Canonicalize the inputs to the Python function using its fullargspec. In
    particular, we parse the varargs and kwargs that this
`PolymorphicFunction` was called with into a tuple corresponding to the
Python function's positional (named) arguments and a dictionary
corresponding to its kwargs.
Args:
*args: The varargs this object was called with.
**kwargs: The keyword args this function was called with.
Returns:
A canonicalized ordering of the inputs.
Raises:
ValueError: If a keyword in `kwargs` cannot be matched with a positional
argument when an input signature is specified, or when the inputs
do not conform to the input signature.
"""
args = self._args_to_prepend + args
kwargs = dict(kwargs, **self._kwargs_to_include)
if not kwargs:
if self._default_values:
inputs = args + self._default_values[len(args) -
self._default_values_start_index:]
else:
inputs = args
else:
# Maps from index of arg to its corresponding value, according to `args`
# and `kwargs`; seeded with the default values for the named args that
# aren't in `args`.
arg_indices_to_values = {
index: default for index, default in six.iteritems(
self._arg_indices_to_default_values) if index >= len(args)
}
consumed_args = []
for arg, value in six.iteritems(kwargs):
index = self._args_to_indices.get(arg, None)
if index is not None:
arg_indices_to_values[index] = value
consumed_args.append(arg)
elif self._input_signature is not None:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
for arg in consumed_args:
# After this loop, `kwargs` will only contain true keyword arguments, as
# opposed to named arguments called in a keyword-like fashion.
kwargs.pop(arg)
inputs = args + _deterministic_dict_values(arg_indices_to_values)
flat_inputs = nest.flatten(inputs)
# Check for NumPy arrays in arguments and convert them to Tensors.
# TODO(nareshmodi): Skip ndarray conversion to tensor altogether, perhaps
# finding a way to store them directly in the cache key (currently not
# possible since ndarrays are not hashable).
need_packing = False
for index, value in enumerate(flat_inputs):
if type(value) == np.ndarray:
flat_inputs[index] = constant_op.constant(value)
need_packing = True
if need_packing:
inputs = nest.pack_sequence_as(structure=inputs,
flat_sequence=flat_inputs)
if self._input_signature is None:
return inputs, kwargs
else:
assert not kwargs
signature_relevant_inputs = inputs[:len(self._input_signature)]
try:
nest.assert_same_structure(self._input_signature,
signature_relevant_inputs)
except (ValueError, TypeError):
raise ValueError("Structure of Python function inputs does not match "
"input_signature.")
signature_inputs_flat = nest.flatten(signature_relevant_inputs)
if any(not pywrap_tensorflow.IsTensor(arg)
for arg in signature_inputs_flat):
raise ValueError("When input_signature is provided, all inputs to "
"the Python function must be Tensors.")
if any(not spec.is_compatible_with(other)
for spec, other in zip(self._flat_input_signature,
signature_inputs_flat)):
raise ValueError("Python inputs incompatible with input_signature: "
"inputs (%s), input_signature (%s)" %
(str(inputs), str(self._input_signature)))
return inputs, {}
def _maybe_define_function(self, args, kwargs):
"""Gets a function for these inputs, defining it if necessary.
`args` and `kwargs` can be None if this `PolymorphicFunction` was created
with an `input_signature`.
Args:
args: The varargs for the Python function.
kwargs: The keyword args for the Python function.
Returns:
A graph function corresponding to the input signature implied by args and
kwargs, as well as the inputs that the object should be called with.
Raises:
ValueError: If inputs are incompatible with the input signature.
TypeError: If the function inputs include non-hashable objects
"""
if self._input_signature is None or args is not None or kwargs is not None:
args, kwargs = self._canonicalize_function_inputs(*args, **kwargs)
cache_key = self._cache_key(args, kwargs)
with self._lock:
try:
graph_function = self._function_cache.get(cache_key, None)
except TypeError:
raise TypeError("Arguments supplied to `defun`-generated functions "
"must be hashable.")
if graph_function is None:
logging.vlog(1,
"Creating new FuncGraph for Python function %r (key: %r)",
self._python_function, cache_key)
if self._input_signature is None:
arglen = len(args)
else:
arglen = len(self._input_signature)
arg_names = (
self._arg_names[:arglen]
+ [self._vararg_name] * (arglen - len(self._arg_names)))
graph_function = Function(
func_graph_module.func_graph_from_py_func(
self._name,
self._python_function,
args,
kwargs,
self._input_signature,
autograph=self._autograph,
arg_names=arg_names),
self._function_attributes)
if self._input_signature:
python_call_signature = self._input_signature
else:
python_call_signature = tuple(
_encode_arg_for_serialization(arg) for arg in args)
# Save information about non-Tensor arguments with the concrete
# function. Used to serialize PolymorphicFunctions.
graph_function._python_call_signature = python_call_signature # pylint: disable=protected-access
self._function_cache[cache_key] = graph_function
return graph_function, args, kwargs
def register(func, *args, **kwargs):
"""Register a specialization of a PolymorphicFunction into the graph.
This won't actually call the function with the inputs, and only put the
function definition into graph. Register function with different input param
will result into multiple version of functions registered in graph.
Args:
func: the PolymorphicFunction instance that generated by a @defun
*args: input arguments for the Python function.
**kwargs: input keyword arguments for the Python function.
Returns:
a `Function` object specialized to inputs and execution context.
Raises:
ValueError: When the input function is not a defun wrapped python function.
"""
if not isinstance(func, PolymorphicFunction):
raise ValueError("Only defun function is allowed to be registered. "
"Got type: %s" % type(func))
concrete_func = func.get_concrete_function(*args, **kwargs)
concrete_func.add_to_graph(register_gradient_functions=True)
return concrete_func
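# Illustrative sketch (not part of the original module; names below are
# hypothetical): registering one graph-mode specialization of a defun-wrapped
# function, mirroring the docstring above, without ever calling it.
def _example_register_usage():
  """Registers add(1., 2.) into a freshly created graph."""
  @defun
  def add(x, y):
    return x + y
  with ops.Graph().as_default() as g:
    concrete = register(add, constant_op.constant(1.0),
                        constant_op.constant(2.0))
  return g, concrete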
def validate_signature(signature):
if any(not isinstance(arg, tensor_spec.TensorSpec)
for arg in nest.flatten(signature)):
    raise TypeError("Invalid input_signature %s; input_signature must be "
                    "a possibly nested sequence of TensorSpec objects." %
                    (signature,))
def defun(func=None, input_signature=None, autograph=True):
"""Compiles a Python function into a callable TensorFlow graph.
`defun` (short for "define function") compiles a Python function
composed of TensorFlow operations into a callable that executes a `tf.Graph`
containing those operations. The callable produced by `defun` contains only
the subgraph of TensorFlow operations that were executed when the Python
function was called with a particular input signature, defined as a list
of the shapes and dtypes of the Python function's Tensor-valued arguments and
the values of its non-Tensor Python objects.
When eager execution is enabled, the ability to create graphs from Python
  functions makes it possible to incrementally trade off debuggability and
interactivity for performance. Functions compiled with `defun` cannot be
inspected with `pdb`; however, executing a graph
generated by `defun` sometimes takes less time and memory than eagerly
executing the corresponding Python function, since specifying computations as
graphs allows for optimizations like automatic buffer reuse and
parallelization among ops. Note that executing a `defun`-compiled function
incurs a small constant overhead, so eagerly executing sufficiently small
Python functions might take less time than executing their corresponding
`defun`-generated graphs.
For a Python function to be compatible with `defun`, all of its arguments must
be hashable Python objects or lists thereof. The function itself may not
modify the list/map structure of its arguments. Additionally, it must return
zero or more `tf.Tensor` objects. If the Python function returns
a `tf.Variable`, its compiled version will return the value of that variable
as a `tf.Tensor`.
Executing a graph generated by `defun` respects device annotations (i.e.,
all `with tf.device` directives present in a Python function will also be
present in its corresponding graph), but it is not yet possible to execute the
generated graphs across multiple machines.
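  For instance, a minimal sketch (assuming a GPU device is available):
  ```python
  @tf.contrib.eager.defun
  def matmul_on_gpu(a, b):
    with tf.device("/device:GPU:0"):  # preserved in the generated graph
      return tf.matmul(a, b)
  ```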
_Example Usage_
```python
import tensorflow as tf
tf.enable_eager_execution()
# A simple example.
def f(x, y):
return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)
g = tf.contrib.eager.defun(f)
x = tf.constant([[2.0, 3.0]])
y = tf.constant([[3.0, -2.0]])
# `f` and `g` will return the same value, but `g` will be executed as a
# TensorFlow graph.
assert f(x, y).numpy() == g(x, y).numpy()
# `defun` is capable of compiling Python functions that close over Python
# objects, including Tensors and Variables.
@tf.contrib.eager.defun
def h():
return f(x, y)
assert (h().numpy() == f(x, y).numpy()).all()
# `defun` automatically lifts variables out of the graphs it creates,
# allowing you to compile the `call` methods of `tf.keras.layers.Layer` and
# `tf.keras.Model` objects.
class MyModel(tf.keras.Model):
def __init__(self, keep_probability=0.2):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.keep_probability = keep_probability
@tf.contrib.eager.defun
def call(self, inputs, training=True):
x = self.dense2(self.dense1(inputs))
if training:
return tf.nn.dropout(x, self.keep_probability)
else:
return x
model = MyModel()
model(x, training=True) # executes a graph, with dropout
model(x, training=False) # executes a graph, without dropout
# `defun`-compiled functions are differentiable.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
with tf.GradientTape() as tape:
outputs = model(x)
gradient = tape.gradient(outputs, model.trainable_variables)
optimizer.apply_gradients((grad, var) for grad, var in zip(gradient,
model.trainable_variables))
```
When using `defun`, there are subtleties regarding inputs, Python control
flow, and variable creation that one should be aware of. For concreteness, let
`f` be a Python function that returns zero or more `tf.Tensor` objects and
let `F = defun(f)`. `F` builds a graph for each unique input signature it
sees, Python control flow is baked into graphs, and operations related to
variable initialization are automatically lifted out of the graphs that `F`
generates and placed in the eager context if executing eagerly or into an
outer graph otherwise.
_Input Signatures_
By default, `F = tf.contrib.eager.defun(f)` instantiates a separate graph
for every unique sequence of the shapes and dtypes of Tensor arguments and
the values of Python objects it is invoked with. For example, calling
  `F(tf.random_uniform([2]))` will execute a different graph than
  `F(tf.random_uniform([3]))` because the two inputs have different shapes.
The first time that `F(*args, **kwargs)` is called with a particular sequence
of Tensor shapes and dtypes and Python values, it constructs a graph by
tracing the execution of `f(*args, **kwargs)`; this graph is bound to an
input signature inferred from `(*args, **kwargs)` and cached for future reuse.
NumPy arrays passed as inputs to `F` are converted to `tf.Tensor` objects
before being passed to `f`, and are treated as Tensors for caching. This
allows a function to be called multiple times with NumPy arrays having
different values but the same shape and dtype without re-tracing each time.
`tf.contrib.eager.defun` caches graphs for your convenience, letting you
define TensorFlow functions without explicitly specifying their signatures.
However, this policy is conservative and potentially expensive; for example,
when different invocations of your function have differently-shaped Tensor
inputs, this policy might generate more graph functions than necessary. To
eliminate such costs, `tf.contrib.eager.defun` allows you to supply an
optional `input_signature` argument specifying the shapes and dtypes of the
inputs. In particular, the shapes may be partially unspecified, with `None`s
in the unknown dimensions. When an input signature is provided,
`tf.contrib.eager.defun` will only instantiate a single graph for the
decorated Python function. The following is an example:
```python
import tensorflow as tf
# The first `TensorSpec` below describes the shape and dtype of `words`,
# and the second describes the shape and dtype of `another_tensor`. Note that
# the last dimension of the `words` `TensorSpec` is left unspecified.
@tf.contrib.eager.defun(input_signature=[
tf.contrib.eager.TensorSpec(shape=[50, 300, None], dtype=tf.float32),
tf.contrib.eager.TensorSpec(shape=[300, 100], dtype=tf.float32)
])
def my_sequence_model(words, another_tensor):
...
# Note how the third dimension of the first input can vary freely.
  words = tf.random_uniform([50, 300, 10])
second_input = tf.random_uniform([300, 100])
my_sequence_model(words, second_input)
  words = tf.random_uniform([50, 300, 20])
my_sequence_model(words, second_input)
# Passing an input with an incompatible shape will raise an error.
  words = tf.random_uniform([50, 100, 20])
my_sequence_model(words, second_input) # <---- This will raise an error.
```
Python functions that are compiled with an `input_signature` must only accept
Tensors as arguments and must not take unnamed keyword arguments (**kwargs).
_Tracing_
Be aware that because `F` only logs TensorFlow operations, all the other
Python code that `f` executes will only shape the _construction_ of the graphs
that `F` executes: the Python code won't be executed when the graphs
themselves are executed, though it will be executed every time the Python
function is traced (and a given Python function might be traced multiple
times, once for each input signature it is invoked with). For example, whereas
the Python function
```python
import tensorflow as tf
import numpy as np
tf.enable_eager_execution()
def add_noise():
return tf.eye(5) + np.random.randn(5, 5)
```
  will return a different output every time it is invoked, the compiled function
`compiled = tf.contrib.eager.defun(add_noise)` will return the same value
every time it is called, since a particular random offset generated by NumPy
will be inserted into the graph as a TensorFlow constant. The solution is to
replace the call to `np.random.randn` with `tf.random_normal((5, 5))`.
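  For example, a minimal sketch of the fix described above:
  ```python
  def add_noise():
    return tf.eye(5) + tf.random_normal((5, 5))
  compiled = tf.contrib.eager.defun(add_noise)
  # The noise op is now part of the graph and re-runs on every invocation.
  ```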
_Python Side-Effects_
A corollary of the previous discussion on tracing is the following: If a
Python function `f` has Python side-effects, then executing `f` multiple times
will not necessarily be semantically equivalent to executing `F =
tf.contrib.eager.defun(f)` multiple times; this difference is due to the fact
that `defun` only captures the subgraph of TensorFlow operations that is
constructed when `f` is called in a graph-building context.
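  For example, a sketch of this difference (`trace_log` is just an illustrative
  Python list):
  ```python
  trace_log = []
  def f(x):
    trace_log.append(1)  # Python side-effect; runs only while tracing.
    return x * x
  F = tf.contrib.eager.defun(f)
  F(tf.constant(2.0))
  F(tf.constant(3.0))  # Same input signature: the cached graph is reused.
  assert len(trace_log) == 1  # whereas calling `f` twice would append twice
  ```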
_Python Control Flow_
  The structure of many machine learning computations depends upon whether one is
training or validating, and it is common to nest specialized logic under `if
training:` blocks. By mapping each input signature to a unique graph, `defun`
lets users transparently compile such code, as the following code snippet
demonstrates:
```python
import tensorflow as tf
tf.enable_eager_execution()
@tf.contrib.eager.defun
def lossy_matmul(W, x, training=True):
outputs = tf.matmul(W, x)
if training:
      outputs = tf.nn.dropout(outputs, keep_prob=0.2)
return outputs
W = tf.random_normal((3, 5))
x = tf.random_normal((5, 1))
# Executes a graph that applies dropout.
lossy_outputs = lossy_matmul(W, x, training=True)
# Executes a graph that does not apply dropout.
exact_outputs = lossy_matmul(W, x, training=False)
```
_TensorFlow Control Flow_
When `autograph` is `True`, data-dependent control flow is allowed as well.
Control flow statements that depend on `Tensor` values are staged into
corresponding TensorFlow ops. For example, the following code will work as
expected:
```python
@tf.contrib.eager.defun
def dynamic_rnn_loop(cell, seq):
state, output = cell.zero_state()
for input in seq:
state, output = cell(input, state)
return output
```
For more information see `tf.autograph`.
_Variables_
TensorFlow operations related to variable creation and initialization are
automatically lifted out of the graphs generated by `defun`. In practice, this
implies that variable creation and initialization only happen the first time
`F` is called, and that variables are reused every time thereafter. Many
TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the
first time they are called and reuse them thereafter. Automatic variable
lifting makes it possible to compile these APIs without extra effort, at the
cost of introducing a discrepancy between the semantics of executing Python
functions and their corresponding compiled functions. For example:
```python
import tensorflow as tf
tf.enable_eager_execution()
def fn():
x = tf.Variable(0.0)
x.assign_add(1.0)
return x.read_value()
# `fn` is a Python function, so x is created, initialized, and destroyed upon
# every invocation
assert fn().numpy() == fn().numpy() == 1.0
compiled = tf.contrib.eager.defun(fn)
# Compiling `fn` with `defun` hoists all variables outside of the generated
# graph, so initialization happens exactly once.
assert compiled().numpy() == 1.0
assert compiled().numpy() == 2.0
```
Finally, because each input signature is bound to a unique graph, if your
Python function constructs `tf.Variable` objects, then each graph constructed
for that Python function will reference a unique set of variables. To
circumvent this problem, we recommend against compiling Python functions that
create `tf.Variable` objects. Instead, Python functions should either
lexically close over `tf.Variable` objects or accept them as arguments,
preferably encapsulated in an object-oriented container. If you must create
variables inside your Python function and you want each graph generated for it
to reference the same set of variables, add logic to your Python function that
ensures that variables are only created the first time it is called and are
reused for every subsequent invocation; note that this is precisely what
`tf.keras.layers.Layer` objects do, so we recommend using them to represent
variable-bearing computations whenever possible.
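  For example, a minimal sketch of the recommended pattern, lexically closing
  over a variable created outside the compiled function:
  ```python
  v = tf.Variable(1.0)
  @tf.contrib.eager.defun
  def scale(x):
    return v * x  # `v` is captured, not re-created per traced graph.
  ```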
Args:
func: function to be compiled. If `func` is None, returns a
decorator that can be invoked with a single argument - `func`. The
end result is equivalent to providing all the arguments up front.
In other words, defun(input_signature=...)(func) is equivalent to
defun(func, input_signature=...). The former allows
the following use case:
@tf.contrib.eager.defun(input_signature=...)
def foo(...):
...
input_signature: A possibly nested sequence of
`tf.contrib.eager.TensorSpec` objects specifying the shapes and dtypes of
the Tensors that will be supplied to this function. If `None`, a separate
function is instantiated for each inferred input signature. If a
signature is specified, every input to `func` must be a `Tensor`, and
`func` cannot accept `**kwargs`.
autograph: Whether `func` should be compiled before
constructing the graph. See https://www.tensorflow.org/guide/autograph
for more information.
Returns:
If `func` is not None, returns a callable that will execute the compiled
function (and return zero or more `tf.Tensor` objects).
If `func` is None, returns a decorator that, when invoked with a single
`func` argument, returns a callable equivalent to the case above.
Raises:
TypeError: If `input_signature` is neither `None` nor a sequence of
`tf.contrib.eager.TensorSpec` objects.
"""
return defun_with_attributes(
func=func,
input_signature=input_signature,
autograph=autograph)
def defun_with_attributes(func=None,
input_signature=None,
attributes=None,
autograph=True):
"""Compiles a Python function into a callable TensorFlow graph.
This function supports adding extra function attributes. See detailed
  documentation in defun(). Currently this is not exposed in the public API
  since we don't expect users to use attributes directly, and attributes won't
  work by themselves. This assumption might change in the future.
Args:
func: function to be compiled.
input_signature: same as defun()'s input_signature.
    attributes: A dictionary of arguments which will be added to the function
      def as attributes. Currently only primitive types are supported as
      values, and only whitelisted attribute names are allowed. An
      unwhitelisted attribute name or an unsupported value will result in a
      ValueError. `func_name` is also one of the whitelisted arguments; it is
      a Python string that sets the name for this `Function` in the graph.
autograph: same as defun()'s autograph.
Returns:
Same as the return value of defun, with attributes added to the function in
graph.
"""
if input_signature is not None:
validate_signature(input_signature)
# TODO(apassos): deal with captured global state. Deal with control flow.
def decorated(function):
try:
if attributes:
name = attributes.pop("func_name", function.__name__)
else:
name = function.__name__
except AttributeError:
name = "function"
return tf_decorator.make_decorator(
function,
PolymorphicFunction(
function,
name,
input_signature=input_signature,
attributes=attributes,
autograph=autograph))
# This code path is for the `foo = tfe.defun(foo, ...)` use case
if func is not None:
return decorated(func)
# This code path is for the
#
# @tfe.defun(...)
# def foo(...):
# ...
#
# use case, which is equivalent to `foo = tfe.defun(...)(foo)`
return decorated
# When a method is bound to objects of this type, it allows AutoGraph to
# recover a weak reference to the original method's self pointer. This uses the
# mechanism from pyct.inspect_utils.getmethodclass.
# TODO(b/119246461): This is not pretty. Use a descriptor instead?
class _WeakrefSelf(object):
def __init__(self, target):
self.ag_self_weakref__ = target
def class_method_to_instance_method(original_function, instance):
"""Constructs a new PolymorphicFunction with `self` bound."""
weak_instance = weakref.ref(instance)
# Note: while we could bind to a weakref proxy instead, that causes the
# bound method to be unhashable.
bound_method = types_lib.MethodType(original_function.python_function,
_WeakrefSelf(weak_instance))
# original_function is expected to be of one of the two PolymorphicFunction
# types (defined either in function.py or def_function.py).
assert hasattr(original_function, "_name")
assert hasattr(original_function, "_autograph")
assert hasattr(original_function, "_input_signature")
assert hasattr(original_function, "python_function")
def bound_method_wrapper(*args, **kwargs):
"""Wraps either a dummy MethodType or a converted AutoGraph function."""
# __wrapped__ allows AutoGraph to swap in a converted function.
wrapped_fn = bound_method_wrapper.__wrapped__
if wrapped_fn is bound_method_wrapper.__original_wrapped__:
# If __wrapped__ was not replaced, then call original_function.
wrapped_fn = original_function.python_function
if tf_inspect.ismethod(wrapped_fn):
wrapped_fn = six.get_unbound_function(wrapped_fn)
return wrapped_fn(weak_instance(), *args, **kwargs)
# If __wrapped__ was replaced, then it is always an unbound function
# that takes self as first argument.
return wrapped_fn(weak_instance(), *args, **kwargs)
# pylint: disable=protected-access
# We make a dummy MethodType object to generate the correct bound method
# signature. The actual call is to a function with a weak reference to
# `instance`.
instance_func = type(original_function)(
tf_decorator.make_decorator(bound_method, bound_method_wrapper),
name=original_function._name,
autograph=original_function._autograph,
input_signature=original_function._input_signature)
# pylint: enable=protected-access
# And we wrap the function with tf_decorator so inspection works correctly
wrapped_instance_func = tf_decorator.make_decorator(
original_function.python_function, instance_func)
return wrapped_instance_func
| {
"content_hash": "396e56cc215fbdf58ba94d059209837c",
"timestamp": "",
"source": "github",
"line_count": 1657,
"max_line_length": 105,
"avg_line_length": 40.92939046469523,
"alnum_prop": 0.6817752875258036,
"repo_name": "asimshankar/tensorflow",
"id": "6770f1d3b3745906ebf4c13f24591c1d79e2d909",
"size": "68549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/function.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "490070"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "52677142"
},
{
"name": "CMake",
"bytes": "207176"
},
{
"name": "Dockerfile",
"bytes": "39454"
},
{
"name": "Go",
"bytes": "1290930"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "890529"
},
{
"name": "Jupyter Notebook",
"bytes": "2618412"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "68402"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102518"
},
{
"name": "PHP",
"bytes": "5172"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "43038983"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "497659"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['arni_gui'],
package_dir={'': 'src'},
)
setup(**setup_args)
| {
"content_hash": "aa1bdca166ded693e174c20cd320b683",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 60,
"avg_line_length": 24.5,
"alnum_prop": 0.726530612244898,
"repo_name": "andreasBihlmaier/arni",
"id": "2f38067d77a869bba88289cfc1b41fe7c5a2df99",
"size": "308",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "arni_gui/setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CMake",
"bytes": "1293"
},
{
"name": "Python",
"bytes": "416094"
},
{
"name": "QMake",
"bytes": "850"
}
],
"symlink_target": ""
} |
"""Query build slave hardware info, and print it to stdout as csv."""
import csv
import json
import logging
import sys
import urllib2
_MASTERS = [
'chromium.perf',
'client.catapult',
'tryserver.chromium.perf',
'tryserver.client.catapult',
]
_KEYS = [
'master', 'builder', 'hostname',
'os family', 'os version',
'product name', 'architecture', 'processor count', 'processor type',
'memory total',
'facter version', 'git version', 'puppet version', 'python version',
'ruby version',
'android device 1', 'android device 2', 'android device 3',
'android device 4', 'android device 5', 'android device 6',
'android device 7',
]
_EXCLUDED_KEYS = frozenset([
'architecture (userland)',
'b directory',
'last puppet run',
'uptime',
'windows version',
])
def main():
writer = csv.DictWriter(sys.stdout, _KEYS)
writer.writeheader()
for master_name in _MASTERS:
master_data = json.load(urllib2.urlopen(
'http://build.chromium.org/p/%s/json/slaves' % master_name))
slaves = sorted(master_data.iteritems(),
key=lambda x: (x[1]['builders'].keys(), x[0]))
for slave_name, slave_data in slaves:
for builder_name in slave_data['builders']:
row = {
'master': master_name,
'builder': builder_name,
'hostname': slave_name,
}
host_data = slave_data['host']
if host_data:
host_data = host_data.splitlines()
if len(host_data) > 1:
for line in host_data:
if not line:
continue
key, value = line.split(': ')
if key in _EXCLUDED_KEYS:
continue
row[key] = value
# Munge keys.
row = {key.replace('_', ' '): value for key, value in row.iteritems()}
if 'osfamily' in row:
row['os family'] = row.pop('osfamily')
if 'product name' not in row and slave_name.startswith('slave'):
row['product name'] = 'Google Compute Engine'
try:
writer.writerow(row)
except ValueError:
logging.error(row)
raise
if __name__ == '__main__':
main()
| {
"content_hash": "01e474e0ca2d6d9e489340aab2187d98",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 25.57471264367816,
"alnum_prop": 0.5635955056179776,
"repo_name": "SummerLW/Perf-Insight-Report",
"id": "85d4ffca7e97b6b01131a90e7df94996c60e9ea2",
"size": "2410",
"binary": false,
"copies": "1",
"ref": "refs/heads/test",
"path": "experimental/hardware.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6411"
},
{
"name": "CSS",
"bytes": "14952"
},
{
"name": "HTML",
"bytes": "27508823"
},
{
"name": "JavaScript",
"bytes": "75587"
},
{
"name": "Python",
"bytes": "4638631"
},
{
"name": "Shell",
"bytes": "2124"
}
],
"symlink_target": ""
} |
from yapsy.IPlugin import IPlugin
import logging
import requests
import requests_cache
import socket
import json
from xml.etree import ElementTree
import plugin_ilmatieteenlaitos_secret as secret
from datetime import datetime
from jinja2 import Environment, PackageLoader
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
CITY = 'tampere'
REMOTE_API_BASE_URL = "http://data.fmi.fi/fmi-apikey/"
class Ilmatieteenlaitos(IPlugin):
def __init__(self):
cache_path = "plugins/" + __name__
requests_cache.install_cache(cache_path, backend='memory', expire_after=240)
log.debug("Installed cache")
def _get_xml_data(self, url):
""" Helper for fetching xml from url. """
try:
result = requests.get(url)
log.debug('From cache: %s' % result.from_cache)
if result.status_code != 200:
log.error("Server returned %s" % result.status_code)
raise Exception("Server returned %s" % result.status_code)
root = ElementTree.fromstring(result.content)
        except requests.exceptions.ConnectionError, err:
            log.error(err)
            # Re-raise so that `root` is not referenced before assignment below.
            raise err
except Exception, err:
log.error(err)
raise err
return root
def _parse_xml_data(self, url):
""" Helper for parsing xml from url. Always returns valid JSON """
root = self._get_xml_data(url)
ns = {
'gml': 'http://www.opengis.net/gml/3.2',
'gmlcov': 'http://www.opengis.net/gmlcov/1.0',
'swe': 'http://www.opengis.net/swe/2.0'
}
assert len(root.findall(".//gmlcov:positions", ns)) == 1, "More than one gmlcov:positions found"
assert len(root.findall(".//gml:doubleOrNilReasonTupleList", ns)) == 1, "More than one gml:doubleOrNilReasonTupleList found"
assert len(root.findall(".//swe:DataRecord", ns)) == 1, "More than one swe:DataRecord found"
forecast = []
try:
# Parse parameter names
field_types = [field.attrib.get('name') for field in root.findall(".//swe:DataRecord", ns)[0]]
# Parse forecast timestamps
for line in root.findall(".//gmlcov:positions", ns)[0].text.splitlines():
split_line = line.split()
if len(split_line) == 3:
#data['dt_time'] = str(datetime.fromtimestamp(data['dt']).strftime('%H:%M'))
forecast.append({'Timestamp': int(split_line[2])})
# Parse parameters for each forecast point in time
forecast_index = 0
for line in root.findall(".//gml:doubleOrNilReasonTupleList", ns)[0].text.splitlines():
split_line = line.split()
if len(split_line) == len(field_types):
for i, value in enumerate(split_line):
forecast[forecast_index][field_types[i]] = float(value)
forecast_index += 1
except Exception, err:
log.error(err)
raise err
return forecast
def get_data(self, args):
""" Return current weather and forecast in json, or json error object on error """
try:
forecast_data = self._parse_xml_data('{}{}/wfs?request=getFeature&storedquery_id=fmi::forecast::hirlam::surface::point::multipointcoverage&place={}'.format(REMOTE_API_BASE_URL, secret.remote_api_key, CITY))
#current_data = self._get_json_data('{}weather?q={}&units=metric&appid={}'.format(REMOTE_API_BASE_URL, CITY, secret.remote_api_key))
current_data = {"msg": "not implemented"}
return json.dumps({"status": "ok", "current": current_data, "forecast": forecast_data}, indent=1)
except socket.gaierror, err:
log.error(err)
return json.dumps({"status": "error", "message": err})
if(__name__ == "__main__"):
plugin = Ilmatieteenlaitos()
print plugin.get_data(None)
| {
"content_hash": "5d020bee96ab074e1d4d7d452502b293",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 218,
"avg_line_length": 38.6,
"alnum_prop": 0.5978287688132248,
"repo_name": "Vilsepi/radiate",
"id": "973d7100feac4952c524a7035206904ef0fe0e9b",
"size": "4289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radiate-server/plugins/plugin_ilmatieteenlaitos.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1817"
},
{
"name": "HTML",
"bytes": "3838"
},
{
"name": "JavaScript",
"bytes": "5476"
},
{
"name": "Python",
"bytes": "14000"
},
{
"name": "Shell",
"bytes": "1272"
}
],
"symlink_target": ""
} |
"""Support for monitoring a Smappee energy sensor."""
from __future__ import annotations
from dataclasses import dataclass, field
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import (
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
ENERGY_WATT_HOUR,
POWER_WATT,
)
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN
@dataclass
class SmappeeRequiredKeysMixin:
"""Mixin for required keys."""
sensor_id: str
@dataclass
class SmappeeSensorEntityDescription(SensorEntityDescription, SmappeeRequiredKeysMixin):
"""Describes Smappee sensor entity."""
@dataclass
class SmappeePollingSensorEntityDescription(SmappeeSensorEntityDescription):
"""Describes Smappee sensor entity."""
local_polling: bool = False
@dataclass
class SmappeeVoltageSensorEntityDescription(SmappeeSensorEntityDescription):
"""Describes Smappee sensor entity."""
phase_types: set[str] = field(default_factory=set)
TREND_SENSORS: tuple[SmappeePollingSensorEntityDescription, ...] = (
SmappeePollingSensorEntityDescription(
key="total_power",
name="Total consumption - Active power",
native_unit_of_measurement=POWER_WATT,
sensor_id="total_power",
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
local_polling=True, # both cloud and local
),
SmappeePollingSensorEntityDescription(
key="alwayson",
name="Always on - Active power",
native_unit_of_measurement=POWER_WATT,
sensor_id="alwayson",
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
SmappeePollingSensorEntityDescription(
key="power_today",
name="Total consumption - Today",
native_unit_of_measurement=ENERGY_WATT_HOUR,
sensor_id="power_today",
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
SmappeePollingSensorEntityDescription(
key="power_current_hour",
name="Total consumption - Current hour",
native_unit_of_measurement=ENERGY_WATT_HOUR,
sensor_id="power_current_hour",
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
SmappeePollingSensorEntityDescription(
key="power_last_5_minutes",
name="Total consumption - Last 5 minutes",
native_unit_of_measurement=ENERGY_WATT_HOUR,
sensor_id="power_last_5_minutes",
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
SmappeePollingSensorEntityDescription(
key="alwayson_today",
name="Always on - Today",
native_unit_of_measurement=ENERGY_WATT_HOUR,
sensor_id="alwayson_today",
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
)
REACTIVE_SENSORS: tuple[SmappeeSensorEntityDescription, ...] = (
SmappeeSensorEntityDescription(
key="total_reactive_power",
name="Total consumption - Reactive power",
native_unit_of_measurement=POWER_WATT,
sensor_id="total_reactive_power",
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
)
SOLAR_SENSORS: tuple[SmappeePollingSensorEntityDescription, ...] = (
SmappeePollingSensorEntityDescription(
key="solar_power",
name="Total production - Active power",
native_unit_of_measurement=POWER_WATT,
sensor_id="solar_power",
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
local_polling=True, # both cloud and local
),
SmappeePollingSensorEntityDescription(
key="solar_today",
name="Total production - Today",
native_unit_of_measurement=ENERGY_WATT_HOUR,
sensor_id="solar_today",
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
SmappeePollingSensorEntityDescription(
key="solar_current_hour",
name="Total production - Current hour",
native_unit_of_measurement=ENERGY_WATT_HOUR,
sensor_id="solar_current_hour",
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
)
VOLTAGE_SENSORS: tuple[SmappeeVoltageSensorEntityDescription, ...] = (
SmappeeVoltageSensorEntityDescription(
key="phase_voltages_a",
name="Phase voltages - A",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
sensor_id="phase_voltage_a",
device_class=SensorDeviceClass.VOLTAGE,
state_class=SensorStateClass.MEASUREMENT,
phase_types={"ONE", "TWO", "THREE_STAR", "THREE_DELTA"},
),
SmappeeVoltageSensorEntityDescription(
key="phase_voltages_b",
name="Phase voltages - B",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
sensor_id="phase_voltage_b",
device_class=SensorDeviceClass.VOLTAGE,
state_class=SensorStateClass.MEASUREMENT,
phase_types={"TWO", "THREE_STAR", "THREE_DELTA"},
),
SmappeeVoltageSensorEntityDescription(
key="phase_voltages_c",
name="Phase voltages - C",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
sensor_id="phase_voltage_c",
device_class=SensorDeviceClass.VOLTAGE,
state_class=SensorStateClass.MEASUREMENT,
phase_types={"THREE_STAR"},
),
SmappeeVoltageSensorEntityDescription(
key="line_voltages_a",
name="Line voltages - A",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
sensor_id="line_voltage_a",
device_class=SensorDeviceClass.VOLTAGE,
state_class=SensorStateClass.MEASUREMENT,
phase_types={"ONE", "TWO", "THREE_STAR", "THREE_DELTA"},
),
SmappeeVoltageSensorEntityDescription(
key="line_voltages_b",
name="Line voltages - B",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
sensor_id="line_voltage_b",
device_class=SensorDeviceClass.VOLTAGE,
state_class=SensorStateClass.MEASUREMENT,
phase_types={"TWO", "THREE_STAR", "THREE_DELTA"},
),
SmappeeVoltageSensorEntityDescription(
key="line_voltages_c",
name="Line voltages - C",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
sensor_id="line_voltage_c",
device_class=SensorDeviceClass.VOLTAGE,
state_class=SensorStateClass.MEASUREMENT,
phase_types={"THREE_STAR", "THREE_DELTA"},
),
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Smappee sensor."""
smappee_base = hass.data[DOMAIN][config_entry.entry_id]
entities = []
for service_location in smappee_base.smappee.service_locations.values():
# Add all basic sensors (realtime values and aggregators)
# Some are available in local only env
entities.extend(
[
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
description=description,
)
for description in TREND_SENSORS
if not service_location.local_polling or description.local_polling
]
)
if service_location.has_reactive_value:
entities.extend(
[
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
description=description,
)
for description in REACTIVE_SENSORS
]
)
# Add solar sensors (some are available in local only env)
if service_location.has_solar_production:
entities.extend(
[
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
description=description,
)
for description in SOLAR_SENSORS
if not service_location.local_polling or description.local_polling
]
)
# Add all CT measurements
entities.extend(
[
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
description=SmappeeSensorEntityDescription(
key="load",
name=measurement.name,
native_unit_of_measurement=POWER_WATT,
sensor_id=measurement_id,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
)
for measurement_id, measurement in service_location.measurements.items()
]
)
# Add phase- and line voltages if available
if service_location.has_voltage_values:
entities.extend(
[
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
description=description,
)
for description in VOLTAGE_SENSORS
if (
service_location.phase_type in description.phase_types
and not (
description.key.startswith("line_")
and service_location.local_polling
)
)
]
)
# Add Gas and Water sensors
entities.extend(
[
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
description=SmappeeSensorEntityDescription(
key="sensor",
name=channel.get("name"),
icon=(
"mdi:water"
if channel.get("type") == "water"
else "mdi:gas-cylinder"
),
native_unit_of_measurement=channel.get("uom"),
sensor_id=f"{sensor_id}-{channel.get('channel')}",
state_class=SensorStateClass.MEASUREMENT,
),
)
for sensor_id, sensor in service_location.sensors.items()
for channel in sensor.channels
]
)
# Add today_energy_kwh sensors for switches
entities.extend(
[
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
description=SmappeeSensorEntityDescription(
key="switch",
name=f"{actuator.name} - energy today",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
sensor_id=actuator_id,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
)
for actuator_id, actuator in service_location.actuators.items()
if actuator.type == "SWITCH" and not service_location.local_polling
]
)
async_add_entities(entities, True)
class SmappeeSensor(SensorEntity):
"""Implementation of a Smappee sensor."""
entity_description: SmappeeSensorEntityDescription
def __init__(
self,
smappee_base,
service_location,
description: SmappeeSensorEntityDescription,
):
"""Initialize the Smappee sensor."""
self.entity_description = description
self._smappee_base = smappee_base
self._service_location = service_location
@property
def name(self):
"""Return the name for this sensor."""
sensor_key = self.entity_description.key
sensor_name = self.entity_description.name
if sensor_key in ("sensor", "load", "switch"):
return (
f"{self._service_location.service_location_name} - "
f"{sensor_key.title()} - {sensor_name}"
)
return f"{self._service_location.service_location_name} - {sensor_name}"
@property
def unique_id(self):
"""Return the unique ID for this sensor."""
sensor_key = self.entity_description.key
if sensor_key in ("load", "sensor", "switch"):
return (
f"{self._service_location.device_serial_number}-"
f"{self._service_location.service_location_id}-"
f"{sensor_key}-{self.entity_description.sensor_id}"
)
return (
f"{self._service_location.device_serial_number}-"
f"{self._service_location.service_location_id}-"
f"{sensor_key}"
)
@property
def device_info(self) -> DeviceInfo:
"""Return the device info for this sensor."""
return DeviceInfo(
identifiers={(DOMAIN, self._service_location.device_serial_number)},
manufacturer="Smappee",
model=self._service_location.device_model,
name=self._service_location.service_location_name,
sw_version=self._service_location.firmware_version,
)
async def async_update(self):
"""Get the latest data from Smappee and update the state."""
await self._smappee_base.async_update()
sensor_key = self.entity_description.key
if sensor_key == "total_power":
self._attr_native_value = self._service_location.total_power
elif sensor_key == "total_reactive_power":
self._attr_native_value = self._service_location.total_reactive_power
elif sensor_key == "solar_power":
self._attr_native_value = self._service_location.solar_power
elif sensor_key == "alwayson":
self._attr_native_value = self._service_location.alwayson
elif sensor_key in (
"phase_voltages_a",
"phase_voltages_b",
"phase_voltages_c",
):
phase_voltages = self._service_location.phase_voltages
if phase_voltages is not None:
if sensor_key == "phase_voltages_a":
self._attr_native_value = phase_voltages[0]
elif sensor_key == "phase_voltages_b":
self._attr_native_value = phase_voltages[1]
elif sensor_key == "phase_voltages_c":
self._attr_native_value = phase_voltages[2]
elif sensor_key in ("line_voltages_a", "line_voltages_b", "line_voltages_c"):
line_voltages = self._service_location.line_voltages
if line_voltages is not None:
if sensor_key == "line_voltages_a":
self._attr_native_value = line_voltages[0]
elif sensor_key == "line_voltages_b":
self._attr_native_value = line_voltages[1]
elif sensor_key == "line_voltages_c":
self._attr_native_value = line_voltages[2]
elif sensor_key in (
"power_today",
"power_current_hour",
"power_last_5_minutes",
"solar_today",
"solar_current_hour",
"alwayson_today",
):
trend_value = self._service_location.aggregated_values.get(sensor_key)
self._attr_native_value = (
round(trend_value) if trend_value is not None else None
)
elif sensor_key == "load":
self._attr_native_value = self._service_location.measurements.get(
self.entity_description.sensor_id
).active_total
elif sensor_key == "sensor":
sensor_id, channel_id = self.entity_description.sensor_id.split("-")
sensor = self._service_location.sensors.get(int(sensor_id))
for channel in sensor.channels:
if channel.get("channel") == int(channel_id):
self._attr_native_value = channel.get("value_today")
elif sensor_key == "switch":
cons = self._service_location.actuators.get(
self.entity_description.sensor_id
).consumption_today
if cons is not None:
self._attr_native_value = round(cons / 1000.0, 2)
| {
"content_hash": "cf5cd24a5a7241e58b42618fe2e130b5",
"timestamp": "",
"source": "github",
"line_count": 446,
"max_line_length": 88,
"avg_line_length": 38.0762331838565,
"alnum_prop": 0.582970203745142,
"repo_name": "home-assistant/home-assistant",
"id": "e2eef5d06cf79d2347d633601683f0a3f268a3c2",
"size": "16982",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/smappee/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
import time
from test_framework.test_particl import (
ParticlTestFramework,
isclose
)
KEEP_FUNDING_TX_DATA = 86400 * 31
class SmsgRollingCacheTest(ParticlTestFramework):
def set_test_params(self):
self.setup_clean_chain = True # Don't copy from cache
self.num_nodes = 3
self.extra_args = [ ['-debug', '-reservebalance=10000000'] for i in range(self.num_nodes) ]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
self.connect_nodes_bi(0, 1)
self.connect_nodes_bi(0, 2)
def run_test(self):
nodes = self.nodes
self.import_genesis_coins_a(nodes[0])
self.import_genesis_coins_b(nodes[1])
nodes[2].extkeyimportmaster(nodes[2].mnemonic('new')['master'])
address1 = nodes[1].getnewaddress()
nodes[1].smsgaddlocaladdress(address1)
nodes[2].smsgaddaddress(address1, nodes[1].smsglocalkeys()['wallet_keys'][0]['public_key'])
address0 = nodes[0].getnewaddress()
address1 = nodes[1].getnewaddress()
nodes[0].smsgaddlocaladdress(address0)
nodes[1].smsgaddaddress(address0, nodes[0].smsglocalkeys()['wallet_keys'][0]['public_key'])
text = 'Some text to test'
ro = nodes[1].smsgsend(address1, address0, text, True, 10)
assert (ro['result'] == 'Sent.')
assert (isclose(ro['fee'], 0.00159000))
self.stakeBlocks(1, nStakeNode=1)
self.log.info('Waiting for paid smsg to send')
for i in range(20):
txns = nodes[1].smsgdebug('dumpfundingtxids')
if len(txns['txns']) < 1:
time.sleep(1)
continue
break
assert (len(txns['txns']) > 0)
now = int(time.time())
for i in range(len(nodes)):
nodes[i].setmocktime(now + KEEP_FUNDING_TX_DATA, True)
self.log.info('Waiting for rolling cache to expire')
for i in range(60):
txns = nodes[1].smsgdebug('dumpfundingtxids')
if len(txns['txns']) > 0:
time.sleep(1)
continue
break
assert (len(txns['txns']) == 0)
self.log.info('Done.')
if __name__ == '__main__':
SmsgRollingCacheTest().main()
| {
"content_hash": "28390c432918b7f5802e25ed686df43e",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 99,
"avg_line_length": 31.051948051948052,
"alnum_prop": 0.5872020075282308,
"repo_name": "particl/particl-core",
"id": "b2f809c2451769ed841cf3c20366889190e9ccc0",
"size": "2601",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/feature_part_smsg_rollingcache.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "C",
"bytes": "2889723"
},
{
"name": "C++",
"bytes": "13218778"
},
{
"name": "CMake",
"bytes": "29182"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1740"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "M4",
"bytes": "229063"
},
{
"name": "Makefile",
"bytes": "159386"
},
{
"name": "Objective-C++",
"bytes": "5486"
},
{
"name": "Python",
"bytes": "3388224"
},
{
"name": "QMake",
"bytes": "1276"
},
{
"name": "Sage",
"bytes": "59728"
},
{
"name": "Scheme",
"bytes": "26427"
},
{
"name": "Shell",
"bytes": "190057"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('robocrm', '0022_auto_20141027_0155'),
]
operations = [
migrations.AlterField(
model_name='robouser',
name='major',
field=models.CharField(max_length=50),
),
]
| {
"content_hash": "6453be403e0d558ff1a751731af26489",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 50,
"avg_line_length": 21.055555555555557,
"alnum_prop": 0.5910290237467019,
"repo_name": "sreidy/roboticsclub.org",
"id": "0d68a3ff065e758effaff4afcb3d110b7f688b78",
"size": "403",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "robocrm/migrations/0023_auto_20141120_1102.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87807"
},
{
"name": "HTML",
"bytes": "32573"
},
{
"name": "JavaScript",
"bytes": "5052"
},
{
"name": "Python",
"bytes": "239652"
}
],
"symlink_target": ""
} |
"""Python dataset sparse tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import sparse_ops
def any_sparse(classes):
"""Checks for sparse tensor.
Args:
classes: a structure of objects that identify the dataset item classes
Returns:
`True` if `classes` contains a sparse tensor type and `False` otherwise.
"""
return any(c is sparse_tensor.SparseTensor for c in nest.flatten(classes))
def as_dense_shapes(shapes, classes):
"""Converts sparse tensor shapes to their physical shapes.
Args:
shapes: a structure of shapes to convert.
classes: a structure of objects that identify the dataset item classes
Returns:
a structure matching the nested structure of `shapes`, containing
`tensor_shape.unknown_shape()` at positions where `classes` contains
`tf.SparseTensor` and matching contents of `shapes` otherwise
"""
ret = nest.pack_sequence_as(shapes, [
tensor_shape.unknown_shape() if c is sparse_tensor.SparseTensor else shape
for shape, c in zip(nest.flatten(shapes), nest.flatten(classes))
])
return ret
def as_dense_types(types, classes):
"""Converts sparse tensor types to `dtypes.variant`.
Args:
types: a structure of types to convert.
classes: a structure of objects that identify the dataset item classes
Returns:
a structure matching the nested structure of `types`, containing
`dtypes.variant` at positions where `classes` contains `tf.SparseTensor` and
matching contents of `types` otherwise
"""
ret = nest.pack_sequence_as(types, [
dtypes.variant if c is sparse_tensor.SparseTensor else ty
for ty, c in zip(nest.flatten(types), nest.flatten(classes))
])
return ret
def deserialize_sparse_tensors(tensors, types, shapes, classes):
"""Deserializes sparse tensors.
Args:
tensors: a structure of tensors to deserialize.
types: a structure that holds information about types of `tensors`
shapes: a structure that holds information about shapes of `tensors`
classes: a structure of objects that identify the dataset item classes
Returns:
`tensors` with any serialized sparse tensors replaced by their deserialized
version.
"""
ret = nest.pack_sequence_as(types, [
sparse_ops.deserialize_sparse(tensor, dtype=ty, rank=shape.ndims)
if c is sparse_tensor.SparseTensor else tensor
for (tensor, ty, shape, c) in zip(
nest.flatten(tensors), nest.flatten(types), nest.flatten(shapes),
nest.flatten(classes))
])
return ret
def get_classes(tensors):
"""Gets classes for a structure of tensors.
Args:
tensors: the tensor structure to get classes for.
Returns:
a structure matching the nested structure of `tensors`, containing
`tf.SparseTensor` at positions where `tensors` contains a sparse tensor and
`tf.Tensor` otherwise
"""
return nest.pack_sequence_as(tensors, [
sparse_tensor.SparseTensor
if isinstance(tensor, sparse_tensor.SparseTensor) else ops.Tensor
for tensor in nest.flatten(tensors)
])
def serialize_many_sparse_tensors(tensors):
"""Serializes many sparse tensors into a batch.
Args:
tensors: a tensor structure to serialize.
Returns:
`tensors` with any sparse tensors replaced by the serialized batch.
"""
ret = nest.pack_sequence_as(tensors, [
sparse_ops.serialize_many_sparse(tensor, out_type=dtypes.variant)
if sparse_tensor.is_sparse(tensor) else tensor
for tensor in nest.flatten(tensors)
])
return ret
def serialize_sparse_tensors(tensors):
"""Serializes sparse tensors.
Args:
tensors: a tensor structure to serialize.
Returns:
`tensors` with any sparse tensors replaced by their serialized version.
"""
ret = nest.pack_sequence_as(tensors, [
sparse_ops.serialize_sparse(tensor, out_type=dtypes.variant)
if isinstance(tensor, sparse_tensor.SparseTensor) else tensor
for tensor in nest.flatten(tensors)
])
return ret
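# Illustrative sketch of how these helpers compose (the names `dense_t`,
# `sparse_t`, and the shapes are hypothetical):
#
#   classes = get_classes((dense_t, sparse_t))
#   # -> (ops.Tensor, sparse_tensor.SparseTensor)
#   as_dense_types((dtypes.float32, dtypes.int64), classes)
#   # -> (dtypes.float32, dtypes.variant)
#   as_dense_shapes((dense_shape, sparse_shape), classes)
#   # -> (dense_shape, tensor_shape.unknown_shape())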
| {
"content_hash": "80f56ce2fa64aadd390103347b25f73d",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 80,
"avg_line_length": 31.384057971014492,
"alnum_prop": 0.7252366658970215,
"repo_name": "renyi533/tensorflow",
"id": "d7e516e24f91e4a0c12ed778742c6e4db1b9e4a4",
"size": "5020",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/util/sparse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31572"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "903309"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82507951"
},
{
"name": "CMake",
"bytes": "6967"
},
{
"name": "Dockerfile",
"bytes": "113964"
},
{
"name": "Go",
"bytes": "1871425"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "988219"
},
{
"name": "Jupyter Notebook",
"bytes": "550861"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "2073744"
},
{
"name": "Makefile",
"bytes": "66796"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "319021"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37811412"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "6846"
},
{
"name": "Shell",
"bytes": "696058"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3655758"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/hair/human/shared_hair_human_female_s20.iff"
result.attribute_template_id = -1
result.stfName("hair_name","hair")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "360f3d20bbee0a1e0839768b45dcee91",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 23.46153846153846,
"alnum_prop": 0.6918032786885245,
"repo_name": "anhstudios/swganh",
"id": "eef61d8dd8530fd16217f4a330342d65a423bd00",
"size": "450",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/hair/human/shared_hair_human_female_s20.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import sys, os, time, tempfile, shutil, socket, fcntl, signal
from subprocess import Popen, PIPE
try:
socket.getaddrinfo('non-existing-host', 80)
NON_EXISTING_RESOLVABLE = True
except socket.gaierror:
NON_EXISTING_RESOLVABLE = False
def get_testenv():
env = os.environ.copy()
env['PYTHONPATH'] = os.getcwd()
return env
def get_ephemeral_port():
s = socket.socket()
s.bind(("", 0))
return s.getsockname()[1]
def _non_block_read(output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read()
except Exception:
return ""
def _wait_for_port(portnum, delay=0.1, attempts=100):
while attempts > 0:
s = socket.socket()
if s.connect_ex(('127.0.0.1', portnum)) == 0:
s.close()
return
time.sleep(delay)
attempts -= 1
raise RuntimeError("Port %d is not open" % portnum)
class SplashServer(object):
def __init__(self, logfile=None, proxy_profiles_path=None,
js_profiles_path=None, filters_path=None, portnum=None,
proxy_portnum=None, extra_args=None, verbosity=3):
self.logfile = logfile
self.proxy_profiles_path = proxy_profiles_path
self.js_profiles_path = js_profiles_path
self.filters_path = filters_path
self.verbosity = verbosity
self.portnum = portnum if portnum is not None else get_ephemeral_port()
self.proxy_portnum = proxy_portnum if proxy_portnum is not None else get_ephemeral_port()
self.tempdir = tempfile.mkdtemp()
self.extra_args = extra_args or []
def __enter__(self):
args = [sys.executable, '-u', '-m', 'splash.server']
args += ['--cache-path', self.tempdir]
args += ['--port', str(self.portnum)]
args += ['--verbosity', str(self.verbosity)]
if self.logfile:
args += ['-f', self.logfile]
if self.proxy_profiles_path:
args += ['--proxy-profiles-path', self.proxy_profiles_path]
if self.js_profiles_path:
args += ['--js-profiles-path', self.js_profiles_path]
if self.filters_path:
args += ['--filters-path', self.filters_path]
if self.proxy_portnum:
args += ['--proxy-portnum', str(self.proxy_portnum)]
args.extend(self.extra_args)
self.proc = Popen(args, env=get_testenv())
self.proc.poll()
if self.proc.returncode is not None:
msg = ("unable to start splash server. return code: %d" %
self.proc.returncode)
raise RuntimeError(msg)
_wait_for_port(self.portnum)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.proc is not None:
self.proc.send_signal(signal.SIGINT)
self.proc.wait()
self.proc = None
shutil.rmtree(self.tempdir)
def url(self, path):
return "http://localhost:%s/%s" % (self.portnum, path.lstrip('/'))
def proxy_url(self):
return "http://localhost:%s" % self.proxy_portnum
class MockServer(object):
def __init__(self, http_port=None, https_port=None, proxy_port=None):
self.http_port = http_port if http_port is not None else get_ephemeral_port()
self.https_port = https_port if https_port is not None else get_ephemeral_port()
self.proxy_port = proxy_port if proxy_port is not None else get_ephemeral_port()
def __enter__(self):
self.proc = Popen([
sys.executable,
'-u', '-m', 'splash.tests.mockserver',
'--http-port', str(self.http_port),
'--https-port', str(self.https_port),
'--proxy-port', str(self.proxy_port),
],
env=get_testenv()
)
for port in (self.http_port, self.https_port, self.proxy_port):
_wait_for_port(port)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.proc.kill()
self.proc.wait()
def url(self, path, gzip=True, host='localhost'):
gzip_path = '' if not gzip else '/gzip'
return "http://%s:%s%s/%s" % (
host, self.http_port, gzip_path, path.lstrip('/')
)
def https_url(self, path):
return "https://localhost:%s/%s" % (self.https_port, path.lstrip('/'))
class TestServers(object):
def __init__(self, logfile=None):
self.logfile = logfile
self.tmp_folder = tempfile.mkdtemp("splash-tests-tmp")
self.proxy_profiles_path = self._copy_test_folder('proxy_profiles')
self.js_profiles_path = self._copy_test_folder('js_profiles')
self.filters_path = self._copy_test_folder('filters')
self.lua_modules = self._copy_test_folder('lua_modules')
self.lua_sandbox_allowed_modules = ['emulation', 'utils', 'utils_patch', 'non_existing']
self.mock_http_port = get_ephemeral_port()
self.mock_https_port = get_ephemeral_port()
self.mock_proxy_port = get_ephemeral_port()
print("TestServers mock ports: %s http, %s https, %s proxy" % (
self.mock_http_port, self.mock_https_port, self.mock_proxy_port))
self._fix_testproxy_port()
def _copy_test_folder(self, src, dst=None):
src_path = test_path(src)
dst_path = os.path.join(self.tmp_folder, dst or src)
shutil.copytree(src_path, dst_path)
return dst_path
def _fix_testproxy_port(self):
filename = os.path.join(self.proxy_profiles_path, 'test.ini')
with open(filename, 'rb') as f:
data = f.read()
data = data.replace('8990', str(self.mock_proxy_port))
with open(filename, 'wb') as f:
f.write(data)
def __enter__(self):
self.mockserver = MockServer(
self.mock_http_port,
self.mock_https_port,
self.mock_proxy_port,
)
self.mockserver.__enter__()
self.splashserver = SplashServer(
logfile=self.logfile,
proxy_profiles_path=self.proxy_profiles_path,
js_profiles_path=self.js_profiles_path,
filters_path=self.filters_path,
extra_args = [
'--lua-package-path', '%s/?.lua' % self.lua_modules.rstrip('/'),
'--lua-sandbox-allowed-modules', ';'.join(self.lua_sandbox_allowed_modules),
]
)
self.splashserver.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.splashserver.__exit__(None, None, None)
self.mockserver.__exit__(None, None, None)
shutil.rmtree(self.tmp_folder)
def test_path(*args):
return os.path.join(os.path.dirname(__file__), *args)
| {
"content_hash": "802dec88ee44f586da0e6b12665023ae",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 97,
"avg_line_length": 34.42713567839196,
"alnum_prop": 0.5813749817544884,
"repo_name": "dwdm/splash",
"id": "aa5e4347fed785f2794acf5666175e77cc0ccb6b",
"size": "6851",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "splash/tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "307"
},
{
"name": "Lua",
"bytes": "21331"
},
{
"name": "Python",
"bytes": "598792"
},
{
"name": "Shell",
"bytes": "3747"
}
],
"symlink_target": ""
} |
"""
This file contains tests that focus on general set-up
of the python environment. They also check things that
probably don't need any checking.
"""
import sys
import pathlib
# Ensure that 'matmodlab' is imported from parent directory.
sys.path.insert(0, str(pathlib.Path(__file__).absolute().parent.parent))
try:
import matmodlab2
except ImportError:
matmodlab2 = None
def test_absolute_truth():
"""Ensure that the testing library is working."""
# Setup
# Test
assert True
# Teardown
def test_require_python3():
"""The module 'matmodlab' and these tests require at least Python 3.0."""
# Setup
# Test
assert sys.version_info > (3, 0) or sys.version_info > (2, 6)
# Teardown
def test_import():
"""Ensure that 'matmodlab' is imported."""
# Setup
# Test
assert matmodlab2 is not None
# Teardown
def test_initialize():
"""Do something simple with 'matmodlab'."""
# Setup
# Test
assert 'MaterialPointSimulator' in matmodlab2.__all__
# Teardown
if __name__ == '__main__':
test_import()
| {
"content_hash": "3b4842694235ef9e44baa070eb51ac52",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 18.283333333333335,
"alnum_prop": 0.649954421148587,
"repo_name": "matmodlab/matmodlab2",
"id": "b81098e5da9e05154a495d146a36003cd8952cc5",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_environment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Fortran",
"bytes": "419488"
},
{
"name": "Jupyter Notebook",
"bytes": "1458750"
},
{
"name": "Python",
"bytes": "400440"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('imgpage', '0002_auto_20150823_2320'),
]
operations = [
migrations.AddField(
model_name='imgobject',
name='img_title',
field=models.CharField(default=datetime.datetime(2015, 9, 10, 10, 31, 8, 712000, tzinfo=utc), max_length=128),
preserve_default=False,
),
]
| {
"content_hash": "043a96ea3bb6061b70e05fcf7badb176",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 122,
"avg_line_length": 25.904761904761905,
"alnum_prop": 0.6305147058823529,
"repo_name": "sudhaMR/Django-Perception",
"id": "df646581aa01bf2710ad53e55bf86b1cb7b6d58b",
"size": "568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imgpage/migrations/0003_imgobject_img_title.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "27536"
},
{
"name": "Python",
"bytes": "15450"
}
],
"symlink_target": ""
} |
"""A module for defining structured request species.
:author: Matthew Gidden <matthew.gidden _at_ gmail.com>
"""
import itertools
import numpy as np
import random
import math
from collections import OrderedDict, defaultdict, Iterable
from cyclopts import tools as cyctools
from cyclopts import cyclopts_io as cycio
from cyclopts import io_tools as io_tools
import cyclopts.exchange_instance as exinst
from cyclopts.problems import ProblemSpecies
from cyclopts.exchange_family import ResourceExchange
from cyclopts.structured_species import data
from cyclopts.structured_species import tools as strtools
def rxtr_commods(kind, fidelity):
"""return a list of commodities per reactor kind and fidelity"""
commods = [data.Commodities.uox]
if fidelity > 0:
commods += [data.Commodities.th_mox, data.Commodities.f_mox]
if fidelity > 1 and kind != data.Reactors.th:
commods += [data.Commodities.f_thox]
return commods
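# Illustrative example only (not part of the original module), assuming the
# Reactors/Commodities enums provided by the data module imported above:
#
#   rxtr_commods(data.Reactors.f_mox, 2)
#   # -> [uox, th_mox, f_mox, f_thox]
#
# A thermal reactor (data.Reactors.th) never requests f_thox, whatever the
# fidelity.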
class Point(strtools.Point):
"""A container class representing a point in parameter space"""
"""ordered mapping from input parameters to default values and np.dtypes, see
the theory manual for further explanation of the parameter names"""
parameters = OrderedDict(sorted({
"f_rxtr": strtools.Param(0, np.int8),
"f_fc": strtools.Param(0, np.int8),
"f_loc": strtools.Param(0, np.int8),
# use a different tool for more than 4294967295 rxtrs!
"n_rxtr": strtools.Param(1, np.uint32),
"r_t_f": strtools.Param(1.0, np.float32),
"r_th_pu": strtools.Param(0.0, np.float32),
"r_s_th": strtools.Param(1.0 / 2, np.float32),
"r_s_mox_uox": strtools.Param(1.0, np.float32),
"r_s_mox": strtools.Param(1.0 / 2, np.float32),
"r_s_thox": strtools.Param(1.0 / 2, np.float32),
"f_mox": strtools.Param(1.0, np.float32),
"r_inv_proc": strtools.Param(1.0, np.float32),
# use a different tool for more than 4294967295 regions!
"n_reg": strtools.Param(10, np.uint32),
"r_l_c": strtools.Param(1.0, np.float32),
"seed": strtools.Param(-1.0, np.int64), # default is negative
}.items(), key=lambda t: t[0]))
def __init__(self, d=None):
"""Parameters
----------
d : dict, optional
a dictionary with key value pairs of parameter name, parameter
value
"""
super(Point, self).__init__(d)
if self.seed > 0:
random.seed(self.seed)
def _parameters(self):
return Point.parameters
class Reactor(strtools.Reactor):
"""An extension reactor model for Structured Request Species"""
def __init__(self, kind, point, gids, nids):
super(Reactor, self).__init__(kind, point)
req = True
qty = data.fuel_unit * data.core_vol_frac[self.kind]
self.base_req_qty = qty / self.n_assems
gid = gids.next()
grp = exinst.ExGroup(gid, req, qty)
grp.AddCap(qty)
self.group = grp
self._gen_nodes(point, gid, nids)
def _gen_nodes(self, point, gid, nids):
self.nodes = []
self.commod_to_nodes = defaultdict(list)
req = True
excl = True
for commod in rxtr_commods(self.kind, point.f_fc):
nreq = self.n_assems
# account for less mox requests
if self.kind == data.Reactors.th:
if commod == data.Commodities.f_mox or \
commod == data.Commodities.th_mox:
nreq = int(math.ceil(nreq * point.f_mox))
for i in range(nreq):
node = exinst.ExNode(nids.next(), gid, req,
self.req_qty(commod), excl)
self.nodes.append(node)
self.commod_to_nodes[commod].append(node)
def req_qty(self, commod):
return self.base_req_qty * data.relative_qtys[self.kind][commod]
class Supplier(object):
"""A simplified supplier model for Structured Request Species"""
def __init__(self, kind, point, gids):
self.kind = kind
self.nodes = []
req = True
# process then inventory
rhs = [data.sup_rhs[kind],
data.sup_rhs[kind] * point.r_inv_proc * strtools.conv_ratio(kind)]
grp = exinst.ExGroup(gids.next(), not req)
for cap in rhs:
grp.AddCap(cap)
self.group = grp
self.loc = data.loc()
def coeffs(self, qty, enr):
return [data.converters[self.kind][k](
qty, enr, data.sup_to_commod[self.kind]) / qty \
for k in ['proc', 'inv']]
class PathMap(io_tools.PathMap):
"""A simple container class for mapping columns to Hdf5 paths
implemented for the StructuredRequest problem species"""
def __init__(self, col):
super(PathMap, self).__init__(col)
@property
def path(self):
# this is an approx. heuristic, it might need to be updated
inst = StructuredRequest()
col = self.col
if col.startswith('n_') and not col.endswith('_rxtr') \
and not col.endswith('_reg'):
tbl = inst.sum_tbl_name
elif col.endswith('pref_flow') or col.endswith('cost_flow'):
tbl = strtools.pp_tbl_name
else:
tbl = inst.param_tbl_name
return '/'.join([inst.io_prefix, tbl])
class StructuredRequest(ProblemSpecies):
"""A class representing structured request-based exchanges species."""
@property
def family(cls):
"""Returns
-------
family : ResourceExchange
An instance of this species' family
"""
return ResourceExchange()
@property
def name(cls):
"""Returns
-------
name : string
The name of this species
"""
return 'StructuredRequest'
@property
def param_tbl_name(cls):
"""Returns
-------
name : string
The name of parameter space output table
"""
return 'Points'
@property
def sum_tbl_name(cls):
"""Returns
-------
name : string
The name of summary output table
"""
return 'Summary'
@property
def summary_tbls(cls):
"""
Returns
-------
name : list
A list of cyclopts_io.TblDesc for summary tables.
"""
return strtools.tbl_descs(cls.io_prefix) + [
cycio.TblDesc('/'.join([cls.io_prefix, cls.sum_tbl_name]),
'param', 'paramid'),
cycio.TblDesc('/'.join([cls.io_prefix, cls.param_tbl_name]),
'param', 'paramid'),
]
def __init__(self):
super(StructuredRequest, self).__init__()
self.space = None
self._n_points = None
# 16 bytes for uuid
self._param_dtype = np.dtype(
[('paramid', ('str', 16)), ('family', ('str', 30))] + \
[(name, param.dtype) for name, param in Point.parameters.items()])
facs = ['n_r_th', 'n_r_f_mox', 'n_r_f_thox', 'n_s_uox', 'n_s_th_mox',
'n_s_f_mox', 'n_s_f_thox']
self._sum_dtype = np.dtype(
[('paramid', ('str', 16)), ('family', ('str', 30))] + \
[(name, np.uint32) for name in facs])
self.nids = cyctools.Incrementer()
self.gids = cyctools.Incrementer()
self.arcids = cyctools.Incrementer()
self.instid = None
self.tables = None
self.groups = None
self.arc_tbl = None
def register_tables(self, h5file, prefix):
"""Parameters
----------
h5file : PyTables File
the hdf5 file
prefix : string
the absolute path to the group for tables of this species
Returns
-------
tables : list of cyclopts_io.Tables
All tables that could be written to by this species.
"""
return [cycio.Table(h5file, '/'.join([prefix, self.param_tbl_name]),
self._param_dtype),
cycio.Table(h5file, '/'.join([prefix, self.sum_tbl_name]),
self._sum_dtype),
cycio.Table(h5file, '/'.join([prefix, strtools.pp_tbl_name]),
strtools.pp_tbl_dtype),]
def register_groups(self, h5file, prefix):
"""Parameters
----------
h5file : PyTables File
the hdf5 file
prefix : string
the absolute path to the group for tables of this family
Returns
-------
groups : list of cyclopts_io.Groups
All groups that could be written to by this species.
"""
return [cycio.Group(h5file, '/'.join([prefix, strtools.arc_io_name]))]
def read_space(self, space_dict):
"""Parameters
----------
space_dict : dict
A dictionary container resulting from the reading in of a run
control file
"""
self.space = {k: v if isinstance(v, Iterable) else [v] \
for k, v in space_dict.items() \
if k in Point.parameters}
@property
def n_points(self):
"""Returns
-------
n : int
The total number of points in the parameter space
"""
return cyctools.n_permutations(self.space)
def points(self):
"""Derived classes must implement this function returning a
representation of a point in its parameter space to be used by other
class member functions.
Returns
-------
point_generator : generator
A generator for representation of a point in parameter space to be
used by this species
"""
keys = self.space.keys()
vals = self.space.values()
for args in cyctools.expand_args(vals):
d = {keys[i]: args[i] for i in range(len(args))}
yield Point(d)
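    # Illustrative sketch only (not in the original source): consuming the
    # generator above after reading in a parameter space; the parameter
    # values here are placeholders.
    #
    #   sp = StructuredRequest()
    #   sp.read_space({"f_fc": 0, "n_rxtr": [5, 10]})
    #   for pt in sp.points():
    #       print(pt.n_rxtr, pt.f_fc)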
def record_point(self, point, param_uuid, io_manager):
"""Parameters
----------
point : tuple or other
A representation of a point in parameter space
param_uuid : uuid
The uuid of the point in parameter space
io_manager : cyclopts_io.IOManager, optional
IOManager that gives access to tables/groups for writing
"""
tables = io_manager.tables
uid = param_uuid.bytes if len(param_uuid.bytes) == 16 \
else param_uuid.bytes + '\0'
data = [param_uuid.bytes, self.family.name]
data += [getattr(point, k) for k in Point.parameters.keys()]
tables[self.param_tbl_name].append_data([tuple(data)])
data = [param_uuid.bytes, self.family.name]
data += strtools.reactor_breakdown(point)
data += strtools.support_breakdown(point)[:-1]
tables[self.sum_tbl_name].append_data([tuple(data)])
def _get_reactors(self, point):
n_uox, n_mox, n_thox = strtools.reactor_breakdown(point)
uox_th_r = np.ndarray(
shape=(n_uox,),
buffer=np.array([Reactor(data.Reactors.th, point,
self.gids, self.nids) \
for i in range(n_uox)]),
dtype=Reactor)
mox_f_r = np.ndarray(
shape=(n_mox,),
buffer=np.array([Reactor(data.Reactors.f_mox, point,
self.gids, self.nids) \
for i in range(n_mox)]),
dtype=Reactor)
thox_f_r = np.ndarray(
shape=(n_thox,),
buffer=np.array([Reactor(data.Reactors.f_thox, point,
self.gids, self.nids) \
for i in range(n_thox)]),
dtype=Reactor)
reactors = {
data.Reactors.th: uox_th_r,
data.Reactors.f_mox: mox_f_r,
data.Reactors.f_thox: thox_f_r,
}
return reactors
def _get_suppliers(self, point):
n_uox, n_t_mox, n_f_mox, n_f_thox, _ = strtools.support_breakdown(point)
uox_s = np.ndarray(
shape=(n_uox,),
buffer=np.array([Supplier(data.Supports.uox, point, self.gids) \
for i in range(n_uox)]),
dtype=Supplier)
mox_th_s = np.ndarray(
shape=(n_t_mox,),
buffer=np.array([Supplier(data.Supports.th_mox, point, self.gids) \
for i in range(n_t_mox)]),
dtype=Supplier)
mox_f_s = np.ndarray(
shape=(n_f_mox,),
buffer=np.array([Supplier(data.Supports.f_mox, point, self.gids) \
for i in range(n_f_mox)]),
dtype=Supplier)
thox_s = np.ndarray(
shape=(n_f_thox,),
buffer=np.array([Supplier(data.Supports.f_thox, point, self.gids) \
for i in range(n_f_thox)]),
dtype=Supplier)
suppliers = {
data.Supports.uox: uox_s,
data.Supports.th_mox: mox_th_s,
data.Supports.f_mox: mox_f_s,
data.Supports.f_thox: thox_s,
}
return suppliers
def _generate_supply(self, point, commod, requester, supplier):
r = requester
s = supplier
commod_pref = data.rxtr_pref_basis[r.kind][commod]
loc_pref = strtools.loc_pref(r.loc, s.loc, point.f_loc, point.n_reg)
pref = commod_pref + loc_pref * point.r_l_c
rnodes = r.commod_to_nodes[commod]
arcs = []
enr = r.enr(commod)
        # req coeffs cover full orders, taking relative fissile material into account
req_coeffs = r.coeffs(commod)
# sup coeffs act on the quantity of fissile material
qty = r.req_qty(commod)
sup_coeffs = s.coeffs(qty, enr)
for i in range(len(rnodes)):
req = True
nid = self.nids.next()
node = exinst.ExNode(nid, s.group.id, not req, qty)
s.nodes.append(node)
arcid = self.arcids.next()
if self.arc_tbl is not None:
self.arc_tbl.append_data([(arcid, commod, commod_pref, loc_pref)])
#print('id', arcid, 'commod', commod, 'pref', pref)
arcs.append(exinst.ExArc(
arcid,
rnodes[i].id, req_coeffs,
nid, sup_coeffs,
pref))
return arcs
def _get_arcs(self, point, reactors, suppliers):
arcs = []
for r_kind, r_ary in reactors.items():
for r in r_ary:
for commod in rxtr_commods(r.kind, point.f_fc):
for s in suppliers[data.commod_to_sup[commod]]:
supply = self._generate_supply(point, commod, r, s)
arcs.append(supply)
return np.concatenate(arcs)
def gen_inst(self, point, instid=None, io_manager=None):
"""Parameters
----------
point : structured_species.Point
A representation of a point in parameter space
instid : uuid
the id for the instance
io_manager : cyclopts_io.IOManager, optional
IOManager that gives access to tables/groups for writing
Returns
-------
inst : tuple of lists of ExGroups, ExNodes, and ExArgs
A representation of a problem instance to be used by this species'
family
"""
# reset id generation
self.nids = cyctools.Incrementer()
self.gids = cyctools.Incrementer()
self.arcids = cyctools.Incrementer()
self.instid = instid
# set up IO
self.tables = None if io_manager is None else io_manager.tables
self.groups = None if io_manager is None else io_manager.groups
self.arc_tbl = None
if self.groups is not None:
arc_grp = self.groups[strtools.arc_io_name]
arc_tbl_path = '/'.join([arc_grp.path,
'id_' + self.instid.hex])
self.arc_tbl = cycio.Table(arc_grp.h5file, arc_tbl_path, strtools.arc_tbl_dtype)
self.arc_tbl.cond_create()
# species objects
reactors = self._get_reactors(point)
suppliers = self._get_suppliers(point)
# create arcs
arcs = self._get_arcs(point, reactors, suppliers)
if self.arc_tbl is not None:
self.arc_tbl.flush()
# collect nodes
r_nodes = np.concatenate([x.nodes for ary in reactors.values() \
for x in ary])
s_nodes = np.concatenate([x.nodes for ary in suppliers.values() \
for x in ary])
nodes = np.concatenate((r_nodes, s_nodes))
# collect groups
r_groups = [x.group for ary in reactors.values() for x in ary]
s_groups = [x.group for ary in suppliers.values() for x in ary]
groups = np.concatenate((r_groups, s_groups))
return groups, nodes, arcs
def post_process(self, instid, solnids, props, io_managers):
"""Perform any post processing on input and output.
Parameters
----------
instid : UUID
UUID of the instance to post process
solnids : tuple of UUIDs
a collection of solution UUIDs corresponding the instid
props : tuple, other
as defined by cyclopts.exchange_family
io_managers : tuple of cyclopts.cyclopts_io.IOManager
iomanager from an input file, iomanager from an output file,
and iomanager from a post-processed file
"""
strtools.post_process(instid, solnids, props, io_managers, self.name)
| {
"content_hash": "f7afb8a7780c970099129b057cc251ce",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 92,
"avg_line_length": 36.98364008179959,
"alnum_prop": 0.5409455349737351,
"repo_name": "gidden/cyclopts",
"id": "68e620e551dc002d5e4a3b483c14aed966f21a68",
"size": "18085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyclopts/structured_species/request.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3245"
},
{
"name": "C++",
"bytes": "10533"
},
{
"name": "Python",
"bytes": "530544"
},
{
"name": "Shell",
"bytes": "295"
}
],
"symlink_target": ""
} |
from . import PluginBase
__all__ = ['Hash']
class Hash(PluginBase):
def help(self):
return """[string1] [string2]
Convert from ASCII string to hash string.
ex)
> hash a b
c3
"""
def execute(self, args):
if len(args) != 2:
            raise ValueError('wrong number of arguments given')
def calc(string):
return modified_scientific_notation(ascii_digits(string))
return '{:x}'.format(calc(args[0]) + calc(args[1]))
def ascii_digits(string):
"""
Convert string to digits based on ASCII codes.
>>> ascii_digits('a') # same as ord('a')
97
>>> ascii_digits('ab')
9798
>>> ascii_digits('abc')
979899
>>> ascii_digits('I ♥️ Python.')
Traceback (most recent call last):
...
ValueError: string has non-ascii character
:type string: str
:rtype: int
"""
digits = [ord(c) for c in string]
if any(d > 127 for d in digits):
raise ValueError('string has non-ascii character')
return int(''.join([str(d) for d in digits]))
def modified_scientific_notation(num):
"""
Convert numerical value to modified scientific notation.
    If the decimal representation of num is shorter than 22 digits, return num as it is.
>>> modified_scientific_notation(1)
1
    Otherwise, return the concatenation of two parts: the mantissa without its integral part, and the exponent.
>>> modified_scientific_notation(123456789012345678901234567890) # 1.2345678901234568e+29
234567890123456829
:type num: int
:rtype: int
"""
if len(str(num)) < 22:
return num
import re
    p = re.split(r'(\.|e\+)', '{:.16e}'.format(num))
return int(p[2] + p[4])
| {
"content_hash": "175c42e588e7b6e6d0c271ba3a7679f8",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 94,
"avg_line_length": 21.818181818181817,
"alnum_prop": 0.6083333333333333,
"repo_name": "mikoim/funstuff",
"id": "08d36169858cefd8c564741d6686e0bcf5a37e8d",
"size": "1684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codecheck/codecheck-3608/app/plugins/hash.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "26516"
},
{
"name": "C",
"bytes": "85616"
},
{
"name": "C++",
"bytes": "7221"
},
{
"name": "CMake",
"bytes": "2282"
},
{
"name": "CSS",
"bytes": "1661"
},
{
"name": "Elixir",
"bytes": "541"
},
{
"name": "Go",
"bytes": "8433"
},
{
"name": "HTML",
"bytes": "176956"
},
{
"name": "Haskell",
"bytes": "603"
},
{
"name": "Java",
"bytes": "3206"
},
{
"name": "JavaScript",
"bytes": "48109"
},
{
"name": "Jupyter Notebook",
"bytes": "9043"
},
{
"name": "Lex",
"bytes": "5512"
},
{
"name": "Makefile",
"bytes": "273"
},
{
"name": "Mathematica",
"bytes": "189020"
},
{
"name": "Nginx",
"bytes": "648"
},
{
"name": "PHP",
"bytes": "5962"
},
{
"name": "Protocol Buffer",
"bytes": "968"
},
{
"name": "Python",
"bytes": "147116"
},
{
"name": "Shell",
"bytes": "2192"
},
{
"name": "Yacc",
"bytes": "14693"
}
],
"symlink_target": ""
} |
import sys
import codecs
import pprint
import argparse
from const import *
from utils import *
def show_resource(resource):
print "name: %s" % resource['name']
print "description: %s" % resource['description']
print "orgUnitPath: %s" % resource['orgUnitPath']
print "parentOrgUnitPath: %s" % resource['parentOrgUnitPath']
if resource.has_key('blockInheritance'):
print "blockInheritance: %s" % resource['blockInheritance']
def show_resource_list(resources, verbose):
if resources.has_key('organizationUnits'):
for resource in resources['organizationUnits']:
if verbose:
show_resource(resource)
print ""
else:
print resource['orgUnitPath']
def list_orgunit(sv, args):
params = {}
params['customerId'] = args.customerId
if args.orgUnitPath:
params['orgUnitPath'] = args.orgUnitPath.decode('utf-8')
if args.type:
params['type'] = args.type
status, r = execute_admin_api(sv.list(**params))
if args.jsonPretty:
print to_pretty_json(r['organizationUnits'])
elif args.json:
print to_json(r['organizationUnits'])
else:
show_resource_list(r, args.verbose)
def get_orgunit(sv, args):
status, r = execute_admin_api(sv.get(customerId=args.customerId, orgUnitPath=args.orgUnitPath))
if status == 404:
sys.stderr.write('%s does not exist\n' % args.orgUnitPath)
sys.exit(2)
if args.jsonPretty:
print to_pretty_json(r)
elif args.json:
print to_json(r)
else:
show_resource(r)
def insert_orgunit(sv, args):
body = { 'name': args.name,
'parentOrgUnitPath': args.parentOrgUnitPath }
if args.description:
body['description'] = args.description
if args.blockInheritance:
body['blockInheritance'] = True if args.blockInheritance == 'true' else False
status, r = execute_admin_api(sv.insert(customerId=args.customerId, body=body))
if args.verbose:
if args.jsonPretty:
print to_pretty_json(r)
elif args.json:
print to_json(r)
else:
show_resource(r)
def patch_orgunit(sv, args):
body = {}
if args.name:
body['name'] = args.name
if args.description:
body['description'] = args.description
if args.parentOrgUnitPath:
body['parentOrgUnitPath'] = args.parentOrgUnitPath
if args.blockInheritance:
body['blockInheritance'] = True if args.blockInheritance == 'true' else False
if len(body) > 0:
status, r = execute_admin_api(sv.patch(customerId=args.customerId, orgUnitPath=args.orgUnitPath, body=body))
if status == 404:
sys.stderr.write('%s does not exist\n' % args.orgUnitPath)
sys.exit(2)
if args.verbose:
if args.jsonPretty:
print to_pretty_json(r)
elif args.json:
print to_json(r)
else:
show_resource(r)
def delete_orgunit(sv, args):
status, r = execute_admin_api(sv.delete(customerId=args.customerId, orgUnitPath=args.orgUnitPath))
if status == 404:
sys.stderr.write('%s does not exist\n' % args.orgUnitPath)
sys.exit(2)
def main():
parser = argparse.ArgumentParser(parents=[tools.argparser])
subparsers = parser.add_subparsers(help='sub command')
#-------------------------------------------------------------------------
# LIST
#-------------------------------------------------------------------------
parser_list = subparsers.add_parser('list', help='Retrieves a list of all organization units for an account')
parser_list.add_argument('customerId', help='customer id')
parser_list.add_argument('--orgUnitPath', help='full path to the organization unit')
parser_list.add_argument('--type', choices=['all', 'children'], default='children',
help='all: all sub-org, children: immediate children only (default)')
parser_list.add_argument('-v', '--verbose', action='store_true',
help='show organization unit data')
parser_list.add_argument('--json', action='store_true', help='output in JSON')
parser_list.add_argument('--jsonPretty', action='store_true', help='output in pretty JSON')
parser_list.set_defaults(func=list_orgunit)
#-------------------------------------------------------------------------
# GET
#-------------------------------------------------------------------------
parser_get = subparsers.add_parser('get', help='Retrieves an organization unit')
parser_get.add_argument('customerId', help='customer id')
parser_get.add_argument('orgUnitPath', help='full path of the organization unit')
parser_get.add_argument('--json', action='store_true', help='output in JSON')
parser_get.add_argument('--jsonPretty', action='store_true', help='output in pretty JSON')
parser_get.set_defaults(func=get_orgunit)
#-------------------------------------------------------------------------
# INSERT
#-------------------------------------------------------------------------
parser_insert = subparsers.add_parser('insert', help='Adds an organization unit')
parser_insert.add_argument('customerId', help='customer id')
parser_insert.add_argument('name', help='organization unit name')
parser_insert.add_argument('parentOrgUnitPath', help='parent organization unit path')
parser_insert.add_argument('--blockInheritance', choices=['true', 'false'])
parser_insert.add_argument('--description')
parser_insert.add_argument('-v', '--verbose', action='store_true', help='show all group data')
parser_insert.add_argument('--json', action='store_true', help='output in JSON')
parser_insert.add_argument('--jsonPretty', action='store_true', help='output in pretty JSON')
parser_insert.set_defaults(func=insert_orgunit)
#-------------------------------------------------------------------------
# PATCH
#-------------------------------------------------------------------------
parser_patch = subparsers.add_parser('patch', help='Updates an organization unit')
parser_patch.add_argument('customerId')
parser_patch.add_argument('orgUnitPath', help='full path of the organization unit')
parser_patch.add_argument('--name')
parser_patch.add_argument('--description')
parser_patch.add_argument('--orgUnitPath')
parser_patch.add_argument('--parentOrgUnitPath')
parser_patch.add_argument('--blockInheritance', choices=['true', 'false'])
parser_patch.add_argument('-v', '--verbose', action='store_true', help='show all group data')
parser_patch.add_argument('--json', action='store_true', help='output in JSON')
parser_patch.add_argument('--jsonPretty', action='store_true', help='output in pretty JSON')
parser_patch.set_defaults(func=patch_orgunit)
#-------------------------------------------------------------------------
# DELETE
#-------------------------------------------------------------------------
parser_delete = subparsers.add_parser('delete', help='Removes an organization unit')
parser_delete.add_argument('customerId')
parser_delete.add_argument('orgUnitPath', help='full path of the organization unit')
parser_delete.set_defaults(func=delete_orgunit)
args = parser.parse_args()
service = get_directory_service(args)
args.func(service.orgunits(), args)
if __name__ == '__main__':
sys.stdout = codecs.getwriter('utf_8')(sys.stdout)
main()
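# Example invocations (illustrative only; the customer id and paths below are
# placeholders, and the oauth2client flags inherited from tools.argparser are
# omitted):
#
#   python orgunit.py list C01234abcd --type all -v
#   python orgunit.py insert C01234abcd Engineering / --description "Engineering dept"
#   python orgunit.py delete C01234abcd /Engineering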
| {
"content_hash": "59ef94c136fb5cdb51ff0eac2fd81394",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 116,
"avg_line_length": 44.104046242774565,
"alnum_prop": 0.5874180865006553,
"repo_name": "yteraoka/googleapps-directory-tools",
"id": "a2d41751ca3997627fd3c0f2239f11b6a4e1d120",
"size": "7677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orgunit.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86205"
}
],
"symlink_target": ""
} |
from flask import render_template, Response, request, flash, redirect, url_for, abort, send_from_directory
from flask.ext.login import login_required
from werkzeug import secure_filename
from app import app, db, models
from app.forms.upload_forms import RecordForm
from app.views.util.login import requires_roles
from app.views.util.image import resize_image_thumb, resize_image_regular
from app.views.util.selectors import personChoicesForSelectField, selectPersonsWhoAreDoctors, selectPersonsWhoAreRadiologists, selectPersonsWhoArePatients
import uuid
import os
@app.route('/record/list', methods=['GET'])
@login_required
@requires_roles('r', 'a')
def list_records():
"""
List of all the Radiology Records
"""
records = models.Record.query.all()
return render_template('list_records.html', records=records)
@app.route('/record/upload', methods=['GET', 'POST'])
@login_required
@requires_roles('r', 'a')
def upload_record():
"""
Upload a new Radiology Record
"""
form = RecordForm(request.form)
# Populate the Form
patients = selectPersonsWhoArePatients()
patientChoices = personChoicesForSelectField(patients)
doctors = selectPersonsWhoAreDoctors()
doctorChoices = personChoicesForSelectField(doctors)
radiologists = selectPersonsWhoAreRadiologists()
radiologistChoices = personChoicesForSelectField(radiologists)
form.patient_id.choices = patientChoices
form.doctor_id.choices = doctorChoices
form.radiologist_id.choices = radiologistChoices
if form.validate_on_submit():
# Create the Record
record = models.Record()
form.populate_obj(record)
db.session.add(record)
db.session.commit()
# Create the images
images = request.files.getlist("images")
if images:
for img in images:
# Create Images
file_name = str(uuid.uuid4()) + secure_filename(img.filename)
image_file = os.path.join(app.config['UPLOAD_FOLDER'], file_name)
img.save(image_file)
thumb_file_name = os.path.splitext(file_name)[0] + ".thumbnail"
thumb_file = os.path.splitext(image_file)[0] + ".thumbnail"
regular_file_name = os.path.splitext(file_name)[0] + ".regular"
regular_file = os.path.splitext(image_file)[0] + ".regular"
# Resize
resize_image_thumb(img, thumb_file)
resize_image_regular(img, regular_file)
image = models.Image(record_id=record.record_id,
thumbnail=thumb_file_name.encode('utf-8'),
regular_size=regular_file_name.encode('utf-8'),
full_size=file_name.encode('utf-8'))
db.session.add(image)
db.session.commit()
flash(u'Record {} has been saved'.format(record.record_id))
return redirect(url_for('list_records'))
return render_template('upload_record.html', title='Upload a Record', form=form)
@app.route('/record/<id>/delete', methods=['GET', 'POST'])
@login_required
@requires_roles('r', 'a')
def delete_record(id):
"""
Delete a record.
:param id: id of record to delete.
:return: Delete warning dialogue page.
"""
record = models.Record.query.get_or_404(id)
form = RecordForm(obj=record)
if form.is_submitted():
# Delete the record's images
for image in record.images:
db.session.delete(image)
db.session.commit()
# Delete the record
db.session.delete(record)
db.session.commit()
flash(u'{} has been deleted'.format(id))
return redirect(url_for('list_records'))
return render_template('delete_warning.html', form=form, objType="Record", objId=id)
@app.route('/image/<int:image_id>/full')
@login_required
@requires_roles('r', 'a')
def full_img(image_id):
"""
    Returns the full-size version of an image from the database as a jpeg.
    :param image_id: id of the image to display.
"""
image = models.Image.query.get_or_404(image_id)
return send_from_directory(app.config['UPLOAD_FOLDER'], image.full_size.decode("utf-8"))
@app.route('/image/<int:image_id>/regular/<template>')
@app.route('/image/<int:image_id>/regular')
@login_required
@requires_roles('r', 'a')
def regular_img(image_id, template=""):
"""
    Returns the regular-size version of an image from the database as a jpeg.
    :param image_id: id of the image to display.
"""
image = models.Image.query.get_or_404(image_id)
if template != "":
return render_template('regular_image.html', image=image)
else:
return send_from_directory(app.config['UPLOAD_FOLDER'], image.regular_size.decode("utf-8"))
@app.route('/image/<int:image_id>/thumbnail')
@login_required
@requires_roles('r', 'a')
def thumbnail_img(image_id):
"""
Returns a thumbnail of an image from the database as a jpeg.
    :param image_id: id of the image to display.
"""
image = models.Image.query.get_or_404(image_id)
return send_from_directory(app.config['UPLOAD_FOLDER'], image.thumbnail.decode("utf-8"))
| {
"content_hash": "52066c85b98cac267900da0d64c2d416",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 154,
"avg_line_length": 34.79194630872483,
"alnum_prop": 0.6458333333333334,
"repo_name": "MarkGalloway/RIS",
"id": "d70704607a9db2b023add291a1f4e3d870069415",
"size": "5184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views/record_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1197"
},
{
"name": "HTML",
"bytes": "16817"
},
{
"name": "Python",
"bytes": "47439"
}
],
"symlink_target": ""
} |
from typing import Optional
from path import Path
import cli_ui as ui
import tsrc
import tsrc.executor
class RemoteSetter(tsrc.executor.Task[tsrc.Repo]):
"""
For each repository:
* look for the remote configured in the manifest,
* add any missing remote,
* if a remote is found but with an incorrect URL, update its URL.
"""
def __init__(self, workspace_path: Path) -> None:
self.workspace_path = workspace_path
def on_start(self, *, num_items: int) -> None:
ui.info_2("Configuring remotes")
def on_failure(self, *, num_errors: int) -> None:
ui.error("Failed to configure remotes")
def display_item(self, repo: tsrc.Repo) -> str:
return repo.dest
def process(self, index: int, count: int, repo: tsrc.Repo) -> None:
for remote in repo.remotes:
existing_remote = self.get_remote(repo, remote.name)
if existing_remote:
if existing_remote.url != remote.url:
self.set_remote(repo, remote)
else:
self.add_remote(repo, remote)
def get_remote(self, repo: tsrc.Repo, name: str) -> Optional[tsrc.Remote]:
full_path = self.workspace_path / repo.dest
rc, url = tsrc.git.run_captured(
full_path, "remote", "get-url", name, check=False
)
if rc != 0:
return None
else:
return tsrc.Remote(name=name, url=url)
def set_remote(self, repo: tsrc.Repo, remote: tsrc.Remote) -> None:
full_path = self.workspace_path / repo.dest
# fmt: off
ui.info_3(repo.dest + ":", "Update remote", ui.reset,
ui.bold, remote.name, ui.reset,
"to new url:", ui.brown, f"({remote.url})")
# fmt: on
tsrc.git.run(full_path, "remote", "set-url", remote.name, remote.url)
def add_remote(self, repo: tsrc.Repo, remote: tsrc.Remote) -> None:
full_path = self.workspace_path / repo.dest
# fmt: off
ui.info_3(repo.dest + ":", "Add remote",
ui.bold, remote.name, ui.reset,
ui.brown, f"({remote.url})")
# fmt: on
tsrc.git.run(full_path, "remote", "add", remote.name, remote.url)
| {
"content_hash": "b9ea3ebd72d5266e93da81679c136beb",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 78,
"avg_line_length": 34.21212121212121,
"alnum_prop": 0.5739592559787422,
"repo_name": "TankerApp/tsrc",
"id": "99db37e4babc6ef0eb63763622c0f2e88e1c42ee",
"size": "2258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tsrc/workspace/remote_setter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "108166"
}
],
"symlink_target": ""
} |
def sqrt(n):
approx = n/2.0
better = (approx + n/approx)/2.0
while better != approx:
approx = better
better = (approx + n/approx)/2.0
return approx
x = input("Enter x: ")
print "Square Root: ", sqrt(x)
| {
"content_hash": "5f0c4da3b9460abc4a4d23e33026ea91",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 40,
"avg_line_length": 23.5,
"alnum_prop": 0.5617021276595745,
"repo_name": "yusufshakeel/Python-Project",
"id": "8fe7d2af41b0405bb50be4204507a7753d6abc28",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/newtonSquareRoot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54750"
}
],
"symlink_target": ""
} |
import requests
import re
from hashlib import sha512
from webpageparser import WebPageParser, WebPageNode
class WebPage:
titles_seen = dict()
descriptions_seen = dict() # hash for description: id of the first webpage
regex_url = re.compile(r'(?:http[s]?:/|)/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
def __init__(self, url, depth=0, internal=True):
"""
Initialise an instance of WebPage
"""
self.id = None
self.url = url
self.depth = depth
self.internal = internal
self.noindex = False
self.link_used_by = list()
self.ressource_used_by = list()
self.link_towards_ext = None # None => not analysed
self.link_towards_int = None
self.ressource_from = None
self.has_brokenlinks = False
self.has_brokenressources = False
self.server_unreachable = False
self.server_invalid_query = False
self.status = 0
self.content_type = None
self.content_length = None
self.duplicated_title = False
self.duplicated_description = False
self.check_dict = None
def get_url(self):
return self.url
def get_formatted_length(self):
if not self.content_length:
return "N.A."
unit = 0
length = self.content_length
while length >= 1024:
unit += 1
length /= 1024.
units = ["o", "ko", "Mo", "Go", "To"]
if unit >= len(units):
return "N.A."
if unit == 0:
return "%d%s" % (int(length), units[unit])
return "%d.%d%s" % (int(length), int((length*10.)%10), units[unit])
def add_link_used_by(self, from_wp):
"""
A scanned webpage has a link towards this page
"""
self.link_used_by.append(from_wp)
def add_ressource_used_by(self, from_wp):
"""
A scanned webpage uses this webpage as a resource
"""
self.ressource_used_by.append(from_wp)
def check_failures(func):
def inner(*args, **kwargs):
output = func(*args, **kwargs)
self = args[0]
print "depth=%d, url=%s [%s][%d]" % (self.depth, self.url, self.content_type, self.status)
if self.status not in (200, 301, 302):
for wp in self.link_used_by:
wp.has_brokenlinks = True
for wp in self.ressource_used_by:
wp.has_brokenressources = True
return output
return inner
def carryout_request(self, full_request, num_retry=0):
"""
        Try to get the webpage content/header of self.url:
        full_request=True fetches the full content (GET query),
        full_request=False fetches only the header (HEAD query).
        It will retry num_retry times before stopping, so the maximum number of tries is num_retry + 1.
"""
retry = 0
webpage = None
while retry <= num_retry and not webpage:
retry += 1
print "Try #%d: %s" % (retry, self.url)
self.server_unreachable = False
self.server_invalid_query = False
try:
if full_request:
webpage = requests.get(self.url, timeout=10)
else:
webpage = requests.head(self.url, timeout=10)
except requests.ConnectionError:
self.server_unreachable = True
                webpage = None
except requests.exceptions.Timeout:
self.server_unreachable = True
webpage = None
except requests.HTTPError:
self.server_invalid_query = True
webpage = False
except requests.exceptions.TooManyRedirects:
self.server_invalid_query = True
return None
except requests.exceptions.InvalidSchema:
self.server_invalid_query = True
return None
if not webpage:
return None
# Status
self.status = webpage.status_code
# Content-type
try:
if webpage.headers['content-type']:
m = re.search(r'[a-zA-Z\.0-9-]+/[a-zA-Z\.0-9-]+', webpage.headers['content-type'])
if m:
self.content_type = m.group(0)
except KeyError:
pass
# Content-length
# The best way to get the real value of content-length is to compute it from the data
# The value returned by a server during head/get query for non-static files is not good (except on custom configurations of Apache)
if full_request:
self.content_length = len(webpage.text)
else:
try:
self.content_length = int(webpage.headers['content-length'])
except TypeError:
pass
except ValueError:
pass
except KeyError:
pass
return webpage
def sourcecode_analysis(self, html_code, website, seocheckmanager, nofollow, noindex):
"""
Analyse the source code of the webpage
in order to give relevant details concerning ways to improve the ranking of the website
This analysis focus on:
+ Gathering data for SEOChecks
+ Adding probes in order to check the availability of ressources (images, favicons, iframes, applets, stylesheets or js scripts)
+ Adding probes to crawl pages that are directly linked to this one
+ Getting title and description to find possible duplicates beween different pages
"""
webpageparser = WebPageParser()
webpageparser.feed(html_code)
# SEOCheckManager
self.check_dict = seocheckmanager.generate_webpage_check_dict(webpageparser)
# Look for ressources
nodes_ressources = webpageparser.find("script[src] , link[href] , img[src] , iframe[src] , object[data] , applet[code]")
for node in nodes_ressources:
try:
node_tag = node.get_tag()
node_attrs = node.get_attrs()
if node_tag == "link":
url = node_attrs["href"]
elif node_tag == "object":
url = node_attrs["data"]
elif node_tag == "applet":
url = node_attrs["code"]
else:
url = node_attrs["src"]
m_url = WebPage.regex_url.match(url)
if m_url:
wp = website.retrieve_webpage(self, url, True)
self.ressource_from.append(wp)
if wp.status and wp.status not in (200, 301, 302):
self.has_brokenressources = True
except KeyError:
pass
# meta[name=robots]
nofollow_global = False
if nofollow:
nodes = webpageparser.find("meta[name=robots][content*=nofollow]")
if len(nodes) >= 1:
nofollow_global = True
# Look for other pages
if not nofollow_global:
nodes_a = webpageparser.find("a[href]")
for node in nodes_a:
try:
node_attrs = node.get_attrs()
url = node_attrs["href"]
try:
nofollow_local = "nofollow" in node_attrs["rel"]
except KeyError:
nofollow_local = False
if nofollow and nofollow_local:
continue
m_url = WebPage.regex_url.match(url)
if m_url:
wp = website.retrieve_webpage(self, url)
if wp.internal:
self.link_towards_int.append(wp)
else:
self.link_towards_ext.append(wp)
if wp.status and wp.status not in (200, 301, 302):
self.has_brokenlinks = True
except KeyError:
pass
# title / description
nodes = webpageparser.find("head > meta[name=robots][content*=noindex]")
if len(nodes) >= 1:
self.noindex = True
if noindex:
return
nodes = webpageparser.find("head > title")
if len(nodes) >= 1:
node = nodes[0]
title = node.get_data()
if title:
title_digest = sha512(title.encode('utf-8')).digest()
if title_digest in WebPage.titles_seen.keys():
self.duplicated_title = True
WebPage.titles_seen[title_digest].duplicated_title = True
else:
WebPage.titles_seen[title_digest] = self
nodes = webpageparser.find("head > meta[name=description][content]")
if len(nodes) >= 1:
node = nodes[0]
description = node.get_attrs()["content"]
if description:
description_digest = sha512(description.encode('utf-8')).digest()
if description_digest in WebPage.descriptions_seen.keys():
self.duplicated_description = True
WebPage.descriptions_seen[description_digest].duplicated_description = True
else:
WebPage.descriptions_seen[description_digest] = self
@check_failures
def scan(self, website, seocheckmanager, noindex, nofollow, deep, num_retry):
"""
Scan the webpage
looking for relationships with other pages
"""
self.link_towards_ext = list()
self.link_towards_int = list()
self.ressource_from = list()
webpage_header = self.carryout_request(False, num_retry)
if not webpage_header:
return
# Not a success
# or external page
if deep:
if not self.content_type: # if content-type is not defined for deep analysis: full request
pass
elif self.status not in (200, 301, 302) or "text/html" not in self.content_type:
return
else:
if self.status != 200 or not self.internal or not self.content_type or "text/html" not in self.content_type:
return
self.status = 0
self.content_length = None
webpage_query = self.carryout_request(True, num_retry)
if not webpage_query:
return
# Status can change when we run a get query
        # e.g. a 500 status can be caused by a programming error that cancels the generation of the page
if self.status != 200:
return
# Stop there for external webpages and deep analysis
if not self.internal:
return
self.sourcecode_analysis(webpage_query.text, website, seocheckmanager, nofollow, noindex)
return
def get_check_dict(self):
return self.check_dict
| {
"content_hash": "efadfabc50361529bfce76f9db6770d3",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 139,
"avg_line_length": 35.57232704402516,
"alnum_prop": 0.5236916548797736,
"repo_name": "dubzzz/py-seo-helper",
"id": "7c40b149f4758dd30a3258306f0140713904f02c",
"size": "11312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/webpage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1102"
},
{
"name": "Python",
"bytes": "58094"
}
],
"symlink_target": ""
} |
from app_util import default_logger
from app_util import iso8601_to_fixed_ts
class db_connector_template():
def __init__(self, **kwargs):
"""
kwargs should contain: logger, debug_level
"""
self.logger = kwargs.get("logger", default_logger)
self.debug_level = kwargs.get("debug_level", 0)
self.tz = kwargs.get("tz", "GMT")
self.sql_create_table = kwargs.get("sql_create_table")
self.sql_insert_table = kwargs.get("sql_insert_table")
self.db_init(**kwargs)
def db_init(self, **kwargs):
'''
TEMPLATE should be overwritten.
- initialize your database.
- the return value should be:
            + False: something went wrong.
+ True: succeeded.
'''
return True
def db_submit(self, **kwargs):
'''
TEMPLATE should be overwritten.
- submit the data into your database such as mongodb or sqlite3.
- the return value should be:
+ None: ignore parsing.
            + False: something went wrong.
+ True: succeeded.
'''
return True
def get_app_data(self, kv_data, **kwargs):
"""
        If something goes wrong, return False.
"""
app_data = kv_data.get("__app_data")
if app_data is None:
self.logger.error("the payload haven't been parsed.")
return False
app_data["ts"] = iso8601_to_fixed_ts(kv_data["Time"], self.tz)
app_data["deveui"] = kv_data["DevEUI"]
app_data["rssi"] = kv_data["LrrRSSI"]
app_data["snr"] = kv_data["LrrSNR"]
if self.debug_level > 0:
self.logger.debug("app_data = {}".format(app_data))
return app_data
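# Illustrative sketch only (not part of the original module): a minimal
# connector that follows the template above by overriding db_init() and
# db_submit().  The in-memory list stands in for a real database handle, and
# passing the parsed record as a "kv_data" keyword argument is an assumption
# made for this example.
class db_connector_memory(db_connector_template):
    def db_init(self, **kwargs):
        # keep submitted records in a plain list instead of a real database
        self.records = []
        return True
    def db_submit(self, **kwargs):
        kv_data = kwargs.get("kv_data")
        if kv_data is None:
            return None  # nothing to submit, ignore
        app_data = self.get_app_data(kv_data)
        if app_data is False:
            return False
        self.records.append(app_data)
        return True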
| {
"content_hash": "ba190121bcf4f1663a55242c24ff658a",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 72,
"avg_line_length": 33.61538461538461,
"alnum_prop": 0.5560640732265446,
"repo_name": "tanupoo/lorawan-ss-as",
"id": "44879b884df5c188b8de70b28e228c3838d8ddb0",
"size": "1748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db_connectors/db_connector_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27001"
},
{
"name": "Shell",
"bytes": "1362"
}
],
"symlink_target": ""
} |
from sympy.combinatorics.named_groups import SymmetricGroup, AlternatingGroup,\
CyclicGroup
from sympy.combinatorics.testutil import _verify_bsgs, _cmp_perm_lists,\
_naive_list_centralizer, _verify_centralizer,\
_verify_normal_closure
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.perm_groups import PermutationGroup
from random import shuffle
def test_cmp_perm_lists():
S = SymmetricGroup(4)
els = list(S.generate_dimino())
other = els[:]
shuffle(other)
assert _cmp_perm_lists(els, other) == True
def test_naive_list_centralizer():
# verified by GAP
S = SymmetricGroup(3)
A = AlternatingGroup(3)
assert _naive_list_centralizer(S, S) == [Permutation([0, 1, 2])]
assert PermutationGroup(_naive_list_centralizer(S, A)) == A
def test_verify_bsgs():
S = SymmetricGroup(5)
S.schreier_sims()
base = S.base
strong_gens = S.strong_gens
gens = S.generators
assert _verify_bsgs(S, base, strong_gens) == True
assert _verify_bsgs(S, base[:-1], strong_gens) == False
assert _verify_bsgs(S, base, S.generators) == False
def test_verify_centralizer():
# verified by GAP
S = SymmetricGroup(3)
A = AlternatingGroup(3)
triv = PermutationGroup([Permutation([0, 1, 2])])
assert _verify_centralizer(S, S, centr=triv)
assert _verify_centralizer(S, A, centr=A)
def test_verify_normal_closure():
# verified by GAP
S = SymmetricGroup(3)
A = AlternatingGroup(3)
assert _verify_normal_closure(S, A, closure=A)
S = SymmetricGroup(5)
A = AlternatingGroup(5)
C = CyclicGroup(5)
assert _verify_normal_closure(S, A, closure=A)
assert _verify_normal_closure(S, C, closure=A)
| {
"content_hash": "6e2d2991cb27bd41e33089c71e6832e2",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 33,
"alnum_prop": 0.6905594405594405,
"repo_name": "flacjacket/sympy",
"id": "63a1a1bdb1acd11dc7b3403eb062bf7ade90422c",
"size": "1716",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/combinatorics/tests/test_testutil.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10293116"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
"""Test the collector flows."""
import os
from grr.client import vfs
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_registry
from grr.lib import artifact_test
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
# pylint: disable=unused-import
from grr.lib.flows.general import artifact_fallbacks
from grr.lib.flows.general import collectors
# pylint: enable=unused-import
from grr.lib.flows.general import transfer
from grr.test_data import client_fixture
# pylint: mode=test
class CollectorTest(artifact_test.ArtifactTest):
pass
class TestArtifactCollectors(CollectorTest):
"""Test the artifact collection mechanism with fake artifacts."""
def setUp(self):
"""Make sure things are initialized."""
super(TestArtifactCollectors, self).setUp()
self.original_artifact_reg = artifact_registry.ArtifactRegistry.artifacts
artifact_registry.ArtifactRegistry.ClearRegistry()
self.LoadTestArtifacts()
artifact_reg = artifact_registry.ArtifactRegistry.artifacts
self.fakeartifact = artifact_reg["FakeArtifact"]
self.fakeartifact2 = artifact_reg["FakeArtifact2"]
self.output_count = 0
with aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw") as fd:
fd.Set(fd.Schema.SYSTEM("Linux"))
kb = fd.Schema.KNOWLEDGE_BASE()
artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
fd.Set(kb)
def tearDown(self):
super(TestArtifactCollectors, self).tearDown()
artifact_registry.ArtifactRegistry.artifacts = self.original_artifact_reg
self.fakeartifact.sources = [] # Reset any ArtifactSources
self.fakeartifact.conditions = [] # Reset any Conditions
self.fakeartifact2.sources = [] # Reset any ArtifactSources
self.fakeartifact2.conditions = [] # Reset any Conditions
def testInterpolateArgs(self):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
collect_flow.state.Register("knowledge_base", rdfvalue.KnowledgeBase())
collect_flow.current_artifact_name = "blah"
collect_flow.state.knowledge_base.MergeOrAddUser(
rdfvalue.KnowledgeBaseUser(username="test1"))
collect_flow.state.knowledge_base.MergeOrAddUser(
rdfvalue.KnowledgeBaseUser(username="test2"))
test_rdf = rdfvalue.KnowledgeBase()
action_args = {"usernames": ["%%users.username%%", "%%users.username%%"],
"nointerp": "asdfsdf", "notastring": test_rdf}
kwargs = collect_flow.InterpolateDict(action_args)
self.assertItemsEqual(kwargs["usernames"],
["test1", "test2", "test1", "test2"])
self.assertEqual(kwargs["nointerp"], "asdfsdf")
self.assertEqual(kwargs["notastring"], test_rdf)
# We should be using an array since users.username will expand to multiple
# values.
self.assertRaises(ValueError, collect_flow.InterpolateDict,
{"bad": "%%users.username%%"})
list_args = collect_flow.InterpolateList(["%%users.username%%",
r"%%users.username%%\aa"])
self.assertItemsEqual(list_args, ["test1", "test2", r"test1\aa",
r"test2\aa"])
list_args = collect_flow.InterpolateList(["one"])
self.assertEqual(list_args, ["one"])
def testGrepRegexCombination(self):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
self.assertEqual(collect_flow._CombineRegex([r"simple"]),
"simple")
self.assertEqual(collect_flow._CombineRegex(["a", "b"]),
"(a)|(b)")
self.assertEqual(collect_flow._CombineRegex(["a", "b", "c"]),
"(a)|(b)|(c)")
self.assertEqual(collect_flow._CombineRegex(["a|b", "[^_]b", "c|d"]),
"(a|b)|([^_]b)|(c|d)")
def testGrep(self):
class MockCallFlow(object):
def CallFlow(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
mock_call_flow = MockCallFlow()
with utils.Stubber(collectors.ArtifactCollectorFlow, "CallFlow",
mock_call_flow.CallFlow):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
collect_flow.state.Register("knowledge_base", rdfvalue.KnowledgeBase())
collect_flow.current_artifact_name = "blah"
collect_flow.state.knowledge_base.MergeOrAddUser(
rdfvalue.KnowledgeBaseUser(username="test1"))
collect_flow.state.knowledge_base.MergeOrAddUser(
rdfvalue.KnowledgeBaseUser(username="test2"))
collector = rdfvalue.ArtifactSource(
type=rdfvalue.ArtifactSource.SourceType.GREP,
attributes={"paths": ["/etc/passwd"],
"content_regex_list": [r"^a%%users.username%%b$"]})
collect_flow.Grep(collector, rdfvalue.PathSpec.PathType.TSK)
conditions = mock_call_flow.kwargs["conditions"]
self.assertEqual(len(conditions), 1)
regexes = conditions[0].contents_regex_match.regex.SerializeToString()
self.assertItemsEqual(regexes.split("|"), ["(^atest1b$)", "(^atest2b$)"])
self.assertEqual(mock_call_flow.kwargs["paths"], ["/etc/passwd"])
def testGetArtifact1(self):
"""Test we can get a basic artifact."""
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"FingerprintFile", "HashBuffer")
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
    # Dynamically add an ArtifactSource specifying the base path.
file_path = os.path.join(self.base_path, "test_img.dd")
coll1 = rdfvalue.ArtifactSource(
type=rdfvalue.ArtifactSource.SourceType.FILE,
attributes={"paths": [file_path]})
self.fakeartifact.sources.append(coll1)
artifact_list = ["FakeArtifact"]
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock, artifact_list=artifact_list,
use_tsk=False, token=self.token, client_id=self.client_id):
pass
# Test the AFF4 file that was created.
fd1 = aff4.FACTORY.Open("%s/fs/os/%s" % (self.client_id, file_path),
token=self.token)
fd2 = open(file_path)
fd2.seek(0, 2)
self.assertEqual(fd2.tell(), int(fd1.Get(fd1.Schema.SIZE)))
def testRunGrrClientActionArtifact(self):
"""Test we can get a GRR client artifact."""
client_mock = action_mocks.ActionMock("ListProcesses")
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
coll1 = rdfvalue.ArtifactSource(
type=rdfvalue.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": r"ListProcesses"})
self.fakeartifact.sources.append(coll1)
artifact_list = ["FakeArtifact"]
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list,
token=self.token, client_id=self.client_id,
output="test_artifact"):
pass
# Test the AFF4 file that was created.
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add("test_artifact"),
token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))
self.assertTrue(len(fd) > 5)
def testRunGrrClientActionArtifactSplit(self):
"""Test that artifacts get split into separate collections."""
client_mock = action_mocks.ActionMock("ListProcesses", "StatFile")
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
coll1 = rdfvalue.ArtifactSource(
type=rdfvalue.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": r"ListProcesses"})
self.fakeartifact.sources.append(coll1)
self.fakeartifact2.sources.append(coll1)
artifact_list = ["FakeArtifact", "FakeArtifact2"]
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list,
token=self.token, client_id=self.client_id,
output="test_artifact",
split_output_by_artifact=True):
pass
# Check that we got two separate collections based on artifact name
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
self.client_id).Add("test_artifact_FakeArtifact"),
token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))
self.assertTrue(len(fd) > 5)
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
self.client_id).Add("test_artifact_FakeArtifact2"),
token=self.token)
self.assertTrue(len(fd) > 5)
self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))
def testConditions(self):
"""Test we can get a GRR client artifact with conditions."""
# Run with false condition.
client_mock = action_mocks.ActionMock("ListProcesses")
coll1 = rdfvalue.ArtifactSource(
type=rdfvalue.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": "ListProcesses"},
conditions=["os == 'Windows'"])
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "AFF4Volume")
# Now run with matching or condition.
coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "RDFValueCollection")
# Now run with impossible or condition.
coll1.conditions.append("os == 'NotTrue'")
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "AFF4Volume")
def testSupportedOS(self):
"""Test supported_os inside the collector object."""
# Run with false condition.
client_mock = action_mocks.ActionMock("ListProcesses")
coll1 = rdfvalue.ArtifactSource(
type=rdfvalue.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": "ListProcesses"}, supported_os=["Windows"])
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "AFF4Volume")
# Now run with matching or condition.
coll1.conditions = []
coll1.supported_os = ["Linux", "Windows"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "RDFValueCollection")
# Now run with impossible or condition.
coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
coll1.supported_os = ["NotTrue"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "AFF4Volume")
def _RunClientActionArtifact(self, client_mock, artifact_list):
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
self.output_count += 1
output = "test_artifact_%d" % self.output_count
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list,
token=self.token, client_id=self.client_id,
output=output):
pass
# Test the AFF4 file was not created, as flow should not have run due to
# conditions.
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add(output),
token=self.token)
return fd
class TestArtifactCollectorsInteractions(CollectorTest):
"""Test the collection of artifacts.
This class loads both real and test artifacts to test the interaction of badly
defined artifacts with real artifacts.
"""
def setUp(self):
"""Add test artifacts to existing registry."""
super(TestArtifactCollectorsInteractions, self).setUp()
self.original_artifact_reg = artifact_registry.ArtifactRegistry.artifacts
self.LoadTestArtifacts()
def tearDown(self):
super(TestArtifactCollectorsInteractions, self).tearDown()
artifact_registry.ArtifactRegistry.artifacts = self.original_artifact_reg
def testProcessCollectedArtifacts(self):
"""Test downloading files from artifacts."""
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "FingerprintFile",
"ListDirectory")
# Get KB initialized
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
artifact_list = ["WindowsPersistenceMechanismFiles"]
with test_lib.Instrument(
transfer.MultiGetFile, "Start") as getfile_instrument:
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id,
output="analysis/{p}/{u}-{t}",
split_output_by_artifact=True):
pass
# Check MultiGetFile got called for our runkey files
# TODO(user): RunKeys for S-1-5-20 are not found because users.sid only
# expands to users with profiles.
pathspecs = getfile_instrument.args[0][0].args.pathspecs
self.assertItemsEqual([x.path for x in pathspecs],
[u"C:\\Windows\\TEMP\\A.exe"])
artifact_list = ["BadPathspecArtifact"]
with test_lib.Instrument(
transfer.MultiGetFile, "Start") as getfile_instrument:
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id,
output="analysis/{p}/{u}-{t}",
split_output_by_artifact=True):
pass
self.assertFalse(getfile_instrument.args)
class TestArtifactCollectorsRealArtifacts(CollectorTest):
"""Test the collection of real artifacts."""
def _CheckDriveAndRoot(self):
client_mock = action_mocks.ActionMock("StatFile", "ListDirectory")
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=[
"SystemDriveEnvironmentVariable"],
token=self.token, client_id=self.client_id,
output="testsystemdrive"):
pass
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
self.client_id).Add("testsystemdrive"), token=self.token)
self.assertEqual(len(fd), 1)
self.assertEqual(str(fd[0]), "C:")
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=["SystemRoot"],
token=self.token, client_id=self.client_id,
output="testsystemroot"):
pass
fd = aff4.FACTORY.Open(
rdfvalue.RDFURN(self.client_id).Add("testsystemroot"), token=self.token)
self.assertEqual(len(fd), 1)
# Filesystem gives WINDOWS, registry gives Windows
self.assertTrue(str(fd[0]) in [r"C:\Windows", r"C:\WINDOWS"])
def testSystemDriveArtifact(self):
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
class BrokenClientMock(action_mocks.ActionMock):
def StatFile(self, _):
raise IOError
def ListDirectory(self, _):
raise IOError
# No registry, broken filesystem, this should just raise.
with self.assertRaises(RuntimeError):
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow",
BrokenClientMock(), artifact_list=[
"SystemDriveEnvironmentVariable"],
token=self.token,
client_id=self.client_id,
output="testsystemdrive"):
pass
# No registry, so this should use the fallback flow
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture
self._CheckDriveAndRoot()
# Registry is present, so this should use the regular artifact collection
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
self._CheckDriveAndRoot()
def testRunWMIComputerSystemProductArtifact(self):
class WMIActionMock(action_mocks.ActionMock):
def WmiQuery(self, _):
return client_fixture.WMI_CMP_SYS_PRD
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
client_mock = WMIActionMock()
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock,
artifact_list=["WMIComputerSystemProduct"], token=self.token,
client_id=self.client_id,
dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS,
store_results_in_aff4=True):
pass
    client = aff4.FACTORY.Open(self.client_id, token=self.token)
hardware = client.Get(client.Schema.HARDWARE_INFO)
self.assertTrue(isinstance(hardware, rdfvalue.HardwareInfo))
self.assertEqual(str(hardware.serial_number), "2RXYYZ1")
self.assertEqual(str(hardware.system_manufacturer), "Dell Inc.")
def testRunWMIArtifact(self):
class WMIActionMock(action_mocks.ActionMock):
def WmiQuery(self, _):
return client_fixture.WMI_SAMPLE
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
client_mock = WMIActionMock()
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock, artifact_list=["WMILogicalDisks"],
token=self.token, client_id=self.client_id,
dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS,
store_results_in_aff4=True):
pass
# Test that we set the client VOLUMES attribute
client = aff4.FACTORY.Open(self.client_id, token=self.token)
volumes = client.Get(client.Schema.VOLUMES)
self.assertEqual(len(volumes), 2)
for result in volumes:
self.assertTrue(isinstance(result, rdfvalue.Volume))
self.assertTrue(result.windows.drive_letter in ["Z:", "C:"])
if result.windows.drive_letter == "C:":
self.assertAlmostEqual(result.FreeSpacePercent(), 76.142, delta=0.001)
self.assertEqual(result.Name(), "C:")
elif result.windows.drive_letter == "Z:":
self.assertEqual(result.Name(), "homefileshare$")
self.assertAlmostEqual(result.FreeSpacePercent(), 58.823, delta=0.001)
def testRetrieveDependencies(self):
"""Test getting an artifact without a KB using retrieve_depdendencies."""
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "FingerprintFile",
"ListDirectory")
artifact_list = ["WinDirEnvironmentVariable"]
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock, artifact_list=artifact_list,
token=self.token, client_id=self.client_id,
dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.FETCH_NOW,
output="testRetrieveDependencies"):
pass
output = aff4.FACTORY.Open(self.client_id.Add("testRetrieveDependencies"),
token=self.token)
self.assertEqual(len(output), 1)
self.assertEqual(output[0], r"C:\Windows")
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| {
"content_hash": "90c3f8c43be3220d7ce2539cc2756b86",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 80,
"avg_line_length": 41.54389312977099,
"alnum_prop": 0.644218843309293,
"repo_name": "ksmaheshkumar/grr",
"id": "d382e36dba8eb528d60d2f55cd5a088e88a7520c",
"size": "21791",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/flows/general/collectors_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "14993"
},
{
"name": "C",
"bytes": "9062"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "12047"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "48624"
},
{
"name": "JavaScript",
"bytes": "230351"
},
{
"name": "Makefile",
"bytes": "5863"
},
{
"name": "Protocol Buffer",
"bytes": "181723"
},
{
"name": "Python",
"bytes": "4855590"
},
{
"name": "Ruby",
"bytes": "4931"
},
{
"name": "Shell",
"bytes": "45459"
}
],
"symlink_target": ""
} |
from Axon.Ipc import WaitComplete
from Kamaelia.Apps.Europython09.BB.Exceptions import GotShutdownMessage
from Kamaelia.Apps.Europython09.BB.RequestResponseComponent import RequestResponseComponent
class Authenticator(RequestResponseComponent):
users = {}
State = {}
def main(self):
loggedin = False
try:
self.netPrint("")
while not loggedin:
self.send("login: ", "outbox")
yield self.waitMsg()
username = self.getMsg()[:-2] # strip \r\n
self.send("password: ", "outbox")
yield self.waitMsg()
                password = self.getMsg()[:-2] # strip \r\n
self.netPrint("")
if self.users.get(username.lower(), None) == password:
self.netPrint("Login Successful")
loggedin = True
else:
self.netPrint("Login Failed!")
except GotShutdownMessage:
self.send(self.recv("control"), "signal")
if loggedin:
self.State["remoteuser"] = username
| {
"content_hash": "28a3cd910190ea3326a04002e81efaa1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 91,
"avg_line_length": 34.8125,
"alnum_prop": 0.5529622980251346,
"repo_name": "bbc/kamaelia",
"id": "ee4c686d2ad1bcae1ab106ffe897e7096c22b268",
"size": "1939",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/Europython09/BB/Authenticator.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
from unittest import TestCase
import time
from datetime import datetime
from py3utils import TimeUtils, DatetimeUtils
class TestTimeUtils(TestCase):
def test_get_current_timestamp(self):
expected = time.time()
actual = TimeUtils.get_current_timestamp()
self.assertTrue(0 < expected - actual < 1)
class TestDatetimeUtils(TestCase):
def test_get_current_local_dt(self):
expected = datetime.now()
actual = DatetimeUtils.get_current_local_dt()
self.assertTrue(expected.day == actual.day and expected.hour == actual.hour)
| {
"content_hash": "1bf0ab91f2a60ecacf89db3a9b0ae668",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 84,
"avg_line_length": 27.73913043478261,
"alnum_prop": 0.713166144200627,
"repo_name": "hezhiming/py3utils",
"id": "4484389266e92a67b858687466ba2397dcf54215",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18207"
},
{
"name": "Shell",
"bytes": "858"
}
],
"symlink_target": ""
} |
"""Support for Wink hubs."""
from datetime import timedelta
import json
import logging
import os
import time
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
ATTR_BATTERY_LEVEL, ATTR_ENTITY_ID, ATTR_NAME, CONF_EMAIL, CONF_PASSWORD,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, STATE_OFF, STATE_ON,
__version__)
from homeassistant.core import callback
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import track_time_interval
from homeassistant.util.json import load_json, save_json
REQUIREMENTS = ['python-wink==1.10.3', 'pubnubsub-handler==1.0.3']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'wink'
SUBSCRIPTION_HANDLER = None
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
CONF_USER_AGENT = 'user_agent'
CONF_OAUTH = 'oauth'
CONF_LOCAL_CONTROL = 'local_control'
CONF_MISSING_OAUTH_MSG = 'Missing oauth2 credentials.'
ATTR_ACCESS_TOKEN = 'access_token'
ATTR_REFRESH_TOKEN = 'refresh_token'
ATTR_CLIENT_ID = 'client_id'
ATTR_CLIENT_SECRET = 'client_secret'
ATTR_PAIRING_MODE = 'pairing_mode'
ATTR_KIDDE_RADIO_CODE = 'kidde_radio_code'
ATTR_HUB_NAME = 'hub_name'
WINK_AUTH_CALLBACK_PATH = '/auth/wink/callback'
WINK_AUTH_START = '/auth/wink'
WINK_CONFIG_FILE = '.wink.conf'
USER_AGENT = "Manufacturer/Home-Assistant{} python/3 Wink/3".format(
__version__)
DEFAULT_CONFIG = {
'client_id': 'CLIENT_ID_HERE',
'client_secret': 'CLIENT_SECRET_HERE',
}
SERVICE_ADD_NEW_DEVICES = 'pull_newly_added_devices_from_wink'
SERVICE_REFRESH_STATES = 'refresh_state_from_wink'
SERVICE_RENAME_DEVICE = 'rename_wink_device'
SERVICE_DELETE_DEVICE = 'delete_wink_device'
SERVICE_SET_PAIRING_MODE = 'pair_new_device'
SERVICE_SET_CHIME_VOLUME = "set_chime_volume"
SERVICE_SET_SIREN_VOLUME = "set_siren_volume"
SERVICE_ENABLE_CHIME = "enable_chime"
SERVICE_SET_SIREN_TONE = "set_siren_tone"
SERVICE_SET_AUTO_SHUTOFF = "siren_set_auto_shutoff"
SERVICE_SIREN_STROBE_ENABLED = "set_siren_strobe_enabled"
SERVICE_CHIME_STROBE_ENABLED = "set_chime_strobe_enabled"
SERVICE_ENABLE_SIREN = "enable_siren"
SERVICE_SET_DIAL_CONFIG = "set_nimbus_dial_configuration"
SERVICE_SET_DIAL_STATE = "set_nimbus_dial_state"
ATTR_VOLUME = "volume"
ATTR_TONE = "tone"
ATTR_ENABLED = "enabled"
ATTR_AUTO_SHUTOFF = "auto_shutoff"
ATTR_MIN_VALUE = "min_value"
ATTR_MAX_VALUE = "max_value"
ATTR_ROTATION = "rotation"
ATTR_SCALE = "scale"
ATTR_TICKS = "ticks"
ATTR_MIN_POSITION = "min_position"
ATTR_MAX_POSITION = "max_position"
ATTR_VALUE = "value"
ATTR_LABELS = "labels"
SCALES = ["linear", "log"]
ROTATIONS = ["cw", "ccw"]
VOLUMES = ["low", "medium", "high"]
TONES = ["doorbell", "fur_elise", "doorbell_extended", "alert",
"william_tell", "rondo_alla_turca", "police_siren",
"evacuation", "beep_beep", "beep"]
CHIME_TONES = TONES + ["inactive"]
AUTO_SHUTOFF_TIMES = [None, -1, 30, 60, 120]
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Inclusive(CONF_EMAIL, CONF_OAUTH,
msg=CONF_MISSING_OAUTH_MSG): cv.string,
vol.Inclusive(CONF_PASSWORD, CONF_OAUTH,
msg=CONF_MISSING_OAUTH_MSG): cv.string,
vol.Inclusive(CONF_CLIENT_ID, CONF_OAUTH,
msg=CONF_MISSING_OAUTH_MSG): cv.string,
vol.Inclusive(CONF_CLIENT_SECRET, CONF_OAUTH,
msg=CONF_MISSING_OAUTH_MSG): cv.string,
vol.Optional(CONF_LOCAL_CONTROL, default=False): cv.boolean
})
}, extra=vol.ALLOW_EXTRA)
RENAME_DEVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
}, extra=vol.ALLOW_EXTRA)
DELETE_DEVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
}, extra=vol.ALLOW_EXTRA)
SET_PAIRING_MODE_SCHEMA = vol.Schema({
vol.Required(ATTR_HUB_NAME): cv.string,
vol.Required(ATTR_PAIRING_MODE): cv.string,
vol.Optional(ATTR_KIDDE_RADIO_CODE): cv.string,
}, extra=vol.ALLOW_EXTRA)
SET_VOLUME_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_VOLUME): vol.In(VOLUMES),
})
SET_SIREN_TONE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_TONE): vol.In(TONES),
})
SET_CHIME_MODE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_TONE): vol.In(CHIME_TONES),
})
SET_AUTO_SHUTOFF_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_AUTO_SHUTOFF): vol.In(AUTO_SHUTOFF_TIMES),
})
SET_STROBE_ENABLED_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_ENABLED): cv.boolean,
})
ENABLED_SIREN_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_ENABLED): cv.boolean
})
DIAL_CONFIG_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_MIN_VALUE): vol.Coerce(int),
vol.Optional(ATTR_MAX_VALUE): vol.Coerce(int),
vol.Optional(ATTR_MIN_POSITION): cv.positive_int,
vol.Optional(ATTR_MAX_POSITION): cv.positive_int,
vol.Optional(ATTR_ROTATION): vol.In(ROTATIONS),
vol.Optional(ATTR_SCALE): vol.In(SCALES),
vol.Optional(ATTR_TICKS): cv.positive_int,
})
DIAL_STATE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_VALUE): vol.Coerce(int),
vol.Optional(ATTR_LABELS): cv.ensure_list(cv.string),
})
WINK_COMPONENTS = [
'binary_sensor', 'sensor', 'light', 'switch', 'lock', 'cover', 'climate',
'fan', 'alarm_control_panel', 'scene', 'water_heater'
]
WINK_HUBS = []
def _request_app_setup(hass, config):
"""Assist user with configuring the Wink dev application."""
hass.data[DOMAIN]['configurator'] = True
configurator = hass.components.configurator
def wink_configuration_callback(callback_data):
"""Handle configuration updates."""
_config_path = hass.config.path(WINK_CONFIG_FILE)
if not os.path.isfile(_config_path):
setup(hass, config)
return
client_id = callback_data.get('client_id').strip()
client_secret = callback_data.get('client_secret').strip()
if None not in (client_id, client_secret):
save_json(_config_path,
{ATTR_CLIENT_ID: client_id,
ATTR_CLIENT_SECRET: client_secret})
setup(hass, config)
return
error_msg = "Your input was invalid. Please try again."
_configurator = hass.data[DOMAIN]['configuring'][DOMAIN]
configurator.notify_errors(_configurator, error_msg)
start_url = "{}{}".format(hass.config.api.base_url,
WINK_AUTH_CALLBACK_PATH)
description = """Please create a Wink developer app at
https://developer.wink.com.
Add a Redirect URI of {}.
They will provide you a Client ID and secret
after reviewing your request.
(This can take several days).
""".format(start_url)
hass.data[DOMAIN]['configuring'][DOMAIN] = configurator.request_config(
DOMAIN, wink_configuration_callback,
description=description, submit_caption="submit",
description_image="/static/images/config_wink.png",
fields=[{'id': 'client_id', 'name': 'Client ID', 'type': 'string'},
{'id': 'client_secret',
'name': 'Client secret',
'type': 'string'}]
)
def _request_oauth_completion(hass, config):
"""Request user complete Wink OAuth2 flow."""
hass.data[DOMAIN]['configurator'] = True
configurator = hass.components.configurator
if DOMAIN in hass.data[DOMAIN]['configuring']:
configurator.notify_errors(
hass.data[DOMAIN]['configuring'][DOMAIN],
"Failed to register, please try again.")
return
def wink_configuration_callback(callback_data):
"""Call setup again."""
setup(hass, config)
start_url = '{}{}'.format(hass.config.api.base_url, WINK_AUTH_START)
description = "Please authorize Wink by visiting {}".format(start_url)
hass.data[DOMAIN]['configuring'][DOMAIN] = configurator.request_config(
DOMAIN, wink_configuration_callback, description=description)
def setup(hass, config):
"""Set up the Wink component."""
import pywink
from pubnubsubhandler import PubNubSubscriptionHandler
if hass.data.get(DOMAIN) is None:
hass.data[DOMAIN] = {
'unique_ids': [],
'entities': {},
'oauth': {},
'configuring': {},
'pubnub': None,
'configurator': False
}
if config.get(DOMAIN) is not None:
client_id = config[DOMAIN].get(ATTR_CLIENT_ID)
client_secret = config[DOMAIN].get(ATTR_CLIENT_SECRET)
email = config[DOMAIN].get(CONF_EMAIL)
password = config[DOMAIN].get(CONF_PASSWORD)
local_control = config[DOMAIN].get(CONF_LOCAL_CONTROL)
else:
client_id = None
client_secret = None
email = None
password = None
local_control = None
hass.data[DOMAIN]['configurator'] = True
if None not in [client_id, client_secret]:
_LOGGER.info("Using legacy OAuth authentication")
if not local_control:
pywink.disable_local_control()
hass.data[DOMAIN]["oauth"]["client_id"] = client_id
hass.data[DOMAIN]["oauth"]["client_secret"] = client_secret
hass.data[DOMAIN]["oauth"]["email"] = email
hass.data[DOMAIN]["oauth"]["password"] = password
pywink.legacy_set_wink_credentials(email, password,
client_id, client_secret)
else:
_LOGGER.info("Using OAuth authentication")
if not local_control:
pywink.disable_local_control()
config_path = hass.config.path(WINK_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
_request_app_setup(hass, config)
return True
# else move on because the user modified the file
else:
save_json(config_path, DEFAULT_CONFIG)
_request_app_setup(hass, config)
return True
if DOMAIN in hass.data[DOMAIN]['configuring']:
_configurator = hass.data[DOMAIN]['configuring']
hass.components.configurator.request_done(_configurator.pop(
DOMAIN))
# Using oauth
access_token = config_file.get(ATTR_ACCESS_TOKEN)
refresh_token = config_file.get(ATTR_REFRESH_TOKEN)
# This will be called after authorizing Home-Assistant
if None not in (access_token, refresh_token):
pywink.set_wink_credentials(config_file.get(ATTR_CLIENT_ID),
config_file.get(ATTR_CLIENT_SECRET),
access_token=access_token,
refresh_token=refresh_token)
# This is called to create the redirect so the user can Authorize
        # Home Assistant.
else:
redirect_uri = '{}{}'.format(
hass.config.api.base_url, WINK_AUTH_CALLBACK_PATH)
wink_auth_start_url = pywink.get_authorization_url(
config_file.get(ATTR_CLIENT_ID), redirect_uri)
hass.http.register_redirect(WINK_AUTH_START, wink_auth_start_url)
hass.http.register_view(WinkAuthCallbackView(
config, config_file, pywink.request_token))
_request_oauth_completion(hass, config)
return True
pywink.set_user_agent(USER_AGENT)
hass.data[DOMAIN]['pubnub'] = PubNubSubscriptionHandler(
pywink.get_subscription_key())
def _subscribe():
hass.data[DOMAIN]['pubnub'].subscribe()
# Call subscribe after the user sets up wink via the configurator
# All other methods will complete setup before
# EVENT_HOMEASSISTANT_START is called meaning they
# will call subscribe via the method below. (start_subscription)
if hass.data[DOMAIN]['configurator']:
_subscribe()
def keep_alive_call(event_time):
"""Call the Wink API endpoints to keep PubNub working."""
_LOGGER.info("Polling the Wink API to keep PubNub updates flowing")
pywink.set_user_agent(str(int(time.time())))
_temp_response = pywink.get_user()
_LOGGER.debug(str(json.dumps(_temp_response)))
time.sleep(1)
pywink.set_user_agent(USER_AGENT)
_temp_response = pywink.wink_api_fetch()
_LOGGER.debug("%s", _temp_response)
_temp_response = pywink.post_session()
_LOGGER.debug("%s", _temp_response)
# Call the Wink API every hour to keep PubNub updates flowing
track_time_interval(hass, keep_alive_call, timedelta(minutes=60))
def start_subscription(event):
"""Start the PubNub subscription."""
_subscribe()
hass.bus.listen(EVENT_HOMEASSISTANT_START, start_subscription)
def stop_subscription(event):
"""Stop the PubNub subscription."""
hass.data[DOMAIN]['pubnub'].unsubscribe()
hass.data[DOMAIN]['pubnub'] = None
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, stop_subscription)
def save_credentials(event):
"""Save currently set OAuth credentials."""
if hass.data[DOMAIN]["oauth"].get("email") is None:
config_path = hass.config.path(WINK_CONFIG_FILE)
_config = pywink.get_current_oauth_credentials()
save_json(config_path, _config)
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, save_credentials)
# Save the users potentially updated oauth credentials at a regular
# interval to prevent them from being expired after a HA reboot.
track_time_interval(hass, save_credentials, timedelta(minutes=60))
def force_update(call):
"""Force all devices to poll the Wink API."""
_LOGGER.info("Refreshing Wink states from API")
for entity_list in hass.data[DOMAIN]['entities'].values():
# Throttle the calls to Wink API
for entity in entity_list:
time.sleep(1)
entity.schedule_update_ha_state(True)
hass.services.register(DOMAIN, SERVICE_REFRESH_STATES, force_update)
def pull_new_devices(call):
"""Pull new devices added to users Wink account since startup."""
_LOGGER.info("Getting new devices from Wink API")
for _component in WINK_COMPONENTS:
discovery.load_platform(hass, _component, DOMAIN, {}, config)
hass.services.register(DOMAIN, SERVICE_ADD_NEW_DEVICES, pull_new_devices)
def set_pairing_mode(call):
"""Put the hub in provided pairing mode."""
hub_name = call.data.get('hub_name')
pairing_mode = call.data.get('pairing_mode')
kidde_code = call.data.get('kidde_radio_code')
for hub in WINK_HUBS:
if hub.name() == hub_name:
hub.pair_new_device(pairing_mode, kidde_radio_code=kidde_code)
def rename_device(call):
"""Set specified device's name."""
# This should only be called on one device at a time.
found_device = None
entity_id = call.data.get('entity_id')[0]
all_devices = []
for list_of_devices in hass.data[DOMAIN]['entities'].values():
all_devices += list_of_devices
for device in all_devices:
if device.entity_id == entity_id:
found_device = device
if found_device is not None:
name = call.data.get('name')
found_device.wink.set_name(name)
hass.services.register(DOMAIN, SERVICE_RENAME_DEVICE, rename_device,
schema=RENAME_DEVICE_SCHEMA)
def delete_device(call):
"""Delete specified device."""
# This should only be called on one device at a time.
found_device = None
entity_id = call.data.get('entity_id')[0]
all_devices = []
for list_of_devices in hass.data[DOMAIN]['entities'].values():
all_devices += list_of_devices
for device in all_devices:
if device.entity_id == entity_id:
found_device = device
if found_device is not None:
found_device.wink.remove_device()
hass.services.register(DOMAIN, SERVICE_DELETE_DEVICE, delete_device,
schema=DELETE_DEVICE_SCHEMA)
hubs = pywink.get_hubs()
for hub in hubs:
if hub.device_manufacturer() == 'wink':
WINK_HUBS.append(hub)
if WINK_HUBS:
hass.services.register(
DOMAIN, SERVICE_SET_PAIRING_MODE, set_pairing_mode,
schema=SET_PAIRING_MODE_SCHEMA)
def nimbus_service_handle(service):
"""Handle nimbus services."""
entity_id = service.data.get('entity_id')[0]
_all_dials = []
for sensor in hass.data[DOMAIN]['entities']['sensor']:
if isinstance(sensor, WinkNimbusDialDevice):
_all_dials.append(sensor)
for _dial in _all_dials:
if _dial.entity_id == entity_id:
if service.service == SERVICE_SET_DIAL_CONFIG:
_dial.set_configuration(**service.data)
if service.service == SERVICE_SET_DIAL_STATE:
_dial.wink.set_state(service.data.get("value"),
service.data.get("labels"))
def siren_service_handle(service):
"""Handle siren services."""
entity_ids = service.data.get('entity_id')
all_sirens = []
for switch in hass.data[DOMAIN]['entities']['switch']:
if isinstance(switch, WinkSirenDevice):
all_sirens.append(switch)
sirens_to_set = []
if entity_ids is None:
sirens_to_set = all_sirens
else:
for siren in all_sirens:
if siren.entity_id in entity_ids:
sirens_to_set.append(siren)
for siren in sirens_to_set:
_man = siren.wink.device_manufacturer()
if (service.service != SERVICE_SET_AUTO_SHUTOFF and
service.service != SERVICE_ENABLE_SIREN and
_man not in ('dome', 'wink')):
_LOGGER.error("Service only valid for Dome or Wink sirens")
return
if service.service == SERVICE_ENABLE_SIREN:
siren.wink.set_state(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_AUTO_SHUTOFF:
siren.wink.set_auto_shutoff(
service.data.get(ATTR_AUTO_SHUTOFF))
elif service.service == SERVICE_SET_CHIME_VOLUME:
siren.wink.set_chime_volume(service.data.get(ATTR_VOLUME))
elif service.service == SERVICE_SET_SIREN_VOLUME:
siren.wink.set_siren_volume(service.data.get(ATTR_VOLUME))
elif service.service == SERVICE_SET_SIREN_TONE:
siren.wink.set_siren_sound(service.data.get(ATTR_TONE))
elif service.service == SERVICE_ENABLE_CHIME:
siren.wink.set_chime(service.data.get(ATTR_TONE))
elif service.service == SERVICE_SIREN_STROBE_ENABLED:
siren.wink.set_siren_strobe_enabled(
service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_CHIME_STROBE_ENABLED:
siren.wink.set_chime_strobe_enabled(
service.data.get(ATTR_ENABLED))
# Load components for the devices in Wink that we support
for wink_component in WINK_COMPONENTS:
hass.data[DOMAIN]['entities'][wink_component] = []
discovery.load_platform(hass, wink_component, DOMAIN, {}, config)
component = EntityComponent(_LOGGER, DOMAIN, hass)
sirens = []
has_dome_or_wink_siren = False
for siren in pywink.get_sirens():
_man = siren.device_manufacturer()
if _man in ("dome", "wink"):
has_dome_or_wink_siren = True
_id = siren.object_id() + siren.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
sirens.append(WinkSirenDevice(siren, hass))
if sirens:
hass.services.register(DOMAIN, SERVICE_SET_AUTO_SHUTOFF,
siren_service_handle,
schema=SET_AUTO_SHUTOFF_SCHEMA)
hass.services.register(DOMAIN, SERVICE_ENABLE_SIREN,
siren_service_handle,
schema=ENABLED_SIREN_SCHEMA)
if has_dome_or_wink_siren:
hass.services.register(DOMAIN, SERVICE_SET_SIREN_TONE,
siren_service_handle,
schema=SET_SIREN_TONE_SCHEMA)
hass.services.register(DOMAIN, SERVICE_ENABLE_CHIME,
siren_service_handle,
schema=SET_CHIME_MODE_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_SIREN_VOLUME,
siren_service_handle,
schema=SET_VOLUME_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_CHIME_VOLUME,
siren_service_handle,
schema=SET_VOLUME_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SIREN_STROBE_ENABLED,
siren_service_handle,
schema=SET_STROBE_ENABLED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_CHIME_STROBE_ENABLED,
siren_service_handle,
schema=SET_STROBE_ENABLED_SCHEMA)
component.add_entities(sirens)
nimbi = []
dials = {}
all_nimbi = pywink.get_cloud_clocks()
all_dials = []
for nimbus in all_nimbi:
if nimbus.object_type() == "cloud_clock":
nimbi.append(nimbus)
dials[nimbus.object_id()] = []
for nimbus in all_nimbi:
if nimbus.object_type() == "dial":
dials[nimbus.parent_id()].append(nimbus)
for nimbus in nimbi:
for dial in dials[nimbus.object_id()]:
all_dials.append(WinkNimbusDialDevice(nimbus, dial, hass))
if nimbi:
hass.services.register(DOMAIN, SERVICE_SET_DIAL_CONFIG,
nimbus_service_handle,
schema=DIAL_CONFIG_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_DIAL_STATE,
nimbus_service_handle,
schema=DIAL_STATE_SCHEMA)
component.add_entities(all_dials)
return True
class WinkAuthCallbackView(HomeAssistantView):
"""Handle OAuth finish callback requests."""
url = '/auth/wink/callback'
name = 'auth:wink:callback'
requires_auth = False
def __init__(self, config, config_file, request_token):
"""Initialize the OAuth callback view."""
self.config = config
self.config_file = config_file
self.request_token = request_token
@callback
def get(self, request):
"""Finish OAuth callback request."""
from aiohttp import web
hass = request.app['hass']
data = request.query
response_message = """Wink has been successfully authorized!
You can close this window now! For the best results you should reboot
HomeAssistant"""
html_response = """<html><head><title>Wink Auth</title></head>
<body><h1>{}</h1></body></html>"""
if data.get('code') is not None:
response = self.request_token(
data.get('code'), self.config_file['client_secret'])
config_contents = {
ATTR_ACCESS_TOKEN: response['access_token'],
ATTR_REFRESH_TOKEN: response['refresh_token'],
ATTR_CLIENT_ID: self.config_file['client_id'],
ATTR_CLIENT_SECRET: self.config_file['client_secret']
}
save_json(hass.config.path(WINK_CONFIG_FILE), config_contents)
hass.async_add_job(setup, hass, self.config)
return web.Response(text=html_response.format(response_message),
content_type='text/html')
error_msg = "No code returned from Wink API"
_LOGGER.error(error_msg)
return web.Response(text=html_response.format(error_msg),
content_type='text/html')
class WinkDevice(Entity):
"""Representation a base Wink device."""
def __init__(self, wink, hass):
"""Initialize the Wink device."""
self.hass = hass
self.wink = wink
hass.data[DOMAIN]['pubnub'].add_subscription(
self.wink.pubnub_channel, self._pubnub_update)
hass.data[DOMAIN]['unique_ids'].append(self.wink.object_id() +
self.wink.name())
def _pubnub_update(self, message):
_LOGGER.debug(message)
try:
if message is None:
_LOGGER.error("Error on pubnub update for %s "
"polling API for current state", self.name)
self.schedule_update_ha_state(True)
else:
self.wink.pubnub_update(message)
self.schedule_update_ha_state()
except (ValueError, KeyError, AttributeError):
_LOGGER.error("Error in pubnub JSON for %s "
"polling API for current state", self.name)
self.schedule_update_ha_state(True)
@property
def name(self):
"""Return the name of the device."""
return self.wink.name()
@property
def unique_id(self):
"""Return the unique id of the Wink device."""
if hasattr(self.wink, 'capability') and \
self.wink.capability() is not None:
return "{}_{}".format(self.wink.object_id(),
self.wink.capability())
return self.wink.object_id()
@property
def available(self):
"""Return true if connection == True."""
return self.wink.available()
def update(self):
"""Update state of the device."""
self.wink.update_state()
@property
def should_poll(self):
"""Only poll if we are not subscribed to pubnub."""
return self.wink.pubnub_channel is None
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {}
battery = self._battery_level
if battery:
attributes[ATTR_BATTERY_LEVEL] = battery
man_dev_model = self._manufacturer_device_model
if man_dev_model:
attributes["manufacturer_device_model"] = man_dev_model
man_dev_id = self._manufacturer_device_id
if man_dev_id:
attributes["manufacturer_device_id"] = man_dev_id
dev_man = self._device_manufacturer
if dev_man:
attributes["device_manufacturer"] = dev_man
model_name = self._model_name
if model_name:
attributes["model_name"] = model_name
tamper = self._tamper
if tamper is not None:
attributes["tamper_detected"] = tamper
return attributes
@property
def _battery_level(self):
"""Return the battery level."""
if self.wink.battery_level() is not None:
return self.wink.battery_level() * 100
@property
def _manufacturer_device_model(self):
"""Return the manufacturer device model."""
return self.wink.manufacturer_device_model()
@property
def _manufacturer_device_id(self):
"""Return the manufacturer device id."""
return self.wink.manufacturer_device_id()
@property
def _device_manufacturer(self):
"""Return the device manufacturer."""
return self.wink.device_manufacturer()
@property
def _model_name(self):
"""Return the model name."""
return self.wink.model_name()
@property
def _tamper(self):
"""Return the devices tamper status."""
if hasattr(self.wink, 'tamper_detected'):
return self.wink.tamper_detected()
return None
class WinkSirenDevice(WinkDevice):
"""Representation of a Wink siren device."""
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['switch'].append(self)
@property
def state(self):
"""Return sirens state."""
if self.wink.state():
return STATE_ON
return STATE_OFF
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return "mdi:bell-ring"
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = super(WinkSirenDevice, self).device_state_attributes
auto_shutoff = self.wink.auto_shutoff()
if auto_shutoff is not None:
attributes["auto_shutoff"] = auto_shutoff
siren_volume = self.wink.siren_volume()
if siren_volume is not None:
attributes["siren_volume"] = siren_volume
chime_volume = self.wink.chime_volume()
if chime_volume is not None:
attributes["chime_volume"] = chime_volume
strobe_enabled = self.wink.strobe_enabled()
if strobe_enabled is not None:
attributes["siren_strobe_enabled"] = strobe_enabled
chime_strobe_enabled = self.wink.chime_strobe_enabled()
if chime_strobe_enabled is not None:
attributes["chime_strobe_enabled"] = chime_strobe_enabled
siren_sound = self.wink.siren_sound()
if siren_sound is not None:
attributes["siren_sound"] = siren_sound
chime_mode = self.wink.chime_mode()
if chime_mode is not None:
attributes["chime_mode"] = chime_mode
return attributes
class WinkNimbusDialDevice(WinkDevice):
"""Representation of the Quirky Nimbus device."""
def __init__(self, nimbus, dial, hass):
"""Initialize the Nimbus dial."""
super().__init__(dial, hass)
self.parent = nimbus
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['sensor'].append(self)
@property
def state(self):
"""Return dials current value."""
return self.wink.state()
@property
def name(self):
"""Return the name of the device."""
return self.parent.name() + " dial " + str(self.wink.index() + 1)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = super(WinkNimbusDialDevice, self).device_state_attributes
dial_attributes = self.dial_attributes()
return {**attributes, **dial_attributes}
def dial_attributes(self):
"""Return the dial only attributes."""
return {
"labels": self.wink.labels(),
"position": self.wink.position(),
"rotation": self.wink.rotation(),
"max_value": self.wink.max_value(),
"min_value": self.wink.min_value(),
"num_ticks": self.wink.ticks(),
"scale_type": self.wink.scale(),
"max_position": self.wink.max_position(),
"min_position": self.wink.min_position()
}
def set_configuration(self, **kwargs):
"""
Set the dial config.
Anything not sent will default to current setting.
"""
attributes = {**self.dial_attributes(), **kwargs}
min_value = attributes["min_value"]
max_value = attributes["max_value"]
rotation = attributes["rotation"]
ticks = attributes["num_ticks"]
scale = attributes["scale_type"]
min_position = attributes["min_position"]
max_position = attributes["max_position"]
self.wink.set_configuration(min_value, max_value, rotation,
scale=scale, ticks=ticks,
min_position=min_position,
max_position=max_position)
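# Illustrative sketch, not part of the original component (the entity id below
# is hypothetical): the nimbus dial services registered in setup() are invoked
# like any other Home Assistant service, for example:
#
#     hass.services.call(DOMAIN, SERVICE_SET_DIAL_STATE, {
#         ATTR_ENTITY_ID: ["sensor.nimbus_dial_1"],
#         ATTR_VALUE: 42,
#         ATTR_LABELS: ["42", "meaning of life"],
#     })
#
# The accepted fields are validated against DIAL_STATE_SCHEMA and
# DIAL_CONFIG_SCHEMA defined near the top of this module.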
| {
"content_hash": "403621608c0761851187a0d380a15adf",
"timestamp": "",
"source": "github",
"line_count": 886,
"max_line_length": 78,
"avg_line_length": 36.87020316027088,
"alnum_prop": 0.6033918021244681,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "2b03d7711acbd6b60720e2712ac87da408322a39",
"size": "32667",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/wink/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
"""
sphinx.util.i18n
~~~~~~~~~~~~~~~~
    Utility functions for i18n.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import gettext
from os import path
from collections import namedtuple
from babel.messages.pofile import read_po
from babel.messages.mofile import write_mo
from sphinx.util.osutil import walk
from sphinx.util import SEP
LocaleFileInfoBase = namedtuple('CatalogInfo', 'base_dir,domain')
class CatalogInfo(LocaleFileInfoBase):
@property
def po_file(self):
return self.domain + '.po'
@property
def mo_file(self):
return self.domain + '.mo'
@property
def po_path(self):
return path.join(self.base_dir, self.po_file)
@property
def mo_path(self):
return path.join(self.base_dir, self.mo_file)
def is_outdated(self):
return (
not path.exists(self.mo_path) or
path.getmtime(self.mo_path) < path.getmtime(self.po_path))
def write_mo(self, locale):
with open(self.po_path, 'rt') as po:
with open(self.mo_path, 'wb') as mo:
write_mo(mo, read_po(po, locale))
def find_catalog(docname, compaction):
if compaction:
ret = docname.split(SEP, 1)[0]
else:
ret = docname
return ret
def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction):
if not(lang and locale_dirs):
return []
domain = find_catalog(docname, compaction)
files = [gettext.find(domain, path.join(srcdir, dir_), [lang])
for dir_ in locale_dirs]
files = [path.relpath(f, srcdir) for f in files if f]
return files
def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact=False,
force_all=False):
"""
:param list locale_dirs:
        list of paths as `['locale_dir1', 'locale_dir2', ...]` to find
translation catalogs. Each path contains a structure such as
`<locale>/LC_MESSAGES/domain.po`.
:param str locale: a language as `'en'`
:param list domains: list of domain names to get. If empty list or None
is specified, get all domain names. default is None.
:param boolean gettext_compact:
* False: keep domains directory structure (default).
* True: domains in the sub directory will be merged into 1 file.
:param boolean force_all:
Set True if you want to get all catalogs rather than updated catalogs.
default is False.
:return: [CatalogInfo(), ...]
"""
if not locale:
return [] # locale is not specified
catalogs = set()
for locale_dir in locale_dirs:
if not locale_dir:
continue # skip system locale directory
base_dir = path.join(locale_dir, locale, 'LC_MESSAGES')
if not path.exists(base_dir):
continue # locale path is not found
for dirpath, dirnames, filenames in walk(base_dir, followlinks=True):
filenames = [f for f in filenames if f.endswith('.po')]
for filename in filenames:
base = path.splitext(filename)[0]
domain = path.relpath(path.join(dirpath, base), base_dir)
if gettext_compact and path.sep in domain:
domain = path.split(domain)[0]
domain = domain.replace(path.sep, SEP)
if domains and domain not in domains:
continue
cat = CatalogInfo(base_dir, domain)
if force_all or cat.is_outdated():
catalogs.add(cat)
return catalogs
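# Illustrative usage sketch, not part of the original module (the 'locale'
# directory name and language code are assumptions): compile the catalogs that
# find_catalog_source_files() reports, writing each .mo file next to its .po
# source:
#
#     for catalog in find_catalog_source_files(['locale'], 'en'):
#         catalog.write_mo('en')  # only outdated catalogs are returned by default
#
# Pass force_all=True to recompile every catalog regardless of timestamps.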
| {
"content_hash": "f828cc96fbbb387503f2bea7882af763",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 87,
"avg_line_length": 30.97457627118644,
"alnum_prop": 0.612859097127223,
"repo_name": "ArcherSys/ArcherSys",
"id": "efdc31828be89475fb256d98e9dabf0db0114006",
"size": "3679",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/site-packages/sphinx/util/i18n.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import print_function
import logging
import sys
import time
# THIRD PARTY
from django.db import DataError
from django.db.migrations.operations.base import Operation
from google.appengine.api.datastore import Delete, Query, Get, Key, Put, RunInTransaction
from google.appengine.api import datastore_errors
from google.appengine.runtime import DeadlineExceededError
# DJANGAE
from djangae.db.backends.appengine.caching import remove_entities_from_cache_by_key
from djangae.db.backends.appengine.commands import reserve_id
from . import mapper_library
from .constants import TASK_RECHECK_INTERVAL
from .utils import do_with_retry, clone_entity
TESTING = 'test' in sys.argv
class DjangaeMigration(object):
""" Base class to enable us to distinguish between Djangae migrations and Django migrations.
"""
pass
class BaseEntityMapperOperation(Operation, DjangaeMigration):
""" Base class for operations which map over Datastore Entities, rather than Django model
instances.
"""
reversible = False
reduces_to_sql = False
def __init__(self, *args, **kwargs):
self.uid = kwargs.pop("uid", "")
self.shard_count = kwargs.pop("shard_count", None)
self.entities_per_task = kwargs.pop("entities_per_task", None)
self.queue = kwargs.pop("queue", None)
self.skip_errors = kwargs.pop("skip_errors", False)
super(BaseEntityMapperOperation, self).__init__(*args, **kwargs)
def state_forwards(self, app_label, state):
""" As all Djangae migrations are only supplements to the Django migrations, we don't need
to do any altering of the model state.
"""
pass
def _print(self, *objects):
if not TESTING:
print(*objects)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# Django's `migrate` command writes to stdout without a trailing line break, which means
# that unless we print a blank line our first print statement is on the same line
self._print("") # yay
self.identifier = self._get_identifier(app_label, schema_editor, from_state, to_state)
if self.uid:
self.identifier = "{}.{}".format(self.uid, self.identifier)
self._set_map_kind(app_label, schema_editor, from_state, to_state)
self._pre_map_hook(app_label, schema_editor, from_state, to_state)
self.namespace = schema_editor.connection.settings_dict.get("NAMESPACE")
if mapper_library.mapper_exists(self.identifier, self.namespace):
self._wait_until_task_finished()
return
self._print("Deferring migration operation task for %s" % self.identifier)
self._start_task()
def database_backwards(self, app_label, schema_editor, from_state, to_state):
raise NotImplementedError("Erm...? Help?!")
def _wait_until_task_finished(self):
if mapper_library.is_mapper_finished(self.identifier, self.namespace):
self._print("Task for migration operation '%s' already finished. Skipping." % self.identifier)
return
while mapper_library.is_mapper_running(self.identifier, self.namespace):
self._print("Waiting for migration operation '%s' to complete." % self.identifier)
time.sleep(TASK_RECHECK_INTERVAL)
self._print("Migration operation '%s' completed!" % self.identifier)
def _start_task(self):
assert not mapper_library.is_mapper_running(self.identifier, self.namespace), "Migration started by separate thread?"
query = Query(self.map_kind, namespace=self.namespace)
return mapper_library.start_mapping(
self.identifier, query, self, operation_method="_wrapped_map_entity",
shard_count=self.shard_count, entities_per_task=self.entities_per_task,
queue=self.queue
)
def _wrapped_map_entity(self, entity):
""" Wrapper for self._map_entity which removes the entity from Djangae's cache. """
# TODO: Note that other threads (from the general application running) could also be
# modifying the entity, and that we're not using Djangae's transaction managers for our
# stuff here.
remove_entities_from_cache_by_key([entity.key()], self.namespace)
try:
do_with_retry(self._map_entity, entity)
except DeadlineExceededError:
            # This is (probably) not an error with the individual entity, but more likely that the
# task has tried to process too many entities. Either way, we always re-raise it so
# that the mapper library can deal with it
raise
except Exception:
if self.skip_errors:
logging.exception(
"Error processing operation %s for entity %s. Skipping.",
self.identifier, entity.key()
)
else:
raise
if entity.key():
# Assuming the entity hasn't been deleted and/or it's key been wiped...
remove_entities_from_cache_by_key([entity.key()], self.namespace)
##############################################################################################
# METHODS FOR SUBCLASSES TO IMPLEMENT
##############################################################################################
def _pre_map_hook(self, app_label, schema_editor, from_state, to_state):
""" A hook for subclasses to do anything that needs to be done before the mapping starts
but which cannot be done in __init__ due to the need for the schema_editor/state/etc.
"""
pass
def _get_identifier(self, app_label, schema_editor, from_state, to_state):
""" Return an ID for self.identifier, which must be a string which uniquely identifies this operation
across the entire site. It must be able to fit in a Datastore string property.
This will likely need to use app_label combined with values passed to __init__.
"""
raise NotImplementedError(
"Subclasses of EntityMapperOperation must implement _get_identifier"
)
def _set_map_kind(self, app_label, schema_editor, from_state, to_state):
""" Set an attribute 'map_kind' of the 'kind' of Datastore Entities to be mapped over. """
raise NotImplementedError(
"Subclasses of EntityMapperOperation must implement _set_map_kind"
)
def _map_entity(self, entity):
""" Hook for subclasses to implement. This is called for every Entity and should do
whatever data manipulation is necessary. Note that whatever you do to the entity
must be done transactionally; this is not wrapped in a transaction.
"""
raise NotImplementedError("Subclasses of EntityMapperOperation must implement _map_entity")
class AddFieldData(BaseEntityMapperOperation):
def __init__(self, model_name, name, field, **kwargs):
self.model_name = model_name
self.name = name
self.field = field
super(AddFieldData, self).__init__(**kwargs)
def _get_identifier(self, app_label, schema_editor, from_state, to_state):
identifier = "%s.%s.%s:%s" % (
app_label, self.model_name, self.__class__.__name__, self.name
)
return identifier
def _set_map_kind(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
kind = model._meta.db_table
self.map_kind = kind
def _map_entity(self, entity):
column_name = self.field.db_column or self.name
# Call get_default() separately for each entity, in case it's a callable like timezone.now
value = self.field.get_default()
def txn(entity):
entity = Get(entity.key())
entity[column_name] = value
Put(entity)
RunInTransaction(txn, entity)
class RemoveFieldData(BaseEntityMapperOperation):
def __init__(self, model_name, name, field, **kwargs):
self.model_name = model_name
self.name = name
self.field = field
super(RemoveFieldData, self).__init__(**kwargs)
def _get_identifier(self, app_label, schema_editor, from_state, to_state):
identifier = "%s.%s.%s:%s" % (
app_label, self.model_name, self.__class__.__name__, self.name
)
return identifier
def _set_map_kind(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
kind = model._meta.db_table
self.map_kind = kind
def _map_entity(self, entity):
column_name = self.field.db_column or self.name
def txn(entity):
entity = Get(entity.key())
try:
del entity[column_name]
except KeyError:
return
Put(entity)
RunInTransaction(txn, entity)
class CopyFieldData(BaseEntityMapperOperation):
def __init__(self, model_name, from_column_name, to_column_name, **kwargs):
self.model_name = model_name
self.from_column_name = from_column_name
self.to_column_name = to_column_name
super(CopyFieldData, self).__init__(**kwargs)
def _get_identifier(self, app_label, schema_editor, from_state, to_state):
identifier = "%s.%s.%s:%s.%s" % (
app_label, self.model_name, self.__class__.__name__,
self.from_column_name, self.to_column_name
)
return identifier
def _set_map_kind(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
kind = model._meta.db_table
self.map_kind = kind
def _map_entity(self, entity):
def txn(entity):
entity = Get(entity.key())
try:
entity[self.to_column_name] = entity[self.from_column_name]
except KeyError:
return
Put(entity)
RunInTransaction(txn, entity)
class DeleteModelData(BaseEntityMapperOperation):
def __init__(self, model_name, **kwargs):
self.model_name = model_name
super(DeleteModelData, self).__init__(**kwargs)
def _get_identifier(self, app_label, schema_editor, from_state, to_state):
identifier = "%s.%s:%s" % (
app_label, self.model_name, self.__class__.__name__
)
return identifier
def _set_map_kind(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
kind = model._meta.db_table
self.map_kind = kind
def _map_entity(self, entity):
try:
Delete(entity.key())
except datastore_errors.EntityNotFoundError:
return
class CopyModelData(BaseEntityMapperOperation):
""" Copies entities from one entity kind to another. """
def __init__(
self, model_name, to_app_label, to_model_name,
overwrite_existing=False, **kwargs
):
self.model_name = model_name
self.to_app_label = to_app_label
self.to_model_name = to_model_name
self.overwrite_existing = overwrite_existing
super(CopyModelData, self).__init__(**kwargs)
def _get_identifier(self, app_label, schema_editor, from_state, to_state):
identifier = "%s.%s.%s:%s.%s" % (
app_label, self.model_name, self.__class__.__name__,
self.to_app_label, self.to_model_name
)
return identifier
def _set_map_kind(self, app_label, schema_editor, from_state, to_state):
""" We need to map over the entities that we're copying *from*. """
model = to_state.apps.get_model(app_label, self.model_name)
kind = model._meta.db_table
self.map_kind = kind
def _pre_map_hook(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(self.to_app_label, self.to_model_name)
self.to_kind = to_model._meta.db_table
def _map_entity(self, entity):
new_key = Key.from_path(self.to_kind, entity.key().id_or_name(), namespace=self.namespace)
def txn():
try:
existing = Get(new_key)
except datastore_errors.EntityNotFoundError:
existing = None
if existing and not self.overwrite_existing:
return
if isinstance(entity.key().id_or_name(), (int, long)):
reserve_id(self.to_kind, entity.key().id_or_name(), self.namespace)
new_entity = clone_entity(entity, new_key)
Put(new_entity)
RunInTransaction(txn)
class CopyModelDataToNamespace(BaseEntityMapperOperation):
""" Copies entities from one Datastore namespace to another. """
def __init__(
self, model_name, to_namespace, to_app_label=None, to_model_name=None,
overwrite_existing=False, **kwargs
):
self.model_name = model_name
self.to_namespace = to_namespace
self.to_app_label = to_app_label
self.to_model_name = to_model_name
self.overwrite_existing = overwrite_existing
super(CopyModelDataToNamespace, self).__init__(**kwargs)
def _get_identifier(self, app_label, schema_editor, from_state, to_state):
to_app_label = self.to_app_label or app_label
to_model_name = self.to_model_name or self.model_name
identifier = "%s.%s.%s:%s.%s.%s" % (
app_label, self.model_name, self.__class__.__name__, self.to_namespace, to_app_label,
to_model_name
)
return identifier
def _set_map_kind(self, app_label, schema_editor, from_state, to_state):
""" We need to map over the entities that we're copying *from*. """
model = to_state.apps.get_model(app_label, self.model_name)
self.map_kind = model._meta.db_table
def _pre_map_hook(self, app_label, schema_editor, from_state, to_state):
to_app_label = self.to_app_label or app_label
to_model_name = self.to_model_name or self.model_name
to_model = to_state.apps.get_model(to_app_label, to_model_name)
self.to_kind = to_model._meta.db_table
def _map_entity(self, entity):
new_key = Key.from_path(
self.to_kind, entity.key().id_or_name(), namespace=self.to_namespace
)
parent = entity.parent()
if parent:
# If the entity has an ancestor then we need to make sure that that ancestor exists in
# the new namespace as well
new_parent_key = Key.from_path(
                parent.kind(), parent.id_or_name(), namespace=self.to_namespace
)
new_parent_exists = Get([new_parent_key])[0]
if not new_parent_exists:
raise DataError(
"Trying to copy entity with an ancestor (%r) to a new namespace but the "
"ancestor does not exist in the new namespace. Copy the ancestors first."
% entity.key()
)
def txn():
existing = Get([new_key])[0]
if existing and not self.overwrite_existing:
return
if isinstance(entity.key().id_or_name(), (int, long)):
reserve_id(self.to_kind, entity.key().id_or_name(), self.to_namespace)
new_entity = clone_entity(entity, new_key)
Put(new_entity)
RunInTransaction(txn)
class MapFunctionOnEntities(BaseEntityMapperOperation):
""" Operation for calling a custom function on each entity of a given model. """
def __init__(self, model_name, function, **kwargs):
self.model_name = model_name
self.function = function
super(MapFunctionOnEntities, self).__init__(**kwargs)
def _get_identifier(self, app_label, schema_editor, from_state, to_state):
identifier = "%s.%s.%s:%s" % (
app_label, self.model_name, self.__class__.__name__, self.function.__name__
)
return identifier
def _set_map_kind(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
kind = model._meta.db_table
self.map_kind = kind
def _map_entity(self, entity):
self.function(entity)
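# Illustrative sketch, not part of the original module (model, field and queue
# names are hypothetical): these operations are intended to be listed alongside
# the usual schema operations in a Django migration's `operations` list:
#
#     from django.db import migrations, models
#     from djangae.db.migrations.operations import AddFieldData
#
#     class Migration(migrations.Migration):
#         dependencies = [("myapp", "0002_add_flag_column")]
#         operations = [
#             AddFieldData(
#                 "mymodel", "flag", models.BooleanField(default=False),
#                 shard_count=32, entities_per_task=100, queue="migrations",
#             ),
#         ]
#
# The extra kwargs (uid, shard_count, entities_per_task, queue, skip_errors)
# are consumed by BaseEntityMapperOperation.__init__ and control how the
# datastore mapping task is sharded and deferred.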
| {
"content_hash": "4a3543d3f6f0d304698eb6c9890c1636",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 125,
"avg_line_length": 39.38424821002387,
"alnum_prop": 0.6136225912010665,
"repo_name": "grzes/djangae",
"id": "7ee4e12198e361ca9efa8884b7260f2f67ce9954",
"size": "16517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangae/db/migrations/operations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2305"
},
{
"name": "Python",
"bytes": "1074740"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
} |
from girc import message
from girc.common import iterable
class Handler(object):
"""A handler object manages a handler callback.
It wraps the callback with additional metadata governing matches.
It is normally registered to a client automatically at init time, or you can manually register it later
with handler.register(client).
You can set before and after kwargs with other handlers (or lists of handlers) to control the order in which
handlers are executed.
You can also set sync=True to specify that the client should not process the next message until this handler
has finished.
(In truth, sync=True is just a shortcut for before='sync'.
after='sync' is also valid, though likely not useful.)
To support its use as a decorator for methods of a class, it will bind to a class instance on __get__
just like a function object. In addition, if you do instance.handler.register(client), then any callbacks
from that client will be associated with that instance.
So for example, this will work how you expect:
class Foo(object):
@Handler(**match_args)
def callback(self, client, msg):
...
def __init__(self, client):
self.callback.register(client)
but so will this:
@Handler(**match_args)
def callback(client, msg):
...
def set_client(client):
callback.register(client)
"""
def __init__(self, client=None, callback=None, before=[], after=[], sync=False, **match_args):
"""Register a handler for client to call when a message matches.
Match args are as per message.match()
Callback should take args (client, message) and may return True to de-register itself.
If callback object is already a Handler, the new init data is merged into the old handler.
Callback may be omitted, in which case the first time Handler() is called it will act as a decorator,
binding to the given argument.
Client may be omitted, in which case the handler must later be bound to a client with handler.register().
"""
self.match_list = [] # list of match_args dicts to match on
self.client_binds = {} # maps {client: set(instances to bind and call)}
		self.before = set(before) if iterable(before) else {before}
		self.after = set(after) if iterable(after) else {after}
if sync:
self.before.add('sync')
self.add_match(**match_args)
self.set_callback(callback)
if client:
self.register(client)
def __repr__(self):
return "<{cls.__name__}({self.callback})>".format(cls=type(self), self=self)
def add_match(self, **match_args):
"""Add a new set of match_args to the handler. Either this new set or the existing set matching will
trigger the handler."""
self.match_list.append(match_args)
def register(self, client, instance=None):
"""Register handler with a client. Optional arg instance is for internal use, and is used to implement
the "call with callback bound to instance" functionality as described in the class docstring."""
self.client_binds.setdefault(client, set()).add(instance)
client.message_handlers.add(self)
client.logger.info("Registering handler {} with match args {}".format(self, self.match_list))
def unregister(self, client, instance=None):
"""Remove association of handler with client."""
# note: if instance given, we only want to disassociate that instance with that client,
# unless it is the last bind for that client.
if client not in self.client_binds:
return
self.client_binds[client].discard(instance)
if not self.client_binds[client]:
client.message_handlers.discard(self)
del self.client_binds[client]
@classmethod
def find_handlers(cls, instance):
"""Returns a set of BoundHandlers for Handlers that are methods for given instance."""
result = set()
for attr in dir(instance):
value = getattr(instance, attr)
if isinstance(value, BoundHandler):
result.add(value)
return result
@classmethod
def register_all(cls, client, instance):
"""Find methods of the given instance that are Handlers, and register them to client."""
for handler in cls.find_handlers(instance):
handler.register(client)
@classmethod
def unregister_all(cls, client, instance):
"""As register_all(), unregisters any handlers for instance if registered"""
for handler in cls.find_handlers(instance):
handler.unregister(client)
def unregister_for_client(self, client):
"""You probably don't want this. It removes all registrations with client, not just for one instance.
It is mainly intended for a client to call when it is stopping."""
self.client_binds.pop(client, None)
def set_callback(self, callback):
self.callback = callback
def __call__(self, *args, **kwargs):
"""If callback not set, set callback and return self (for decorator use). Else, call callback normally."""
if self.callback:
return self.callback(*args, **kwargs)
self.set_callback(*args, **kwargs)
return self
def __get__(self, instance, cls):
if instance is None:
return self
return BoundHandler(self, instance)
def _handle(self, client, msg, instance=None):
"""subclasses can hook here to customise how the callback is called without changing the behaviour
of a naive __call__."""
return self(instance, client, msg) if instance else self(client, msg)
def handle(self, client, msg):
try:
if not any(message.match(msg, **match_args) for match_args in self.match_list):
return
except message.InvalidMessage:
client.logger.warning("Problem with message {} while matching handler {}".format(msg, self),
exc_info=True)
return
if not self.callback:
return
client.logger.debug("Handling message {} with handler {}".format(msg, self))
for instance in self.client_binds.get(client, set()).copy():
try:
ret = self._handle(client, msg, instance=instance)
except Exception:
client.logger.exception("Handler {} failed{}".format(
self, (' for instance {}'.format(instance) if instance else '')))
else:
if ret:
self.unregister(client, instance)
class BoundHandler(object):
"""A wrapper around a handler that applies the bound instance to certain operations."""
def __init__(self, handler, instance):
self.handler = handler
self.instance = instance
def register(self, client):
return self.handler.register(client, self.instance)
def unregister(self, client):
return self.handler.unregister(client, self.instance)
def __call__(self, *args, **kwargs):
if not self.handler.callback:
raise ValueError("Cannot set callback from BoundHandler")
return self.handler(self.instance, *args, **kwargs)
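# Hedged usage sketch (added for illustration; not part of the original module).
# It exercises only what is defined above: decorating a plain function with
# Handler and registering it later. The match argument command='PING' assumes
# message.match() accepts a `command` keyword, and `client` stands in for a
# girc client instance created elsewhere.
@Handler(command='PING')
def _example_log_ping(client, msg):
	client.logger.info("saw a PING: {}".format(msg))
# _example_log_ping.register(client)  # attach to an existing client instance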
| {
"content_hash": "2ed1da598cfac42c51ca8439d0f153b6",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 109,
"avg_line_length": 38.86390532544379,
"alnum_prop": 0.7193970767356882,
"repo_name": "ekimekim/girc",
"id": "f64ba9913d9f90f4debe2a3ae4e6510c1343234a",
"size": "6569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "girc/handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78864"
}
],
"symlink_target": ""
} |
import pytest
from web3.utils.abi import (
function_abi_to_4byte_selector,
)
@pytest.fixture(autouse=True)
def wait_for_first_block(web3, wait_for_block):
wait_for_block(web3)
@pytest.fixture()
def math_contract(web3, MATH_ABI, MATH_CODE, MATH_RUNTIME, MATH_SOURCE,
wait_for_transaction):
MathContract = web3.eth.contract(
abi=MATH_ABI,
code=MATH_CODE,
code_runtime=MATH_RUNTIME,
source=MATH_SOURCE,
)
deploy_txn = MathContract.deploy({'from': web3.eth.coinbase})
deploy_receipt = wait_for_transaction(web3, deploy_txn)
assert deploy_receipt is not None
contract_address = deploy_receipt['contractAddress']
    assert web3.isAddress(contract_address)
_math_contract = MathContract(address=contract_address)
return _math_contract
def test_contract_estimateGas(web3, math_contract):
increment_abi = math_contract._find_matching_fn_abi('increment', [])
call_data = function_abi_to_4byte_selector(increment_abi)
gas_estimate = math_contract.estimateGas().increment()
try:
assert abs(gas_estimate - 21272) < 200 # Geth
except AssertionError:
assert abs(gas_estimate - 42820) < 200 # TestRPC
| {
"content_hash": "42f29a9995cf02422e9c7475430d1058",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 72,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.6804878048780488,
"repo_name": "shravan-shandilya/web3.py",
"id": "365c26f139ddc0e5ac2968a64bacb813fe5b7ead",
"size": "1230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/contracts/test_contract_estimateGas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "926"
},
{
"name": "Python",
"bytes": "306962"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from django.views.decorators.cache import never_cache
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST
@never_cache
def metrics(request):
"""Exposes prometheus metrics"""
return HttpResponse(generate_latest(), content_type=CONTENT_TYPE_LATEST)
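# Hedged wiring sketch (added for illustration; not part of the original module).
# A project URLconf would typically expose this view along these lines; the URL
# pattern and the use of django.conf.urls.url() are assumptions about the
# project's Django version.
#
#     from django.conf.urls import url
#     from gamecraft.django_prometheus_light.views import metrics
#
#     urlpatterns = [
#         url(r"^metrics$", metrics, name="prometheus-metrics"),
#     ]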
| {
"content_hash": "1642e954c81d9a70db0125c007d3b6b6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 76,
"avg_line_length": 31,
"alnum_prop": 0.7935483870967742,
"repo_name": "micktwomey/gamecraft-mk-iii",
"id": "6d845bead79007d9272ad8979290c4b8b801b0c1",
"size": "310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gamecraft/django_prometheus_light/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25876"
},
{
"name": "HTML",
"bytes": "24346"
},
{
"name": "JavaScript",
"bytes": "788538"
},
{
"name": "Makefile",
"bytes": "1392"
},
{
"name": "Python",
"bytes": "137898"
},
{
"name": "Shell",
"bytes": "1271"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import iso8601
import arrow
import logging
from impactstoryanalytics.widgets.widget import Widget, get_raw_dataclip_data
logger = logging.getLogger("impactstoryanalytics.widgets.hourly_new_users")
class Hourly_new_users(Widget):
new_accounts_query_url = "https://dataclips.heroku.com/hefcjkzcteluxosfhdvsofefjrjr.json"
def get_data(self):
number_of_datapoints = 72
datapoints = get_raw_dataclip_data(self.new_accounts_query_url)["values"][0:number_of_datapoints]
pans = Widget.get_time_pan_list(number_of_datapoints, interval="hour")
for datapoint in datapoints:
(iso_time, new_accounts, total_accounts) = datapoint
time = arrow.get(str(iso_time), 'YYYY-MM-DDTHH:mm:ss')
pans.add_to_pan(time, "new_accounts", int(new_accounts))
return pans.replace_NAs_with_zeroes().as_list()
| {
"content_hash": "8568567a2b6243c2848c2e80b9d3196e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 105,
"avg_line_length": 37.625,
"alnum_prop": 0.70874861572536,
"repo_name": "Impactstory/impactstory-analytics",
"id": "af530a8459e4cac1eacb55614d2a58e268a2a4eb",
"size": "903",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "impactstoryanalytics/widgets/hourly_new_users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25774"
},
{
"name": "HTML",
"bytes": "16197"
},
{
"name": "JavaScript",
"bytes": "414447"
},
{
"name": "Perl",
"bytes": "60"
},
{
"name": "Python",
"bytes": "103688"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
__metaclass__ = type
import pickle
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.websocket
import base64
import threading
from time import sleep
from RealSense import Streamer
SERVER_CLOSING = 'Server has shut down'
LOG_PREFIX = 'SERVER:'
PORT_NUMBER = 8888
class SentinelServer(tornado.websocket.WebSocketHandler):
stream = None
def initialize(self):
self.realsense_stream = SentinelServer.stream
def send_frame(self):
frame = self.realsense_stream.next()
encoded_frame = pickle.dumps(frame, 2)
self.write_message(encoded_frame, binary = True)
def on_pong(self, pong):
self.log('Pong:', pong)
def open(self):
self.stream.set_nodelay(True)
self.ping(b'ping')
self.send_frame()
def log(self, *args):
print(LOG_PREFIX, *args)
def on_message(self, message):
self.log(pickle.loads(message))
self.send_frame()
def on_close(self):
self.log('Lost connection to client.')
if __name__ == '__main__':
with Streamer() as stream:
try:
SentinelServer.stream = stream
application = tornado.web.Application([(r'/ws', SentinelServer),])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(PORT_NUMBER)
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt as interrupt:
pass
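# Hedged client sketch (added for illustration; not part of the original module).
# It mirrors the pickle framing used by SentinelServer above and assumes the
# Tornado websocket client API (tornado.websocket.websocket_connect); the host
# name and the 'ack' payload are placeholders.
#
#     from tornado import gen, ioloop
#     from tornado.websocket import websocket_connect
#
#     @gen.coroutine
#     def consume_frames():
#         conn = yield websocket_connect('ws://localhost:%d/ws' % PORT_NUMBER)
#         while True:
#             frame = yield conn.read_message()
#             if frame is None:  # server closed the connection
#                 break
#             print(pickle.loads(frame))
#             conn.write_message(pickle.dumps('ack', 2), binary=True)
#
#     ioloop.IOLoop.current().run_sync(consume_frames)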
| {
"content_hash": "450e8439cd071d1ac52151a8f223c316",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 82,
"avg_line_length": 26.083333333333332,
"alnum_prop": 0.6466453674121406,
"repo_name": "MST-MRR/DroneKit",
"id": "6594b5868bc22d35459865a47e5d5a8722108e0d",
"size": "1565",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "Vision/HTTPServer/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "4092"
},
{
"name": "C++",
"bytes": "551725"
},
{
"name": "CSS",
"bytes": "4784"
},
{
"name": "HTML",
"bytes": "267862"
},
{
"name": "JavaScript",
"bytes": "269634"
},
{
"name": "Makefile",
"bytes": "5688"
},
{
"name": "Python",
"bytes": "314086"
}
],
"symlink_target": ""
} |
"""
Copyright 2012 Basho Technologies, Inc.
This file is provided to you under the Apache License,
Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import json
import string
import re
from cgi import parse_header
from email import message_from_string
from riak.util import decode_index_value
from riak.client.index_page import CONTINUATION
class RiakHttpStream(object):
"""
Base class for HTTP streaming iterators.
"""
BLOCK_SIZE = 2048
def __init__(self, response):
self.response = response
self.buffer = ''
self.response_done = False
def __iter__(self):
return self
def _read(self):
chunk = self.response.read(self.BLOCK_SIZE)
if chunk == '':
self.response_done = True
self.buffer += chunk
def next(self):
raise NotImplementedError
def close(self):
pass
class RiakHttpJsonStream(RiakHttpStream):
_json_field = None
def next(self):
while '}' not in self.buffer and not self.response_done:
self._read()
if '}' in self.buffer:
idx = string.index(self.buffer, '}') + 1
chunk = self.buffer[:idx]
self.buffer = self.buffer[idx:]
field = json.loads(chunk)[self._json_field]
return field
else:
raise StopIteration
class RiakHttpKeyStream(RiakHttpJsonStream):
"""
Streaming iterator for list-keys over HTTP
"""
_json_field = u'keys'
class RiakHttpBucketStream(RiakHttpJsonStream):
"""
Streaming iterator for list-buckets over HTTP
"""
_json_field = u'buckets'
class RiakHttpMultipartStream(RiakHttpStream):
"""
Streaming iterator for multipart messages over HTTP
"""
def __init__(self, response):
super(RiakHttpMultipartStream, self).__init__(response)
ctypehdr = response.getheader('content-type')
_, params = parse_header(ctypehdr)
self.boundary_re = re.compile('\r?\n--%s(?:--)?\r?\n' %
re.escape(params['boundary']))
self.next_boundary = None
self.seen_first = False
def next(self):
# multipart/mixed starts with a boundary, then the first part.
if not self.seen_first:
self.read_until_boundary()
self.advance_buffer()
self.seen_first = True
self.read_until_boundary()
if self.next_boundary:
part = self.advance_buffer()
message = message_from_string(part)
return message
else:
raise StopIteration
def try_match(self):
self.next_boundary = self.boundary_re.search(self.buffer)
return self.next_boundary
def advance_buffer(self):
part = self.buffer[:self.next_boundary.start()]
self.buffer = self.buffer[self.next_boundary.end():]
self.next_boundary = None
return part
def read_until_boundary(self):
while not self.try_match() and not self.response_done:
self._read()
class RiakHttpMapReduceStream(RiakHttpMultipartStream):
"""
Streaming iterator for MapReduce over HTTP
"""
def next(self):
message = super(RiakHttpMapReduceStream, self).next()
payload = json.loads(message.get_payload())
return payload['phase'], payload['data']
class RiakHttpIndexStream(RiakHttpMultipartStream):
"""
Streaming iterator for secondary indexes over HTTP
"""
def __init__(self, response, index, return_terms):
super(RiakHttpIndexStream, self).__init__(response)
self.index = index
self.return_terms = return_terms
def next(self):
message = super(RiakHttpIndexStream, self).next()
payload = json.loads(message.get_payload())
if u'keys' in payload:
return payload[u'keys']
elif u'results' in payload:
structs = payload[u'results']
# Format is {"results":[{"2ikey":"primarykey"}, ...]}
return [self._decode_pair(d.items()[0]) for d in structs]
elif u'continuation' in payload:
return CONTINUATION(payload[u'continuation'])
def _decode_pair(self, pair):
return (decode_index_value(self.index, pair[0]), pair[1])
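# Hedged usage sketch (added for illustration; not part of the original module).
# These stream classes are normally constructed by the HTTP transport rather
# than by user code; `response` below stands in for an httplib-style response
# object exposing read() and getheader(), and `process` is a placeholder.
#
#     stream = RiakHttpKeyStream(response)
#     for key_batch in stream:   # each item is the 'keys' list of one JSON chunk
#         process(key_batch)
#     stream.close()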
| {
"content_hash": "6f4f329f6fc1a4f065fe5a49dd7d282f",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 70,
"avg_line_length": 28.715151515151515,
"alnum_prop": 0.6274799493457155,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "e992bbecbe9ac7b835f783511f88e704438c71bf",
"size": "4738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dataset/python/stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
} |
from feedly.feeds.aggregated_feed.redis import RedisAggregatedFeed
from feedly.tests.feeds.aggregated_feed.base import TestAggregatedFeed,\
implementation
from feedly.activity import AggregatedActivity
from feedly.tests.feeds.redis import CustomActivity
class CustomAggregated(AggregatedActivity):
pass
class RedisCustomAggregatedFeed(RedisAggregatedFeed):
activity_class = CustomActivity
aggregated_activity_class = CustomAggregated
class TestRedisAggregatedFeed(TestAggregatedFeed):
feed_cls = RedisAggregatedFeed
class TestRedisCustomAggregatedFeed(TestAggregatedFeed):
feed_cls = RedisCustomAggregatedFeed
activity_class = CustomActivity
aggregated_activity_class = CustomAggregated
@implementation
def test_custom_activity(self):
assert self.test_feed.count() == 0
self.feed_cls.insert_activity(
self.activity
)
self.test_feed.add(self.activity)
assert self.test_feed.count() == 1
aggregated = self.test_feed[:10][0]
assert type(aggregated) == self.aggregated_activity_class
assert type(aggregated.activities[0]) == self.activity_class
| {
"content_hash": "8230b70c0e5ac2de6ac39c6c1ecf177d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 72,
"avg_line_length": 32.388888888888886,
"alnum_prop": 0.7487135506003431,
"repo_name": "izhan/Stream-Framework",
"id": "c0a2eac24114de1bd635a65745612a76ec3ba7ba",
"size": "1166",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "feedly/tests/feeds/aggregated_feed/redis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "86131"
},
{
"name": "Nginx",
"bytes": "1796"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Puppet",
"bytes": "76558"
},
{
"name": "Python",
"bytes": "542714"
},
{
"name": "Ruby",
"bytes": "259164"
},
{
"name": "Shell",
"bytes": "8427"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from inventory.models import Jar
from pyment import settings
from django.utils import timezone
from datetime import timedelta
class Command(BaseCommand):
help = "Delete inactive jars more than %d days old" % settings.INACTIVE_JAR_AGE_DAYS
def add_arguments(self, parser):
parser.add_argument("--dry-run", action="store_true", dest="dryrun", default=False, help="Simulate the command")
def handle(self, **options):
self.stdout.write("Removing inactive jars\n")
# calculate date of INACTIVE_JAR_AGE_DAYS days ago
remove_before = timezone.now() + timedelta(days=-settings.INACTIVE_JAR_AGE_DAYS)
old_jars = Jar.objects.filter(is_active=False, updated_at__lt=remove_before)
        count = old_jars.count()
        if not options["dryrun"]:
            old_jars.delete()
        self.stdout.write("%d jars were removed\n" % count)
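# Hedged usage note (added for illustration; not part of the original command):
#
#     python manage.py delete_old_jars            # delete inactive jars older
#                                                 # than INACTIVE_JAR_AGE_DAYS
#     python manage.py delete_old_jars --dry-run  # report without deleting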
| {
"content_hash": "b9aa0e57191222a50399049505454560",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 120,
"avg_line_length": 43.285714285714285,
"alnum_prop": 0.7007700770077008,
"repo_name": "mathuin/pyment",
"id": "f33b11e4a30b108e1e39968b48db08951c485868",
"size": "909",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "django/inventory/management/commands/delete_old_jars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7253"
},
{
"name": "CSS",
"bytes": "3691"
},
{
"name": "Dockerfile",
"bytes": "325"
},
{
"name": "HTML",
"bytes": "17986"
},
{
"name": "JavaScript",
"bytes": "1995"
},
{
"name": "Makefile",
"bytes": "7418"
},
{
"name": "Python",
"bytes": "239563"
},
{
"name": "Shell",
"bytes": "4610"
}
],
"symlink_target": ""
} |
from pygw.base import GeoWaveObject
from pygw.base.type_conversions import StringArrayType
from .query_constraints_factory import QueryConstraintsFactory
from .query_hint_key import QueryHintKey
from .query_constraints import QueryConstraints
from ..base.java_transformer import NoOpTransformer
class BaseQueryBuilder(GeoWaveObject):
"""
A base class for building queries. This class should not be used directly. Instead, use one of the derived classes
such as `pygw.query.vector.VectorQueryBuilder` or `pygw.query.vector.VectorAggregationQueryBuilder`.
"""
def __init__(self, java_ref, java_transformer=NoOpTransformer()):
self._java_transformer = java_transformer
super().__init__(java_ref)
def constraints_factory(self):
"""
Creates a constraints factory for the type of query that is being built.
Returns:
An appropriate `pygw.query.QueryConstraintsFactory` for the query type.
"""
return QueryConstraintsFactory(self._java_ref.constraints_factory())
def all_indices(self):
"""
Configure the query to allow the use of all indices when getting data.
This is the default.
Returns:
This query builder.
"""
self._java_ref.allIndices()
return self
def index_name(self, index_name):
"""
Configure the query to get data from a specific index.
Args:
index_name (str): The name of the index to get data from.
Returns:
This query builder.
"""
self._java_ref.indexName(index_name)
return self
def add_authorization(self, authorization):
"""
Configure the query to get data using the given authorization.
Args:
authorization (str): The authorization to use in the query.
Returns:
This query builder.
"""
self._java_ref.addAuthorization(authorization)
return self
def set_authorizations(self, authorizations):
"""
Configure the query to get data using the given set of authorizations.
Args:
authorizations (list of str): The authorizations to use in the query.
Returns:
This query builder.
"""
self._java_ref.setAuthorizations(StringArrayType().to_java(authorizations))
return self
def no_authorizations(self):
"""
Configure the query to get data without using any authorizations. This
is the default.
Returns:
This query builder.
"""
self._java_ref.noAuthorizations()
return self
def no_limit(self):
"""
Configure the query to get all results that match the query constraints.
This is the default.
Returns:
This query builder.
"""
self._java_ref.noLimit()
return self
def limit(self, limit):
"""
Configure the query to only return a limited number of results.
Args:
limit (int): The maximum number of results to get.
Returns:
This query builder.
"""
self._java_ref.limit(limit)
return self
def add_hint(self, key, value):
"""
Adds a hint to the query. Available query hints are defined by the
enumeration at `pygw.query.query_hint_key.QueryHintKey`.
Args:
key (pygw.query.query_hint_key.QueryHintKey): The key of the hint to set.
value (any): The value to use for the hint.
Returns:
This query builder.
"""
assert isinstance(key, QueryHintKey)
self._java_ref.addHint(QueryHintKey.get_key(key), value)
return self
def no_hints(self):
"""
Configure the query to use no query hints. This is the default.
Returns:
This query builder.
"""
self._java_ref.noHints()
return self
def constraints(self, constraints):
"""
Configure the query to be constrained by the given query constraints.
Constraints can be constructed using the factory provided by the
`pygw.query.query_builder.QueryBuilder.constraints_factory` method.
Args:
constraints (pygw.query.query_constraints.QueryConstraints): The constraints to use.
Returns:
This query builder.
"""
assert isinstance(constraints, QueryConstraints)
self._java_ref.constraints(constraints._java_ref)
return self
def build(self):
"""
Builds the configured query.
Raises:
NotImplementedError: This should be overridden by derived query builders.
Returns:
The constructed query.
"""
raise NotImplementedError
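# Hedged usage sketch (added for illustration; not part of the original module).
# The fluent interface above is normally exercised through a derived builder
# such as pygw.query.vector.VectorQueryBuilder (see the class docstring); the
# import is kept commented here to avoid a circular dependency, and the index
# name and authorization string are assumptions.
#
#     from pygw.query.vector import VectorQueryBuilder
#
#     query = (VectorQueryBuilder()
#              .index_name("spatial_idx")
#              .add_authorization("internal")
#              .limit(100)
#              .build())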
| {
"content_hash": "61d56f46319fa3277ba6edc4bdc74f04",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 120,
"avg_line_length": 30.641509433962263,
"alnum_prop": 0.6151477832512315,
"repo_name": "ngageoint/geowave",
"id": "cd634f733305987dfa7892d243fb4c8e02115844",
"size": "5392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/main/python/pygw/query/base_query_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "5073"
},
{
"name": "CMake",
"bytes": "2032"
},
{
"name": "FreeMarker",
"bytes": "2879"
},
{
"name": "Gnuplot",
"bytes": "57750"
},
{
"name": "Groovy",
"bytes": "1323"
},
{
"name": "HTML",
"bytes": "1903"
},
{
"name": "Java",
"bytes": "7103026"
},
{
"name": "Protocol Buffer",
"bytes": "1525"
},
{
"name": "Puppet",
"bytes": "4039"
},
{
"name": "Scala",
"bytes": "26507"
},
{
"name": "Scheme",
"bytes": "20491"
},
{
"name": "Shell",
"bytes": "68381"
}
],
"symlink_target": ""
} |
from unittest import mock
import pytest
from saleor.payment import PaymentError
from saleor.payment.gateways.adyen.utils.apple_pay import (
initialize_apple_pay_session,
validate_payment_data_for_apple_pay,
)
@pytest.mark.parametrize(
"validation_url,merchant_identifier,domain,display_name,certificate",
[
(
"https://apple-pay-gateway.apple.com/paymentservices/startSession",
"merchant.com.identifier",
"saleor.com",
            None,
            "certificate data",
),
        (None, "merchant.com.identifier", "saleor.com", "Saleor", "certificate data"),
(
"https://apple-pay-gateway.apple.com/paymentservices/startSession",
            None,
            "saleor.com",
            "Saleor",
            "certificate data",
),
(
"https://apple-pay-gateway.apple.com/paymentservices/startSession",
"merchant.com.identifier",
            None,
            "Saleor",
            "certificate data",
),
        (
            "https://not-whitelisted-domain.com/paymentservices/startSession",
            "merchant.com.identifier",
            "saleor.com",
            "Saleor",
            "certificate data",
),
(
"https://apple-pay-gateway.apple.com/paymentservices/startSession",
"merchant.com.identifier",
"saleor.com",
"Saleor",
None,
),
],
)
def test_validate_payment_data_for_apple_pay_raises_payment_error(
validation_url, merchant_identifier, domain, display_name, certificate
):
with pytest.raises(PaymentError):
validate_payment_data_for_apple_pay(
validation_url, merchant_identifier, domain, display_name, certificate
)
def test_validate_payment_data_for_apple_pay():
validation_url = "https://apple-pay-gateway.apple.com/paymentservices/startSession"
merchant_identifier = "merchant.com.identifier"
domain = "saleor.com"
display_name = "Saleor "
    certificate = "certificate data"
validate_payment_data_for_apple_pay(
validation_url, merchant_identifier, domain, display_name, certificate
)
@mock.patch("saleor.payment.gateways.adyen.utils.apple_pay.NamedTemporaryFile")
@mock.patch("saleor.payment.gateways.adyen.utils.apple_pay.requests.post")
def test_initialize_payment_for_apple_pay(mocked_request, mocked_tmp_file):
mocked_cert_file_name = "cert-file-name"
mocked_file = mock.MagicMock()
mocked_file.__enter__.return_value = mocked_file
mocked_file.name = mocked_cert_file_name
mocked_tmp_file.return_value = mocked_file
mocked_response = mock.Mock()
mocked_response.ok = True
mocked_response.json.return_value = {
"epochTimestamp": 1604652056653,
"expiresAt": 1604655656653,
"merchantSessionIdentifier": "SSH5EFCB46BA25C4B14B3F37795A7F5B974_BB8E",
}
mocked_request.return_value = mocked_response
validation_url = "https://apple-pay-gateway.apple.com/paymentservices/startSession"
merchant_identifier = "merchant.com.identifier"
domain = "saleor.com"
display_name = "Saleor Shop"
    certificate = "certificate data"
initialize_apple_pay_session(
validation_url,
merchant_identifier,
domain,
display_name,
certificate,
)
expected_data = {
"merchantIdentifier": merchant_identifier,
"displayName": display_name,
"initiative": "web",
"initiativeContext": domain,
}
mocked_request.assert_called_with(
validation_url, json=expected_data, cert=mocked_cert_file_name
)
@mock.patch("saleor.payment.gateways.adyen.utils.apple_pay.requests.post")
def test_initialize_payment_for_apple_pay_request_failed(mocked_request):
mocked_response = mock.Mock()
mocked_response.ok = False
mocked_response.json.return_value = {}
mocked_request.return_value = mocked_response
validation_url = "https://apple-pay-gateway.apple.com/paymentservices/startSession"
merchant_identifier = "merchant.com.identifier"
domain = "saleor.com"
display_name = "Saleor Shop"
    certificate = "certificate data"
with pytest.raises(PaymentError):
initialize_apple_pay_session(
validation_url,
merchant_identifier,
domain,
display_name,
certificate,
)
| {
"content_hash": "acd894595341b922b32f057a911827f4",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 87,
"avg_line_length": 31.884892086330936,
"alnum_prop": 0.6389891696750902,
"repo_name": "mociepka/saleor",
"id": "1f0c2ff0cc889d6897096a922ac1c2e8ee8a5a66",
"size": "4432",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "saleor/payment/gateways/adyen/tests/utils/test_apple_pay.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
"""Tests for the CoinMarketCap sensor platform."""
import json
import unittest
from unittest.mock import patch
import homeassistant.components.sensor as sensor
from homeassistant.setup import setup_component
from tests.common import (
get_test_home_assistant, load_fixture, assert_setup_component)
VALID_CONFIG = {
'platform': 'coinmarketcap',
'currency': 'ethereum',
'display_currency': 'EUR',
}
class TestCoinMarketCapSensor(unittest.TestCase):
"""Test the CoinMarketCap sensor."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.config = VALID_CONFIG
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
@patch('coinmarketcap.Market.ticker',
return_value=json.loads(load_fixture('coinmarketcap.json')))
def test_setup(self, mock_request):
"""Test the setup with custom settings."""
with assert_setup_component(1, sensor.DOMAIN):
assert setup_component(self.hass, sensor.DOMAIN, {
'sensor': VALID_CONFIG})
state = self.hass.states.get('sensor.ethereum')
assert state is not None
assert state.state == '240.47'
assert state.attributes.get('symbol') == 'ETH'
assert state.attributes.get('unit_of_measurement') == 'EUR'
| {
"content_hash": "354e19ff66b6e7bb9cde6255702e20c9",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 71,
"avg_line_length": 31.59090909090909,
"alnum_prop": 0.660431654676259,
"repo_name": "stefan-jonasson/home-assistant",
"id": "15c254bfb274aba88ff628f08ccf05e8cac9f9e6",
"size": "1390",
"binary": false,
"copies": "8",
"ref": "refs/heads/dev",
"path": "tests/components/sensor/test_coinmarketcap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4056"
},
{
"name": "Python",
"bytes": "8360711"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12658"
}
],
"symlink_target": ""
} |
'''
Using Turtle Graphics to draw a blue polygon with customizable number and
length of sides according to user's input.
'''
import turtle
sides = int(input("Number of sides: "))
distance = int(input("Side's length: "))
turtle.color('blue') # Set the pen's color to blue
turtle.pendown() # Start drawing
deg = 360 / sides # Calculate the turn
for i in range(sides):
turtle.forward(distance) # By using loop, turn and
turtle.left(deg) # move the turtle forward
turtle.hideturtle() # Hide the turtle/arrow
turtle.exitonclick() # Close the window on click event
| {
"content_hash": "23d16410eb7ab3a97c6b0157aa283363",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 30.9,
"alnum_prop": 0.6699029126213593,
"repo_name": "laymonage/TarungLab",
"id": "5bc187c28dface4b47f67c7239f157a58ae0f76a",
"size": "618",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lab/01/lab01_f.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30286"
}
],
"symlink_target": ""
} |
'''Forced Choice App
=====================
The main module that starts the experiment.
'''
from functools import partial
from os.path import join, dirname, isdir
from cplcom.moa.app import ExperimentApp, run_app as run_cpl_app
from kivy.properties import ObjectProperty
from kivy.resources import resource_add_path
from kivy.uix.behaviors.knspace import knspace
from kivy.garden.filebrowser import FileBrowser
from kivy.lang import Builder
import forced_choice.graphics
import forced_choice.stages
__all__ = ('ForcedChoiceApp', 'run_app')
class ForcedChoiceApp(ExperimentApp):
'''The app which runs the experiment.
'''
def __init__(self, **kwargs):
super(ForcedChoiceApp, self).__init__(**kwargs)
Builder.load_file(join(dirname(__file__), 'Experiment.kv'))
def clean_up_root_stage(self):
super(ForcedChoiceApp, self).clean_up_root_stage()
knspace.gui_start_stop.state = 'normal'
run_app = partial(run_cpl_app, ForcedChoiceApp)
'''The function that starts the experiment GUI and the entry point for
the main script.
'''
if __name__ == '__main__':
run_app()
| {
"content_hash": "0738221a38ae9294684e5b8b0b1ac489",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 70,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.7026785714285714,
"repo_name": "matham/forced_choice",
"id": "0eba926f1a86c4b3be90088cfc926ddaf5f33f77",
"size": "1120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forced_choice/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66053"
}
],
"symlink_target": ""
} |
"""
Spatial kernels for dwarf galaxies.
"""
import sys
import inspect
from abc import abstractmethod
from collections import OrderedDict as odict
import copy
import numpy as np
import healpy as hp
import scipy.integrate
import scipy.interpolate
import ugali.utils.projector
from ugali.utils.projector import Projector, angsep
from ugali.analysis.model import Model, Parameter
from ugali.utils.healpix import ang2vec, ang2pix, query_disc, ang2disc
from ugali.utils.logger import logger
#ADW: WARNING some funky stuff may be happening at the origin (0,0)
class Kernel(Model):
"""
Base class for kernels.
"""
_params = odict([
('lon', Parameter(0.0, [0.0, 360. ])),
('lat', Parameter(0.0, [-90., 90. ])),
])
_mapping = odict([])
_proj = 'ait'
def __init__(self, proj='ait', **kwargs):
# This __init__ is probably not necessary...
self.proj = proj
super(Kernel,self).__init__(**kwargs)
def __call__(self, lon, lat):
"""
Return the value of the pdf at a give location.
Parameters
----------
lon : longitude (deg)
lat : latitude (deg)
Returns
-------
pdf : normalized, truncated pdf
"""
return self.pdf(lon, lat)
@abstractmethod
def _kernel(self, r):
"""Unnormalized, untruncated kernel"""
pass
def _pdf(self, radius):
"""Unnormalized, truncated kernel"""
return np.where(radius<=self.edge, self._kernel(radius), 0.)
@abstractmethod
def pdf(self, lon, lat):
"""Normalized, truncated pdf"""
pass
@property
def norm(self):
"""Normalization from the integated pdf"""
return 1./self.integrate()
@property
def projector(self):
"""Projector used to transform to local sky coordinates."""
if self.proj is None or self.proj.lower()=='none':
return None
else:
return Projector(self.lon, self.lat, self.proj)
def integrate(self, rmin=0, rmax=np.inf):
"""
Calculate the 2D integral of the 1D surface brightness profile
(i.e, the flux) between rmin and rmax (elliptical radii).
Parameters:
-----------
rmin : minimum integration radius (deg)
rmax : maximum integration radius (deg)
Returns:
--------
integral : Solid angle integral (deg^2)
"""
if rmin < 0: raise Exception('rmin must be >= 0')
integrand = lambda r: self._pdf(r) * 2*np.pi * r
return scipy.integrate.quad(integrand,rmin,rmax,full_output=True,epsabs=0)[0]
class ToyKernel(Kernel):
"""
Simple toy kernel that selects healpix pixels within
the given extension radius. Similar to 'RadialDisk'.
"""
_params = odict(
list(Kernel._params.items()) +
[
('extension', Parameter(0.1, [0.0001,5.0]) ),
('nside', Parameter(4096,[4096,4096])),
])
def _cache(self, name=None):
pixel_area = hp.nside2pixarea(self.nside,degrees=True)
vec = ang2vec(self.lon, self.lat)
#self.pix = query_disc(self.nside,vec,self.extension)
self.pix = ang2disc(self.nside,self.lon,self.lat,self.extension,inclusive=True)
self._norm = 1./(len(self.pix)*pixel_area)
@property
def norm(self):
return self._norm
def _pdf(self,pix):
return np.in1d(pix,self.pix)
def pdf(self,lon,lat):
pix = ang2pix(self.nside,lon,lat)
return self.norm * self._pdf(pix)
class EllipticalKernel(Kernel):
"""
Base class for elliptical kernels.
    Ellipticity is defined as 1 - b/a, where a, b are the semi-major, semi-minor
axes respectively. The position angle is defined in degrees east of north.
This definition follows from Martin et al. 2008:
http://adsabs.harvard.edu/abs/2008ApJ...684.1075M
    ### This is a deprecated warning (2015/08/12)
### ADW: WARNING!!! This is actually the PA *WEST* of North!
### to get the conventional PA EAST of North take 90-PA
### Documentation?!?!
"""
_params = odict(
list(Kernel._params.items()) +
[
# This is the semi-major axis
('extension', Parameter(0.1, [0.0001,0.5]) ),
# This is e = 1 - b/a (0 for RadialKernel)
('ellipticity', Parameter(0.0, [0.0, 0.99]) ),
# This is the PA *WEST* of North (0 for RadialKernel)
# to get the conventional PA EAST of North take 90-PA
# Would it be better to have bounds [-90,90]?
('position_angle',Parameter(0.0, [0.0, 180.0]) ), #
])
_mapping = odict([
('e','ellipticity'),
('theta','position_angle'),
])
@property
def norm(self):
norm = super(EllipticalKernel,self).norm
return norm * 1./self.jacobian
@property
def jacobian(self):
return 1. - self.e
@property
def a(self):
return self.extension
@property
def b(self):
return self.a*self.jacobian
@property
def edge(self):
return 5.*self.extension
def angsep(self,lon,lat):
return angsep(self.lon,self.lat,lon,lat)
def radius(self,lon,lat):
x,y = self.projector.sphereToImage(lon,lat)
costh = np.cos(np.radians(self.theta))
sinth = np.sin(np.radians(self.theta))
return np.sqrt(((x*costh-y*sinth)/(1-self.e))**2 + (x*sinth+y*costh)**2)
def pdf(self,lon,lat):
radius = self.radius(lon,lat)
return self.norm*self._pdf(radius)
def sample_radius(self, n):
"""
Sample the radial distribution (deg) from the 2D stellar density.
Output is elliptical radius in true projected coordinates.
Parameters
----------
n : number of stars to sample
Returns
-------
radius : distance from centroid (deg)
"""
size = int(n)
edge = self.edge if self.edge<20*self.extension else 20*self.extension
radius = np.linspace(0, edge, int(10**5))
pdf = self._pdf(radius) * np.sin(np.radians(radius))
cdf = np.cumsum(pdf)
cdf /= cdf[-1]
fn = scipy.interpolate.interp1d(cdf, list(range(0, len(cdf))))
index = np.floor(fn(np.random.uniform(size=size))).astype(int)
return radius[index]
def sample_lonlat(self, n):
"""
Sample 2D distribution of points in lon, lat
Parameters
----------
n : number of stars to sample
Returns
-------
lon,lat : longitude and latitude (deg)
"""
# From http://en.wikipedia.org/wiki/Ellipse#General_parametric_form
# However, Martin et al. (2009) use PA theta "from North to East"
        # Definition of phi (position angle) is offset by pi/2
        # Definition of t (eccentric anomaly) remains the same (x,y-frame usual)
# In the end, everything is trouble because we use glon, glat...
radius = self.sample_radius(n)
a = radius; b = self.jacobian * radius
t = 2. * np.pi * np.random.rand(n)
cost,sint = np.cos(t),np.sin(t)
phi = np.pi/2. - np.deg2rad(self.theta)
cosphi,sinphi = np.cos(phi),np.sin(phi)
x = a*cost*cosphi - b*sint*sinphi
y = a*cost*sinphi + b*sint*cosphi
if self.projector is None:
logger.debug("Creating AITOFF projector for sampling")
projector = Projector(self.lon,self.lat,'ait')
else:
projector = self.projector
lon, lat = projector.imageToSphere(x, y)
return lon, lat
simulate = sample_lonlat
sample = sample_lonlat
# Back-compatibility
def setExtension(self,extension):
self.extension = extension
def setCenter(self,lon,lat):
self.lon = lon
self.lat = lat
class EllipticalDisk(EllipticalKernel):
"""
Simple uniform disk kernel for testing.
f(r) = 1 for r <= r_0
f(r) = 0 for r > r_0
"""
_params = EllipticalKernel._params
_mapping = odict(
list(EllipticalKernel._mapping.items()) +
[
('r_0','extension')
])
### ADW: stellar mass conversion?
def _kernel(self, radius):
return np.where(radius<=self.r_0, 1.0, 0.0)
@property
def norm(self):
return 1./(np.pi*self.r_0**2 * self.jacobian)
class EllipticalGaussian(EllipticalKernel):
"""
Simple Gaussian kernel for testing:
f(r) = C * exp(-r**2 / 2*sigma**2)
"""
_params = EllipticalKernel._params
_mapping = odict(
list(EllipticalKernel._mapping.items()) +
[
('sigma','extension')
])
### ADW: stellar mass conversion?
def _kernel(self, radius):
return np.exp(-radius**2/(2*self.sigma**2))
@property
def norm(self):
# Analytic integral from 0 to edge
return 1./(2*np.pi*self.sigma**2*(1-np.exp(-self.edge**2/(2*self.sigma**2)))*self.jacobian)
class EllipticalExponential(EllipticalKernel):
"""
Stellar density distribution for Exponential profile:
f(r) = C * exp(-r / r_e)
where r_e = r_h/1.68
http://adsabs.harvard.edu//abs/2006MNRAS.365.1263M (Eq. 5)
"""
_params = odict(EllipticalKernel._params)
_mapping = odict(
list(EllipticalKernel._mapping.items()) +
[
('r_h','extension'), # Half-light radius
])
def _kernel(self,radius):
return np.exp(-radius/self.r_e)
@property
def norm(self):
# Analytic integral
xedge = self.edge/self.r_e
return 1./(2*np.pi*self.r_e**2*(1 - (xedge+1)*np.exp(-xedge))*self.jacobian)
@property
def r_e(self):
# Exponential scale radius
return self.r_h/1.68
@property
def edge(self):
return 20.*self.r_h
class EllipticalPlummer(EllipticalKernel):
"""
Stellar density distribution for Plummer profile:
f(r) = C * r_c**2 / (r_c**2 + r**2)**2
http://adsabs.harvard.edu//abs/2006MNRAS.365.1263M (Eq. 6)
"""
_params = odict(
list(EllipticalKernel._params.items()) +
[
('truncate', Parameter(3.0, [0.0, np.inf]) ), # Truncation radius
])
_mapping = odict(
list(EllipticalKernel._mapping.items()) +
[
('r_c','extension'), # Plummer radius
            ('r_h','extension'), # ADW: Deprecated
('r_t','truncate'), # Tidal radius
])
def _kernel(self, radius):
return 1./(np.pi*self.r_h**2 * (1.+(radius/self.r_h)**2)**2)
def _cache(self, name=None):
if name in [None,'extension','ellipticity','truncate']:
self._norm = 1./self.integrate() * 1./self.jacobian
else:
return
@property
def norm(self):
return self._norm
@property
def u_t(self):
# Truncation factor
return self.truncate/self.extension
@property
def edge(self):
return self.r_t
class EllipticalKing(EllipticalKernel):
"""
Stellar density distribution for King profile:
f(r) = C * [ 1/sqrt(1 + (r/r_c)**2) - 1/sqrt(1 + (r_t/r_c)**2) ]**2
http://adsabs.harvard.edu//abs/2006MNRAS.365.1263M (Eq. 4)
The half-light radius is related to the King radius for
c = log10(r_t/r_c) = 0.7 :
r_h = 1.185 * r_c
http://adsabs.harvard.edu/abs/2010MNRAS.406.1220W (App.B)
"""
_params = odict(
list(EllipticalKernel._params.items()) +
[
('truncate', Parameter(3.0, [0.0, np.inf]) ), # Truncation radius
])
_mapping = odict(
list(EllipticalKernel._mapping.items()) +
[
('r_c','extension'), # Core radius
('r_t','truncate'), # Tidal radius
])
def _kernel(self, radius):
return ((1./np.sqrt(1.+(radius/self.r_c)**2))-(1./np.sqrt(1.+(self.r_t/self.r_c)**2)))**2
def _cache(self, name=None):
if name in [None,'extension','ellipticity','truncate']:
self._norm = 1./self.integrate()
else:
return
@property
def norm(self):
return self._norm
@property
def c(self):
return np.log10(self.r_t/self.r_c)
@property
def edge(self):
return self.r_t
#####################################################
### Radial Kernels
#####################################################
class RadialKernel(EllipticalKernel):
"""
Radial kernel subclass fixing ellipticity and
position angle to zero.
"""
_frozen_params = ['ellipticity','position_angle']
def __init__(self,**kwargs):
# This is a bit messy because the defaults are set
# at the instance level not at the class level
self._params = copy.deepcopy(self._params)
def frozen(x):
if x: raise Exception("Parameter is frozen")
self._params['ellipticity'].set(0, [0, 0], False)
self._params['ellipticity'].set_free = frozen
self._params['position_angle'].set(0, [0, 0], False)
        self._params['position_angle'].set_free = frozen
#logger.warning("Setting bounds on extension")
#self._params['extension'].set(0.1, [1e-4, 0.1])
super(RadialKernel,self).__init__(**kwargs)
def pdf(self, lon, lat):
if self.projector is None:
radius = angsep(self.lon,self.lat,lon,lat)
return self.norm*self._pdf(radius)
else:
return super(RadialKernel,self).pdf(lon,lat)
# Back-compatibility
def surfaceIntensity(self,radius):
return self.norm*self._pdf(radius)
# For fast access...
class RadialDisk(RadialKernel,EllipticalDisk): pass
class RadialExponential(RadialKernel,EllipticalExponential): pass
class RadialGaussian(RadialKernel,EllipticalGaussian): pass
class RadialPlummer(RadialKernel,EllipticalPlummer): pass
class RadialKing(RadialKernel,EllipticalKing): pass
Disk = RadialDisk
Gaussian = RadialGaussian
Exponential = RadialExponential
Plummer = RadialPlummer
King = RadialKing
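# Hedged usage sketch (added for illustration; not part of the original module).
# It instantiates one of the radial kernels defined above, evaluates the
# normalized surface density at the centroid, and draws sample positions.
# The centroid, extension, and sample size are arbitrary assumptions.
#
#     kernel = Plummer(lon=53.92, lat=-54.05, extension=0.10)
#     peak = kernel.pdf(kernel.lon, kernel.lat)   # deg^-2 at the center
#     lon, lat = kernel.sample_lonlat(1000)       # 1000 random positions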
### def kernelFactory2(name, **kwargs):
### """
### Factory for creating spatial kernels. Arguments
### are passed directly to the constructor of the chosen
### kernel.
### """
### fn = lambda member: inspect.isclass(member) and member.__module__==__name__
### kernels = odict(inspect.getmembers(sys.modules[__name__], fn))
###
### if name not in kernels.keys():
### msg = "%s not found in kernels:\n %s"%(name,kernels.keys())
### logger.error(msg)
### msg = "Unrecognized kernel: %s"%name
### raise Exception(msg)
###
### return kernels[name](**kwargs)
###
###
### def kernelFactory(name, **kwargs):
### """
### Factory for creating spatial kernels. Arguments
### are passed directly to the constructor of the chosen
### kernel.
### """
### fn = lambda member: inspect.isclass(member) and member.__module__==__name__
### kernels = odict(inspect.getmembers(sys.modules[__name__], fn))
###
### if name not in kernels.keys():
### msg = "%s not found in kernels:\n %s"%(name,kernels.keys())
### logger.error(msg)
### msg = "Unrecognized kernel: %s"%name
### raise Exception(msg)
###
### return kernels[name](**kwargs)
#ADW: Should change 'name' to 'type' (but lots of legacy to deal with)
def factory(name, **kwargs):
from ugali.utils.factory import factory
return factory(name, module=__name__, **kwargs)
kernelFactory = factory
if __name__ == "__main__":
import argparse
description = "python script"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('args',nargs=argparse.REMAINDER)
opts = parser.parse_args(); args = opts.args
| {
"content_hash": "4be528678ca85ee550d1f73e1c7456f2",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 99,
"avg_line_length": 30.599236641221374,
"alnum_prop": 0.5743420232006985,
"repo_name": "DarkEnergySurvey/ugali",
"id": "02f8f094a9c644bec386146a54708833ba338d76",
"size": "16056",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ugali/analysis/kernel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "355304"
},
{
"name": "Python",
"bytes": "949638"
}
],
"symlink_target": ""
} |
"""Celko's "Nested Sets" Tree Structure.
http://www.intelligententerprise.com/001020/celko.jhtml
"""
from sqlalchemy import case
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import aliased
from sqlalchemy.orm import Session
Base = declarative_base()
class Employee(Base):
__tablename__ = "personnel"
__mapper_args__ = {
"batch": False # allows extension to fire for each
# instance before going to the next.
}
parent = None
emp = Column(String, primary_key=True)
left = Column("lft", Integer, nullable=False)
right = Column("rgt", Integer, nullable=False)
def __repr__(self):
return "Employee(%s, %d, %d)" % (self.emp, self.left, self.right)
@event.listens_for(Employee, "before_insert")
def before_insert(mapper, connection, instance):
if not instance.parent:
instance.left = 1
instance.right = 2
else:
personnel = mapper.mapped_table
right_most_sibling = connection.scalar(
select([personnel.c.rgt]).where(
personnel.c.emp == instance.parent.emp
)
)
connection.execute(
personnel.update(personnel.c.rgt >= right_most_sibling).values(
lft=case(
[
(
personnel.c.lft > right_most_sibling,
personnel.c.lft + 2,
)
],
else_=personnel.c.lft,
),
rgt=case(
[
(
personnel.c.rgt >= right_most_sibling,
personnel.c.rgt + 2,
)
],
else_=personnel.c.rgt,
),
)
)
instance.left = right_most_sibling
instance.right = right_most_sibling + 1
# before_update() would be needed to support moving of nodes
# after_delete() would be needed to support removal of nodes.
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)
session = Session(bind=engine)
albert = Employee(emp="Albert")
bert = Employee(emp="Bert")
chuck = Employee(emp="Chuck")
donna = Employee(emp="Donna")
eddie = Employee(emp="Eddie")
fred = Employee(emp="Fred")
bert.parent = albert
chuck.parent = albert
donna.parent = chuck
eddie.parent = chuck
fred.parent = chuck
# the order of "add" is important here. elements must be added in
# the order in which they should be INSERTed.
session.add_all([albert, bert, chuck, donna, eddie, fred])
session.commit()
print(session.query(Employee).all())
# 1. Find an employee and all their supervisors, no matter how deep the tree.
ealias = aliased(Employee)
print(
session.query(Employee)
.filter(ealias.left.between(Employee.left, Employee.right))
.filter(ealias.emp == "Eddie")
.all()
)
# 2. Find the employee and all their subordinates.
# (This query has a nice symmetry with the first query.)
print(
session.query(Employee)
.filter(Employee.left.between(ealias.left, ealias.right))
.filter(ealias.emp == "Chuck")
.all()
)
# 3. Find the level of each node, so you can print the tree
# as an indented listing.
for indentation, employee in (
session.query(func.count(Employee.emp).label("indentation") - 1, ealias)
.filter(ealias.left.between(Employee.left, Employee.right))
.group_by(ealias.emp)
.order_by(ealias.left)
):
print(" " * indentation + str(employee))
| {
"content_hash": "ddc07ce345f9e91330af998d9c07efd6",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 77,
"avg_line_length": 28.2,
"alnum_prop": 0.6109797741003414,
"repo_name": "wujuguang/sqlalchemy",
"id": "ba45231ceeb8ba255ec3eda0bbde77801dd31ced",
"size": "3807",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/nested_sets/nested_sets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "45930"
},
{
"name": "Python",
"bytes": "11287383"
}
],
"symlink_target": ""
} |
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup, Extension
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
# Build and use a C++ extension for faster masking. SWIG is required.
_USE_FAST_MASKING = False
if sys.version < '2.3':
print >> sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
if _USE_FAST_MASKING:
setup(ext_modules=[
Extension(
'mod_pywebsocket/_fast_masking',
['mod_pywebsocket/fast_masking.i'],
swig_opts=['-c++'])])
setup(author='Yuzo Fujishima',
author_email='[email protected]',
description='WebSocket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'the WebSocket Protocol (RFC 6455). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='See LICENSE',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME, _PACKAGE_NAME + '.handshake'],
url='http://code.google.com/p/pywebsocket/',
# See the source of distutils.version, distutils.versionpredicate and
# distutils.dist to understand how to name version numbers.
version='0.7.9',
)
# vi:sts=4 sw=4 et
| {
"content_hash": "455b8c5e0ab226083ced1e0a1e9d0182",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 75,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.6058059587471352,
"repo_name": "Teamxrtc/webrtc-streaming-node",
"id": "935d6b105899ca1348d7d6d68bbe2bd49e85d118",
"size": "2863",
"binary": false,
"copies": "34",
"ref": "refs/heads/master",
"path": "third_party/webrtc/src/chromium/src/third_party/pywebsocket/src/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "44"
},
{
"name": "C++",
"bytes": "221840"
},
{
"name": "HTML",
"bytes": "2383"
},
{
"name": "JavaScript",
"bytes": "37396"
},
{
"name": "Python",
"bytes": "2860"
},
{
"name": "Shell",
"bytes": "104"
}
],
"symlink_target": ""
} |
from multiverse.mars import *
from multiverse.mars.objects import *
from multiverse.mars.core import *
from multiverse.mars.util import *
from multiverse.server.math import *
from multiverse.server.events import *
from multiverse.server.objects import *
from multiverse.server.engine import *
World.setTheme("mvfantasy.toc")
| {
"content_hash": "53e0ddeeded0d480ebd4ccdec87e6eed",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 39,
"avg_line_length": 32.6,
"alnum_prop": 0.8067484662576687,
"repo_name": "longde123/MultiversePlatform",
"id": "ff546aa6ea5c34d27d4660973875cfce54c8acf9",
"size": "1544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/config/mv_fantasy/global_props.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1148"
},
{
"name": "Batchfile",
"bytes": "56002"
},
{
"name": "C",
"bytes": "2958956"
},
{
"name": "C#",
"bytes": "11292123"
},
{
"name": "C++",
"bytes": "428039"
},
{
"name": "CSS",
"bytes": "107446"
},
{
"name": "Groff",
"bytes": "3653"
},
{
"name": "HTML",
"bytes": "767415"
},
{
"name": "Inno Setup",
"bytes": "2093"
},
{
"name": "Java",
"bytes": "4444010"
},
{
"name": "JavaScript",
"bytes": "115349"
},
{
"name": "Makefile",
"bytes": "35639"
},
{
"name": "Matlab",
"bytes": "2076"
},
{
"name": "Objective-C",
"bytes": "44581"
},
{
"name": "Perl",
"bytes": "6299"
},
{
"name": "Python",
"bytes": "4648545"
},
{
"name": "Scheme",
"bytes": "48864"
},
{
"name": "Shell",
"bytes": "880494"
},
{
"name": "XSLT",
"bytes": "1834"
}
],
"symlink_target": ""
} |
from u2flib_server.attestation.resolvers import create_resolver
from u2flib_server.attestation.metadata import MetadataProvider, Attestation
__all__ = ['create_resolver', 'MetadataProvider', 'Attestation']
| {
"content_hash": "f5fb3833923a56b3be826e65eb2c8254",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 76,
"avg_line_length": 51.75,
"alnum_prop": 0.8115942028985508,
"repo_name": "reqa/pkg-python-u2flib-server",
"id": "99cbbd8c6031a90c61486451336eceedbb351207",
"size": "1579",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "u2flib_server/attestation/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "61656"
}
],
"symlink_target": ""
} |
from day_19 import (parse, count_molecules, steps_to_generate, reverse_dict,
generate_prev)
def test_parse():
stream = "H => HO\nH => OH\nO => HH\n\nHOH\n"
assert parse(stream) == ({"H": ["HO", "OH"], "O": ["HH"]}, "HOH")
assert parse(stream.strip()) == ({"H": ["HO", "OH"], "O": ["HH"]}, "HOH")
def test_count_molecules():
replacements = {"H": ["HO", "OH"], "O": ["HH"], "Al": ["C"]}
assert count_molecules("HOH", replacements) == 4
assert count_molecules("HOHOHO", replacements) == 7
assert count_molecules("HOHC", replacements) == 4
assert count_molecules("AlHOH", replacements) == 5
assert count_molecules("HOAlH", replacements) == 6
def test_steps_to_generate():
replacements = {"e": ["H", "O"], "H": ["HO", "OH"], "O": ["HH"]}
assert steps_to_generate("HOH", replacements) == 3
assert steps_to_generate("HOHOHO", replacements) == 6
def test_reverse_dict():
replacements = {"e": ["H", "O"], "H": ["HO", "OH"], "O": ["HH"]}
assert reverse_dict(replacements) == {"H": ["e"], "O": ["e"],
"HO": ["H"], "OH": ["H"],
"HH": ["O"]}
def test_generate_prev():
replacements = {"e": ["H", "O"], "H": ["HO", "OH"], "O": ["HH"]}
assert generate_prev("HOH", reverse_dict(replacements)) == {"HH"}
assert generate_prev("HH", reverse_dict(replacements)) == {"O"}
assert generate_prev("O", reverse_dict(replacements)) == {"e"}
| {
"content_hash": "586f45a6ef5dbaecb0cb5745d0731899",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 40.513513513513516,
"alnum_prop": 0.5203468979319547,
"repo_name": "masasin/advent_of_code_2015",
"id": "df7a0d5679b6df742a41099aeff88a49ec76cbac",
"size": "1499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_day_19.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Nimrod",
"bytes": "2706"
},
{
"name": "Python",
"bytes": "120818"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from past.utils import old_div
from proteus import *
from proteus.default_n import *
try:
from .poisson_3d_tetgen_p import *
except:
from poisson_3d_tetgen_p import *
#steady-state so no time integration
timeIntegration = NoIntegration
#number of output timesteps
nDTout = 1
#finite element spaces
femSpaces = {0:C0_AffineQuadraticOnSimplexWithNodalBasis}
#numerical quadrature choices
elementQuadrature = SimplexGaussQuadrature(nd,4)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
logEvent("""Mesh generated using: tetgen -%s %s""" % (triangleOptions,domain.polyfile+".poly"))
domain.MeshOptions.triangleOptions="VApq1.35q12feena%e" % (old_div((he**3),6.0),)
#number of levels in mesh
nLevels = 1
#no stabilization or shock capturing
subgridError = None
shockCapturing = None
#nonlinear solver choices
multilevelNonlinearSolver = Newton
levelNonlinearSolver = Newton
#linear problem so force 1 iteration allowed
maxNonlinearIts = 1
maxLineSearches = 1
fullNewtonFlag = True
#absolute nonlinear solver residual tolerance
nl_atol_res = 1.0e-8
#relative nonlinear solver convergence tolerance as a function of h
#(i.e., tighten relative convergence test as we refine)
tolFac = 0.0
#matrix type
matrix = SparseMatrix
#convenience flag
parallel = True
if parallel:
multilevelLinearSolver = KSP_petsc4py
    #for petsc do things like
#"-ksp_type cg -pc_type asm -pc_asm_type basic -ksp_atol 1.0e-10 -ksp_rtol 1.0e-10 -ksp_monitor_draw" or
#-pc_type lu -pc_factor_mat_solver_package
#can also set -pc_asm_overlap 2 with default asm type (restrict)
levelLinearSolver = KSP_petsc4py#
#for petsc do things like
#"-ksp_type cg -pc_type asm -pc_asm_type basic -ksp_atol 1.0e-10 -ksp_rtol 1.0e-10 -ksp_monitor_draw" or
#-pc_type lu -pc_factor_mat_solver_package
#can also set -pc_asm_overlap 2 with default asm type (restrict)
#levelLinearSolver = PETSc#
#pick number of layers to use in overlap
nLayersOfOverlapForParallel = 0
#type of partition
parallelPartitioningType = MeshParallelPartitioningTypes.node
#parallelPartitioningType = MeshParallelPartitioningTypes.element
#have to have a numerical flux in parallel
numericalFluxType = ConstantAdvection_Diffusion_SIPG_exterior#Advection_DiagonalUpwind_Diffusion_IIPG_exterior
#for true residual test
linearSolverConvergenceTest = 'r-true'
#to allow multiple models to set different ksp options
#linear_solver_options_prefix = 'poisson_'
linearSmoother = None
else:
multilevelLinearSolver = LU
levelLinearSolver = LU
numericalFluxType = ConstantAdvection_Diffusion_SIPG_exterior
#linear solver parameters
linearSmoother = None
#linear solver relative convergence test
linTolFac = 0.0
#linear solver absolute convergence test
l_atol_res = 1.0e-10
conservativeFlux = None
cfluxtag = None
conservativeFlux = {0:'pwl-bdm2'}
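#hedged usage note (not from the original file): numerics modules like this one
#are typically paired with their _p module and run through proteus' parun
#driver, e.g.
#  parun poisson_3d_tetgen_p.py poisson_3d_tetgen_c0p2_n.py
#the exact command line and flags depend on the local proteus installation.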
| {
"content_hash": "b4c0f2c98a3f5ddc6309b1694596846a",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 114,
"avg_line_length": 32.977777777777774,
"alnum_prop": 0.7611185983827493,
"repo_name": "erdc/proteus",
"id": "3539d0076f13c2e1079ed34b56afec2b37e13ca5",
"size": "2968",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "proteus/tests/ci/poisson_3d_tetgen_c0p2_n.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "2790"
},
{
"name": "Asymptote",
"bytes": "1569"
},
{
"name": "C",
"bytes": "2827957"
},
{
"name": "C++",
"bytes": "7262408"
},
{
"name": "Cython",
"bytes": "154607"
},
{
"name": "Dockerfile",
"bytes": "2738"
},
{
"name": "Fortran",
"bytes": "51671"
},
{
"name": "Jupyter Notebook",
"bytes": "33357"
},
{
"name": "Makefile",
"bytes": "19043"
},
{
"name": "Python",
"bytes": "12534530"
},
{
"name": "Roff",
"bytes": "322"
},
{
"name": "Shell",
"bytes": "14084"
}
],
"symlink_target": ""
} |
import os
import shutil
import tempfile
import unittest
from telemetry import story
from telemetry import page as page_module
from telemetry.testing import system_stub
from telemetry.timeline import trace_data
from telemetry.value import trace
class TestBase(unittest.TestCase):
def setUp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
story_set.AddStory(
page_module.Page('http://www.baz.com/', story_set, story_set.base_dir))
story_set.AddStory(
page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
self.story_set = story_set
self._cloud_storage_stub = system_stub.Override(trace, ['cloud_storage'])
def tearDown(self):
if self._cloud_storage_stub:
self._cloud_storage_stub.Restore()
self._cloud_storage_stub = None
@property
def pages(self):
return self.story_set.stories
class TestSet(object):
""" A test set that represents a set that contains any key. """
def __contains__(self, key):
return True
class TestDefaultDict(object):
""" A test default dict that represents a dictionary that contains any key
with value |default_value|. """
def __init__(self, default_value):
self._default_value = default_value
self._test_set = TestSet()
def __contains__(self, key):
return key in self._test_set
def __getitem__(self, key):
return self._default_value
def keys(self):
return self._test_set
class ValueTest(TestBase):
def testRepr(self):
v = trace.TraceValue(self.pages[0], trace_data.TraceData({'test': 1}),
important=True, description='desc')
self.assertEquals('TraceValue(http://www.bar.com/, trace)', str(v))
def testAsDictWhenTraceSerializedAndUploaded(self):
tempdir = tempfile.mkdtemp()
try:
v = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
fh = v.Serialize(tempdir)
trace.cloud_storage.SetCalculatedHashesForTesting(
{fh.GetAbsPath(): 123})
bucket = trace.cloud_storage.PUBLIC_BUCKET
cloud_url = v.UploadToCloud(bucket)
d = v.AsDict()
self.assertEqual(d['file_id'], fh.id)
self.assertEqual(d['cloud_url'], cloud_url)
finally:
shutil.rmtree(tempdir)
def testAsDictWhenTraceIsNotSerializedAndUploaded(self):
test_temp_file = tempfile.NamedTemporaryFile(delete=False)
try:
v = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
trace.cloud_storage.SetCalculatedHashesForTesting(
TestDefaultDict(123))
bucket = trace.cloud_storage.PUBLIC_BUCKET
cloud_url = v.UploadToCloud(bucket)
d = v.AsDict()
self.assertEqual(d['cloud_url'], cloud_url)
finally:
if os.path.exists(test_temp_file.name):
test_temp_file.close()
os.remove(test_temp_file.name)
def _IsEmptyDir(path):
return os.path.exists(path) and not os.listdir(path)
class NoLeakedTempfilesTests(TestBase):
def setUp(self):
super(NoLeakedTempfilesTests, self).setUp()
self.temp_test_dir = tempfile.mkdtemp()
self.actual_tempdir = trace.tempfile.tempdir
trace.tempfile.tempdir = self.temp_test_dir
def testNoLeakedTempFileOnImplicitCleanUp(self):
with trace.TraceValue(None, trace_data.TraceData({'test': 1})):
pass
self.assertTrue(_IsEmptyDir(self.temp_test_dir))
def testNoLeakedTempFileWhenUploadingTrace(self):
v = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
v.CleanUp()
self.assertTrue(_IsEmptyDir(self.temp_test_dir))
def tearDown(self):
super(NoLeakedTempfilesTests, self).tearDown()
shutil.rmtree(self.temp_test_dir)
trace.tempfile.tempdir = self.actual_tempdir
| {
"content_hash": "23b0da8fa7dd5936196d67147cce4213",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 30.368,
"alnum_prop": 0.6862486828240253,
"repo_name": "SummerLW/Perf-Insight-Report",
"id": "d3481461adafc4770557b2d008d0bf488e41f449",
"size": "3959",
"binary": false,
"copies": "10",
"ref": "refs/heads/test",
"path": "telemetry/telemetry/value/trace_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6411"
},
{
"name": "CSS",
"bytes": "14952"
},
{
"name": "HTML",
"bytes": "27508823"
},
{
"name": "JavaScript",
"bytes": "75587"
},
{
"name": "Python",
"bytes": "4638631"
},
{
"name": "Shell",
"bytes": "2124"
}
],
"symlink_target": ""
} |
from django.test import TestCase
import server.interface.nodes as views
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseBadRequest
import server.interface.closest as cviews
import server.interface.util as util
from django.test.client import RequestFactory
class NodeTestCase(TestCase):
"""
Test the node interface.
Kind of obsolete due to test_urls, but still useful
"""
def setUp(self):
self.rf = RequestFactory()
def test_get_edge_tuple(self):
util.get_edge_tuple(None, 51.0, 3.8)
def test_get_node(self):
"""
Fuzzy test
Test whether indexing yields the correct response type
"""
node = views.get_node(self.rf.get('/node?index=0'))
self.assertTrue(isinstance(node, HttpResponse))
self.assertFalse(isinstance(node, HttpResponseNotFound))
node = views.get_node(self.rf.get('/node?index=-5'))
self.assertTrue(isinstance(node, HttpResponseNotFound))
def test_get_from(self):
"""
Fuzzy tests.
They test whether the function returns the correct response kind.
"""
node = cviews.get_id_from_pos(
self.rf.get('/node?lat=51.0&lon=3.8'))
self.assertTrue(isinstance(node, HttpResponse))
node = cviews.get_node_from_pos(
self.rf.get('/node?lat=5.1.0&lon=3.8'))
self.assertTrue(isinstance(node, HttpResponseBadRequest))
node = cviews.get_id_from_pos(
self.rf.get('/node?lat=51.0&lon=3.8.0'))
self.assertTrue(isinstance(node, HttpResponseBadRequest))
def test_in_city(self):
"""
Test whether the coordinates are inside/outside the city
"""
self.assertEquals(views.in_city(self.rf.get('/node?lat=51.0&lon=3.8')).content, "true")
self.assertEquals(views.in_city(self.rf.get('/node?lat=51.0&lon=2.8')).content, "false")
self.assertTrue(isinstance(views.in_city(self.rf.get('/node?lat=51.0&lon=3.8.0')), HttpResponseBadRequest))
| {
"content_hash": "44d23d5da82dbcf60b30155105f93f2b",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 115,
"avg_line_length": 39.59615384615385,
"alnum_prop": 0.6435162700339971,
"repo_name": "simonneuville/runamic_server",
"id": "dbc52706dee4f21e4e9622b15579243c289240dd",
"size": "2059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangoserver/server/interface/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "76077"
},
{
"name": "HTML",
"bytes": "1289731"
},
{
"name": "JavaScript",
"bytes": "282165"
},
{
"name": "Makefile",
"bytes": "629"
},
{
"name": "Python",
"bytes": "111879"
},
{
"name": "Rust",
"bytes": "54288"
},
{
"name": "Shell",
"bytes": "1588"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
import sys
from pants.option.arg_splitter import GLOBAL_SCOPE, ArgSplitter
from pants.option.global_options import GlobalOptionsRegistrar
from pants.option.option_value_container import OptionValueContainer
from pants.option.parser_hierarchy import ParserHierarchy, enclosing_scope
from pants.option.scope import ScopeInfo
class Options(object):
"""The outward-facing API for interacting with options.
Supports option registration and fetching option values.
Examples:
The value in global scope of option '--foo-bar' (registered in global scope) will be selected
in the following order:
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_DEFAULT_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [DEFAULT] section of pants.ini.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in global scope) will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the PANTS_DEFAULT_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.ini.
- The value of the foo_bar key in the [compile] section of pants.ini.
- The value of the foo_bar key in the [DEFAULT] section of pants.ini.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in scope 'compile') will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.ini.
- The value of the foo_bar key in the [compile] section of pants.ini.
- The value of the foo_bar key in the [DEFAULT] section of pants.ini
(because of automatic config file fallback to that section).
- The hard-coded value provided at registration time.
- None.
"""
class OptionTrackerRequiredError(Exception):
"""Options requires an OptionTracker instance."""
@classmethod
def complete_scopes(cls, scope_infos):
"""Expand a set of scopes to include all enclosing scopes.
E.g., if the set contains `foo.bar.baz`, ensure that it also contains `foo.bar` and `foo`.
"""
ret = {GlobalOptionsRegistrar.get_scope_info()}
for scope_info in scope_infos:
ret.add(scope_info)
original_scopes = {si.scope for si in scope_infos}
for scope_info in scope_infos:
scope = scope_info.scope
while scope != '':
if scope not in original_scopes:
ret.add(ScopeInfo(scope, ScopeInfo.INTERMEDIATE))
scope = enclosing_scope(scope)
return ret
@classmethod
def create(cls, env, config, known_scope_infos, args=None, bootstrap_option_values=None,
option_tracker=None,):
"""Create an Options instance.
:param env: a dict of environment variables.
:param config: data from a config file (must support config.get[list](section, name, default=)).
:param known_scope_infos: ScopeInfos for all scopes that may be encountered.
:param args: a list of cmd-line args; defaults to `sys.argv` if None is supplied.
:param bootstrap_option_values: An optional namespace containing the values of bootstrap
options. We can use these values when registering other options.
:param :class:`pants.option.option_tracker.OptionTracker` option_tracker: option tracker
instance to record how option values were assigned.
"""
# We need parsers for all the intermediate scopes, so inherited option values
# can propagate through them.
complete_known_scope_infos = cls.complete_scopes(known_scope_infos)
splitter = ArgSplitter(complete_known_scope_infos)
args = sys.argv if args is None else args
goals, scope_to_flags, target_specs, passthru, passthru_owner = splitter.split_args(args)
if not option_tracker:
raise cls.OptionTrackerRequiredError()
if bootstrap_option_values:
target_spec_files = bootstrap_option_values.target_spec_files
if target_spec_files:
for spec in target_spec_files:
with open(spec) as f:
target_specs.extend(filter(None, [line.strip() for line in f]))
help_request = splitter.help_request
parser_hierarchy = ParserHierarchy(env, config, complete_known_scope_infos, option_tracker)
values_by_scope = {} # Arg values, parsed per-scope on demand.
bootstrap_option_values = bootstrap_option_values
known_scope_to_info = {s.scope: s for s in complete_known_scope_infos}
return cls(goals, scope_to_flags, target_specs, passthru, passthru_owner, help_request,
parser_hierarchy, values_by_scope, bootstrap_option_values, known_scope_to_info,
option_tracker)
def __init__(self, goals, scope_to_flags, target_specs, passthru, passthru_owner, help_request,
parser_hierarchy, values_by_scope, bootstrap_option_values, known_scope_to_info,
option_tracker):
"""The low-level constructor for an Options instance.
Dependees should use `Options.create` instead.
"""
self._goals = goals
self._scope_to_flags = scope_to_flags
self._target_specs = target_specs
self._passthru = passthru
self._passthru_owner = passthru_owner
self._help_request = help_request
self._parser_hierarchy = parser_hierarchy
self._values_by_scope = values_by_scope
self._bootstrap_option_values = bootstrap_option_values
self._known_scope_to_info = known_scope_to_info
self._option_tracker = option_tracker
@property
def tracker(self):
return self._option_tracker
@property
def help_request(self):
return self._help_request
@property
def target_specs(self):
"""The targets to operate on."""
return self._target_specs
@property
def goals(self):
"""The requested goals, in the order specified on the cmd line."""
return self._goals
@property
def known_scope_to_info(self):
return self._known_scope_to_info
@property
def scope_to_flags(self):
return self._scope_to_flags
def drop_flag_values(self):
"""Returns a copy of these options that ignores values specified via flags.
Any pre-cached option values are cleared and only option values that come from option defaults,
the config or the environment are used.
"""
# An empty scope_to_flags to force all values to come via the config -> env hierarchy alone
# and empty values in case we already cached some from flags.
no_flags = {}
no_values = {}
return Options(self._goals,
no_flags,
self._target_specs,
self._passthru,
self._passthru_owner,
self._help_request,
self._parser_hierarchy,
no_values,
self._bootstrap_option_values,
self._known_scope_to_info,
self._option_tracker)
def is_known_scope(self, scope):
"""Whether the given scope is known by this instance."""
return scope in self._known_scope_to_info
def passthru_args_for_scope(self, scope):
# Passthru args "belong" to the last scope mentioned on the command-line.
# Note: If that last scope is a goal, we allow all tasks in that goal to access the passthru
# args. This is to allow the more intuitive
# pants run <target> -- <passthru args>
# instead of requiring
# pants run.py <target> -- <passthru args>.
#
# However note that in the case where multiple tasks run in the same goal, e.g.,
# pants test <target> -- <passthru args>
# Then, e.g., both junit and pytest will get the passthru args even though the user probably
# only intended them to go to one of them. If the wrong one is not a no-op then the error will
# be unpredictable. However this is not a common case, and can be circumvented with an
# explicit test.pytest or test.junit scope.
if (scope and self._passthru_owner and scope.startswith(self._passthru_owner) and
(len(scope) == len(self._passthru_owner) or scope[len(self._passthru_owner)] == '.')):
return self._passthru
else:
return []
def register(self, scope, *args, **kwargs):
"""Register an option in the given scope, using argparse params."""
self.get_parser(scope).register(*args, **kwargs)
def registration_function_for_optionable(self, optionable_class):
"""Returns a function for registering argparse args on the given scope."""
# TODO(benjy): Make this an instance of a class that implements __call__, so we can
    # docstring it, and so it's less weird than attaching properties to a function.
def register(*args, **kwargs):
kwargs['registering_class'] = optionable_class
self.register(optionable_class.options_scope, *args, **kwargs)
# Clients can access the bootstrap option values as register.bootstrap.
register.bootstrap = self.bootstrap_option_values()
# Clients can access the scope as register.scope.
register.scope = optionable_class.options_scope
return register
def get_parser(self, scope):
"""Returns the parser for the given scope, so code can register on it directly."""
return self._parser_hierarchy.get_parser_by_scope(scope)
def walk_parsers(self, callback):
self._parser_hierarchy.walk(callback)
def for_scope(self, scope):
"""Return the option values for the given scope.
Values are attributes of the returned object, e.g., options.foo.
Computed lazily per scope.
"""
# Short-circuit, if already computed.
if scope in self._values_by_scope:
return self._values_by_scope[scope]
# First get enclosing scope's option values, if any.
if scope == GLOBAL_SCOPE:
values = OptionValueContainer()
else:
values = copy.deepcopy(self.for_scope(enclosing_scope(scope)))
# Now add our values.
flags_in_scope = self._scope_to_flags.get(scope, [])
self._parser_hierarchy.get_parser_by_scope(scope).parse_args(flags_in_scope, values)
self._values_by_scope[scope] = values
for option in values:
self._option_tracker.record_option(scope=scope, option=option, value=values[option],
rank=values.get_rank(option))
return values
def registration_args_iter_for_scope(self, scope):
"""Returns an iterator over the registration arguments of each option in this scope.
See `Parser.registration_args_iter` for details.
"""
return self._parser_hierarchy.get_parser_by_scope(scope).registration_args_iter()
def get_fingerprintable_for_scope(self, scope):
"""Returns a list of fingerprintable (option type, option value) pairs for the given scope.
Fingerprintable options are options registered via a "fingerprint=True" kwarg.
"""
pairs = []
# Note that we iterate over options registered at `scope` and at all enclosing scopes, since
# option-using code can read those values indirectly via its own OptionValueContainer, so
# they can affect that code's output.
registration_scope = scope
while registration_scope is not None:
# This iterator will have already sorted the options, so their order is deterministic.
for (name, _, kwargs) in self.registration_args_iter_for_scope(registration_scope):
if kwargs.get('recursive') and not kwargs.get('recursive_root'):
continue # We only need to fprint recursive options once.
if kwargs.get('fingerprint') is not True:
continue
# Note that we read the value from scope, even if the registration was on an enclosing
# scope, to get the right value for recursive options (and because this mirrors what
# option-using code does).
val = self.for_scope(scope)[name]
val_type = kwargs.get('type', '')
pairs.append((val_type, val))
registration_scope = (None if registration_scope == ''
else enclosing_scope(registration_scope))
return pairs
def __getitem__(self, scope):
# TODO(John Sirois): Mainly supports use of dict<str, dict<str, str>> for mock options in tests,
# Consider killing if tests consolidate on using TestOptions instead of the raw dicts.
return self.for_scope(scope)
def bootstrap_option_values(self):
"""Return the option values for bootstrap options.
General code can also access these values in the global scope. But option registration code
cannot, hence this special-casing of this small set of options.
"""
return self._bootstrap_option_values
def for_global_scope(self):
"""Return the option values for the global scope."""
return self.for_scope(GLOBAL_SCOPE)
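

# Hedged usage sketch (not part of the original module): illustrates the call
# pattern documented above. The env/config/known_scope_infos/option_tracker
# arguments are assumed to be built elsewhere (e.g. by the bootstrap
# machinery); the args list and scope names are illustrative only.
def _example_options_usage(env, config, known_scope_infos, option_tracker):
  options = Options.create(env=env,
                           config=config,
                           known_scope_infos=known_scope_infos,
                           args=['./pants', 'compile.java', 'src/java::'],
                           option_tracker=option_tracker)
  # Values are computed lazily per scope, following the precedence documented
  # on the Options class docstring.
  global_values = options.for_global_scope()
  compile_java_values = options.for_scope('compile.java')
  return global_values, compile_java_values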
| {
"content_hash": "7b47d9619980b51b83718e4e36a15fbe",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 100,
"avg_line_length": 43.51602564102564,
"alnum_prop": 0.688149075642631,
"repo_name": "slyphon/pants",
"id": "ccc662ac935da7171cf16e21a159dc6e0c1caf77",
"size": "13724",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/python/pants/option/options.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "70362"
},
{
"name": "Java",
"bytes": "309840"
},
{
"name": "JavaScript",
"bytes": "28545"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4020643"
},
{
"name": "Scala",
"bytes": "85437"
},
{
"name": "Shell",
"bytes": "49550"
},
{
"name": "Thrift",
"bytes": "2858"
}
],
"symlink_target": ""
} |
"""
WSGI config for mybook project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mybook.settings")
application = get_wsgi_application()
| {
"content_hash": "3510eda86f807e7efcea989af6e908ec",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.3125,
"alnum_prop": 0.7686375321336761,
"repo_name": "kyon-bll/django_mybook",
"id": "8b18d4f263ac3027c0cde0932be144df86aa42ec",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mybook/mybook/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17419"
},
{
"name": "HTML",
"bytes": "2079343"
},
{
"name": "JavaScript",
"bytes": "229832"
},
{
"name": "PHP",
"bytes": "4727"
},
{
"name": "Python",
"bytes": "13832"
},
{
"name": "Shell",
"bytes": "3730"
}
],
"symlink_target": ""
} |
"""
@version: ??
@author: 闫刚
@license:
@contact: [email protected]
@site: http://www.cashmerepipeline.com
@software:
@file: anim_utils.py
@time: 2017/12/11 16:13
"""
import maya.cmds as cmds
def getTimeLineStartEnd():
"""
    :return: (start_frame, end_frame) tuple taken from the current playback range
"""
start_frame = cmds.playbackOptions(query=True, min = True)
end_frame = cmds.playbackOptions(query = True, max = True) + 1
return (start_frame, end_frame)
def getFPSUnit():
"""
    :return: the current time unit name (e.g. 'film' for 24 fps)
"""
fps_unit = cmds.currentUnit( query=True, time=True)
    return fps_unit
| {
"content_hash": "468f8451abcd49eb67ae21359d1dece6",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 17.451612903225808,
"alnum_prop": 0.6321626617375231,
"repo_name": "yes7rose/maya_utils",
"id": "5b6eed4e8a839c35563e7c0d26c4c01c7ff20db0",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/maya_utils/anim_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37432"
}
],
"symlink_target": ""
} |
import fiona
import numpy as np
import numpy.ma as ma
import rasterio
import rasterio.features
import rasterio.mask
import pandas as pd
import sys
import pdb
shp_file = '/Users/ricardog/src/eec/data/from-adriana/tropicalforests.shp'
shp_file = '/Users/ricardog/src/eec/predicts/playground/tmp/topical-band34-24.shp'
src_file = '/Users/ricardog/src/eec/data/sources/PAS_1km_2005_0ice.bil'
#src_file = '/Users/ricardog/src/eec/data/sources/pas_1km_2005_0ice.tif'
src_file = 'zip:///Users/ricardog/src/eec/data/sources/PAS_2005.zip!' + \
'PAS_1km_2005_0ice.bil'
out_file = 'pasture-2005-masked.tif'
with rasterio.Env(GDAL_TIFF_INTERNAL_MASK=True):
with fiona.open(shp_file) as shp:
with rasterio.open(src_file, 'r', format='GTiff') as src:
meta = src.meta.copy()
meta.update({'driver': 'GTiff', 'compress': 'lzw', 'predictor': 2})
blocks = src.block_shapes
nans = np.full(blocks[0], np.nan, dtype=np.float32)
with rasterio.open(out_file, 'w', **meta) as dst:
for ji, window in src.block_windows(1):
if ji[0] % 100 == 0:
sys.stdout.write('.')
sys.stdout.flush()
out_transform = src.window_transform(window)
minx, miny = (window[1][0], window[0][0]) * src.affine
maxx, maxy = (window[1][1], window[0][1]) * src.affine
cropped = list(shp.items(bbox=(minx, miny, maxx, maxy)))
if len(cropped) == 0:
pass
#print("%d, %d : skip" % (ji[0], ji[1]))
dst.write(nans, window = window, indexes = 1)
continue
shapes = [feature[1]["geometry"] for feature in cropped]
shape_mask = rasterio.features.geometry_mask(shapes,
transform=out_transform,
invert=False,
out_shape=nans.shape,
all_touched=True)
data = src.read(window=window, masked=True)
data.mask = data.mask | shape_mask
out_shape = data.shape[1:]
df = pd.Series(data.reshape(-1))
df = df.dropna()
#print("%d, %d : %d rows" % (ji[0], ji[1], len(df.index)))
out = df.reindex(range(out_shape[0] * out_shape[1])).values
out = ma.masked_invalid(out.reshape(out_shape))
dst.write(out, window = window, indexes = 1)
print("")
| {
"content_hash": "d5a2e04a1652fcf9e706d380c00689dc",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 82,
"avg_line_length": 44.42857142857143,
"alnum_prop": 0.5562700964630225,
"repo_name": "ricardog/raster-project",
"id": "e1a8be25740146ef03e9d104ef30e9052a2e7f04",
"size": "2511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blocks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "Dockerfile",
"bytes": "509"
},
{
"name": "HTML",
"bytes": "3896"
},
{
"name": "JavaScript",
"bytes": "6062"
},
{
"name": "Jupyter Notebook",
"bytes": "58981"
},
{
"name": "Makefile",
"bytes": "12383"
},
{
"name": "Python",
"bytes": "488587"
},
{
"name": "R",
"bytes": "61424"
},
{
"name": "Shell",
"bytes": "5573"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import datetime
import time
import warnings
from itertools import imap, izip, starmap
from redis.connection import ConnectionPool, UnixDomainSocketConnection
from redis.exceptions import (
ConnectionError,
DataError,
RedisError,
ResponseError,
WatchError,
NoScriptError,
)
def list_or_args(keys, args):
# returns a single list combining keys and args
try:
i = iter(keys)
# a string can be iterated, but indicates
# keys wasn't passed as a list
if isinstance(keys, basestring):
keys = [keys]
except TypeError:
keys = [keys]
if args:
keys.extend(args)
return keys
def timestamp_to_datetime(response):
"Converts a unix timestamp to a Python datetime object"
if not response:
return None
try:
response = int(response)
except ValueError:
return None
return datetime.datetime.fromtimestamp(response)
def string_keys_to_dict(key_string, callback):
return dict.fromkeys(key_string.split(), callback)
def dict_merge(*dicts):
merged = {}
[merged.update(d) for d in dicts]
return merged
def parse_info(response):
"Parse the result of Redis's INFO command into a Python dict"
info = {}
def get_value(value):
if ',' not in value:
return value
sub_dict = {}
for item in value.split(','):
k, v = item.rsplit('=', 1)
try:
sub_dict[k] = int(v)
except ValueError:
sub_dict[k] = v
return sub_dict
for line in response.splitlines():
if line and not line.startswith('#'):
key, value = line.split(':')
try:
if '.' in value:
info[key] = float(value)
else:
info[key] = int(value)
except ValueError:
info[key] = get_value(value)
return info
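# Illustrative example (not part of the original module): an INFO payload
# containing the lines "uptime_in_seconds:100" and "db0:keys=2,expires=0"
# parses to {'uptime_in_seconds': 100, 'db0': {'keys': 2, 'expires': 0}}.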
def pairs_to_dict(response):
"Create a dict given a list of key/value pairs"
it = iter(response)
return dict(izip(it, it))
def zset_score_pairs(response, **options):
"""
If ``withscores`` is specified in the options, return the response as
a list of (value, score) pairs
"""
if not response or not options['withscores']:
return response
score_cast_func = options.get('score_cast_func', float)
it = iter(response)
return zip(it, imap(score_cast_func, it))
def int_or_none(response):
if response is None:
return None
return int(response)
def float_or_none(response):
if response is None:
return None
return float(response)
def parse_config(response, **options):
# this is stupid, but don't have a better option right now
if options['parse'] == 'GET':
return response and pairs_to_dict(response) or {}
return response == 'OK'
def parse_script(response, **options):
parse = options['parse']
if parse in ('FLUSH', 'KILL'):
return response == 'OK'
if parse == 'EXISTS':
return list(imap(bool, response))
return response
class StrictRedis(object):
"""
Implementation of the Redis protocol.
This abstract class provides a Python interface to all Redis commands
and an implementation of the Redis protocol.
Connection and Pipeline derive from this, implementing how
the commands are sent and received to the Redis server
"""
RESPONSE_CALLBACKS = dict_merge(
string_keys_to_dict(
'AUTH DEL EXISTS EXPIRE EXPIREAT HDEL HEXISTS HMSET MOVE MSETNX '
'PERSIST RENAMENX SADD SISMEMBER SMOVE SETEX SETNX SREM ZADD ZREM',
bool
),
string_keys_to_dict(
'DECRBY GETBIT HLEN INCRBY LINSERT LLEN LPUSHX RPUSHX SCARD '
'SDIFFSTORE SETBIT SETRANGE SINTERSTORE STRLEN SUNIONSTORE ZCARD '
'ZREMRANGEBYRANK ZREMRANGEBYSCORE',
int
),
string_keys_to_dict(
# these return OK, or int if redis-server is >=1.3.4
'LPUSH RPUSH',
lambda r: isinstance(r, long) and r or r == 'OK'
),
string_keys_to_dict('ZSCORE ZINCRBY', float_or_none),
string_keys_to_dict(
'FLUSHALL FLUSHDB LSET LTRIM MSET RENAME '
'SAVE SELECT SET SHUTDOWN SLAVEOF WATCH UNWATCH',
lambda r: r == 'OK'
),
string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),
string_keys_to_dict('SDIFF SINTER SMEMBERS SUNION',
lambda r: r and set(r) or set()
),
string_keys_to_dict('ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE',
zset_score_pairs
),
string_keys_to_dict('ZRANK ZREVRANK', int_or_none),
{
'BGREWRITEAOF': lambda r: \
r == 'Background rewriting of AOF file started',
'BGSAVE': lambda r: r == 'Background saving started',
'BRPOPLPUSH': lambda r: r and r or None,
'CONFIG': parse_config,
'HGETALL': lambda r: r and pairs_to_dict(r) or {},
'INFO': parse_info,
'LASTSAVE': timestamp_to_datetime,
'PING': lambda r: r == 'PONG',
'RANDOMKEY': lambda r: r and r or None,
'SCRIPT': parse_script,
'TTL': lambda r: r != -1 and r or None,
}
)
def __init__(self, host='localhost', port=6379,
db=0, password=None, socket_timeout=None,
connection_pool=None,
charset='utf-8', errors='strict', unix_socket_path=None):
if not connection_pool:
kwargs = {
'db': db,
'password': password,
'socket_timeout': socket_timeout,
'encoding': charset,
'encoding_errors': errors
}
# based on input, setup appropriate connection args
if unix_socket_path:
kwargs.update({
'path': unix_socket_path,
'connection_class': UnixDomainSocketConnection
})
else:
kwargs.update({
'host': host,
'port': port
})
connection_pool = ConnectionPool(**kwargs)
self.connection_pool = connection_pool
self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy()
def set_response_callback(self, command, callback):
"Set a custom Response Callback"
self.response_callbacks[command] = callback
def pipeline(self, transaction=False, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return StrictPipeline(
self.connection_pool,
self.response_callbacks,
transaction,
shard_hint)
def transaction(self, func, *watches, **kwargs):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
        should expect a single argument which is a Pipeline object.
"""
shard_hint = kwargs.pop('shard_hint', None)
with self.pipeline(True, shard_hint) as pipe:
while 1:
try:
pipe.watch(*watches)
func(pipe)
return pipe.execute()
except WatchError:
continue
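    # Hedged usage sketch (illustrative only, not part of the original client);
    # the key name and callable are assumptions. The callable receives the
    # Pipeline, reads any watched values, then buffers writes after multi():
    #
    #   def increment_visits(pipe):
    #       current = pipe.get('visits')
    #       pipe.multi()
    #       pipe.set('visits', int(current or 0) + 1)
    #
    #   client = StrictRedis()
    #   client.transaction(increment_visits, 'visits')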
def lock(self, name, timeout=None, sleep=0.1):
"""
Return a new Lock object using key ``name`` that mimics
the behavior of threading.Lock.
If specified, ``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
"""
return Lock(self, name, timeout=timeout, sleep=sleep)
def pubsub(self, shard_hint=None):
"""
Return a Publish/Subscribe object. With this object, you can
subscribe to channels and listen for messages that get published to
them.
"""
return PubSub(self.connection_pool, shard_hint)
#### COMMAND EXECUTION AND PROTOCOL PARSING ####
def execute_command(self, *args, **options):
"Execute a command and return a parsed response"
pool = self.connection_pool
command_name = args[0]
connection = pool.get_connection(command_name, **options)
try:
connection.send_command(*args)
return self.parse_response(connection, command_name, **options)
except ConnectionError:
connection.disconnect()
connection.send_command(*args)
return self.parse_response(connection, command_name, **options)
finally:
pool.release(connection)
def parse_response(self, connection, command_name, **options):
"Parses a response from the Redis server"
response = connection.read_response()
if command_name in self.response_callbacks:
return self.response_callbacks[command_name](response, **options)
return response
#### SERVER INFORMATION ####
def bgrewriteaof(self):
"Tell the Redis server to rewrite the AOF file from data in memory."
return self.execute_command('BGREWRITEAOF')
def bgsave(self):
"""
Tell the Redis server to save its data to disk. Unlike save(),
this method is asynchronous and returns immediately.
"""
return self.execute_command('BGSAVE')
def config_get(self, pattern="*"):
"Return a dictionary of configuration based on the ``pattern``"
return self.execute_command('CONFIG', 'GET', pattern, parse='GET')
def config_set(self, name, value):
"Set config item ``name`` with ``value``"
return self.execute_command('CONFIG', 'SET', name, value, parse='SET')
def dbsize(self):
"Returns the number of keys in the current database"
return self.execute_command('DBSIZE')
def delete(self, *names):
"Delete one or more keys specified by ``names``"
return self.execute_command('DEL', *names)
__delitem__ = delete
def flushall(self):
"Delete all keys in all databases on the current host"
return self.execute_command('FLUSHALL')
def flushdb(self):
"Delete all keys in the current database"
return self.execute_command('FLUSHDB')
def info(self):
"Returns a dictionary containing information about the Redis server"
return self.execute_command('INFO')
def lastsave(self):
"""
Return a Python datetime object representing the last time the
Redis database was saved to disk
"""
return self.execute_command('LASTSAVE')
def ping(self):
"Ping the Redis server"
return self.execute_command('PING')
def save(self):
"""
Tell the Redis server to save its data to disk,
blocking until the save is complete
"""
return self.execute_command('SAVE')
def shutdown(self):
"Shutdown the server"
try:
self.execute_command('SHUTDOWN')
except ConnectionError:
# a ConnectionError here is expected
return
raise RedisError("SHUTDOWN seems to have failed.")
def slaveof(self, host=None, port=None):
"""
Set the server to be a replicated slave of the instance identified
        by the ``host`` and ``port``. If called without arguments, the
instance is promoted to a master instead.
"""
if host is None and port is None:
return self.execute_command("SLAVEOF", "NO", "ONE")
return self.execute_command("SLAVEOF", host, port)
#### BASIC KEY COMMANDS ####
def append(self, key, value):
"""
Appends the string ``value`` to the value at ``key``. If ``key``
doesn't already exist, create it with a value of ``value``.
Returns the new length of the value at ``key``.
"""
return self.execute_command('APPEND', key, value)
def decr(self, name, amount=1):
"""
Decrements the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as 0 - ``amount``
"""
return self.execute_command('DECRBY', name, amount)
def exists(self, name):
"Returns a boolean indicating whether key ``name`` exists"
return self.execute_command('EXISTS', name)
__contains__ = exists
def expire(self, name, time):
"Set an expire flag on key ``name`` for ``time`` seconds"
return self.execute_command('EXPIRE', name, time)
def expireat(self, name, when):
"""
Set an expire flag on key ``name``. ``when`` can be represented
as an integer indicating unix time or a Python datetime object.
"""
if isinstance(when, datetime.datetime):
when = int(time.mktime(when.timetuple()))
return self.execute_command('EXPIREAT', name, when)
def get(self, name):
"""
Return the value at key ``name``, or None if the key doesn't exist
"""
return self.execute_command('GET', name)
def __getitem__(self, name):
"""
Return the value at key ``name``, raises a KeyError if the key
doesn't exist.
"""
value = self.get(name)
if value:
return value
raise KeyError(name)
def getbit(self, name, offset):
"Returns a boolean indicating the value of ``offset`` in ``name``"
return self.execute_command('GETBIT', name, offset)
def getset(self, name, value):
"""
        Set the value at key ``name`` to ``value`` and return the old
        value at key ``name`` atomically
"""
return self.execute_command('GETSET', name, value)
def incr(self, name, amount=1):
"""
Increments the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as ``amount``
"""
return self.execute_command('INCRBY', name, amount)
def keys(self, pattern='*'):
"Returns a list of keys matching ``pattern``"
return self.execute_command('KEYS', pattern)
def mget(self, keys, *args):
"""
Returns a list of values ordered identically to ``keys``
"""
keys = list_or_args(keys, args)
return self.execute_command('MGET', *keys)
def mset(self, mapping):
"Sets each key in the ``mapping`` dict to its corresponding value"
items = []
for pair in mapping.iteritems():
items.extend(pair)
return self.execute_command('MSET', *items)
def msetnx(self, mapping):
"""
Sets each key in the ``mapping`` dict to its corresponding value if
none of the keys are already set
"""
items = []
for pair in mapping.iteritems():
items.extend(pair)
return self.execute_command('MSETNX', *items)
def move(self, name, db):
"Moves the key ``name`` to a different Redis database ``db``"
return self.execute_command('MOVE', name, db)
def persist(self, name):
"Removes an expiration on ``name``"
return self.execute_command('PERSIST', name)
def randomkey(self):
"Returns the name of a random key"
return self.execute_command('RANDOMKEY')
def rename(self, src, dst):
"""
Rename key ``src`` to ``dst``
"""
return self.execute_command('RENAME', src, dst)
def renamenx(self, src, dst):
"Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
return self.execute_command('RENAMENX', src, dst)
def set(self, name, value):
"Set the value at key ``name`` to ``value``"
return self.execute_command('SET', name, value)
__setitem__ = set
def setbit(self, name, offset, value):
"""
Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
indicating the previous value of ``offset``.
"""
value = value and 1 or 0
return self.execute_command('SETBIT', name, offset, value)
def setex(self, name, time, value):
"""
Set the value of key ``name`` to ``value``
that expires in ``time`` seconds
"""
return self.execute_command('SETEX', name, time, value)
def setnx(self, name, value):
"Set the value of key ``name`` to ``value`` if key doesn't exist"
return self.execute_command('SETNX', name, value)
def setrange(self, name, offset, value):
"""
Overwrite bytes in the value of ``name`` starting at ``offset`` with
``value``. If ``offset`` plus the length of ``value`` exceeds the
length of the original value, the new value will be larger than before.
If ``offset`` exceeds the length of the original value, null bytes
will be used to pad between the end of the previous value and the start
of what's being injected.
Returns the length of the new string.
"""
return self.execute_command('SETRANGE', name, offset, value)
def strlen(self, name):
"Return the number of bytes stored in the value of ``name``"
return self.execute_command('STRLEN', name)
def substr(self, name, start, end=-1):
"""
Return a substring of the string at key ``name``. ``start`` and ``end``
are 0-based integers specifying the portion of the string to return.
"""
return self.execute_command('SUBSTR', name, start, end)
def ttl(self, name):
"Returns the number of seconds until the key ``name`` will expire"
return self.execute_command('TTL', name)
def type(self, name):
"Returns the type of key ``name``"
return self.execute_command('TYPE', name)
def watch(self, *names):
"""
Watches the values at keys ``names``, or None if the key doesn't exist
"""
warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))
def unwatch(self):
"""
        Unwatches the value at key ``name``, or None if the key doesn't exist
"""
warnings.warn(DeprecationWarning('Call UNWATCH from a Pipeline object'))
#### LIST COMMANDS ####
def blpop(self, keys, timeout=0):
"""
LPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to LPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
if isinstance(keys, basestring):
keys = [keys]
else:
keys = list(keys)
keys.append(timeout)
return self.execute_command('BLPOP', *keys)
def brpop(self, keys, timeout=0):
"""
RPOP a value off of the first non-empty list
named in the ``keys`` list.
        If none of the lists in ``keys`` has a value to RPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
if isinstance(keys, basestring):
keys = [keys]
else:
keys = list(keys)
keys.append(timeout)
return self.execute_command('BRPOP', *keys)
def brpoplpush(self, src, dst, timeout=0):
"""
Pop a value off the tail of ``src``, push it on the head of ``dst``
and then return it.
This command blocks until a value is in ``src`` or until ``timeout``
seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
forever.
"""
if timeout is None:
timeout = 0
return self.execute_command('BRPOPLPUSH', src, dst, timeout)
def lindex(self, name, index):
"""
Return the item from list ``name`` at position ``index``
Negative indexes are supported and will return an item at the
end of the list
"""
return self.execute_command('LINDEX', name, index)
def linsert(self, name, where, refvalue, value):
"""
Insert ``value`` in list ``name`` either immediately before or after
[``where``] ``refvalue``
Returns the new length of the list on success or -1 if ``refvalue``
is not in the list.
"""
return self.execute_command('LINSERT', name, where, refvalue, value)
def llen(self, name):
"Return the length of the list ``name``"
return self.execute_command('LLEN', name)
def lpop(self, name):
"Remove and return the first item of the list ``name``"
return self.execute_command('LPOP', name)
def lpush(self, name, *values):
"Push ``values`` onto the head of the list ``name``"
return self.execute_command('LPUSH', name, *values)
def lpushx(self, name, value):
"Push ``value`` onto the head of the list ``name`` if ``name`` exists"
return self.execute_command('LPUSHX', name, value)
def lrange(self, name, start, end):
"""
Return a slice of the list ``name`` between
position ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
"""
return self.execute_command('LRANGE', name, start, end)
def lrem(self, name, count, value):
"""
Remove the first ``count`` occurrences of elements equal to ``value``
from the list stored at ``name``.
The count argument influences the operation in the following ways:
count > 0: Remove elements equal to value moving from head to tail.
count < 0: Remove elements equal to value moving from tail to head.
count = 0: Remove all elements equal to value.
"""
return self.execute_command('LREM', name, count, value)
def lset(self, name, index, value):
"Set ``position`` of list ``name`` to ``value``"
return self.execute_command('LSET', name, index, value)
def ltrim(self, name, start, end):
"""
Trim the list ``name``, removing all values not within the slice
between ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
"""
return self.execute_command('LTRIM', name, start, end)
def rpop(self, name):
"Remove and return the last item of the list ``name``"
return self.execute_command('RPOP', name)
def rpoplpush(self, src, dst):
"""
RPOP a value off of the ``src`` list and atomically LPUSH it
on to the ``dst`` list. Returns the value.
"""
return self.execute_command('RPOPLPUSH', src, dst)
def rpush(self, name, *values):
"Push ``values`` onto the tail of the list ``name``"
return self.execute_command('RPUSH', name, *values)
def rpushx(self, name, value):
"Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
return self.execute_command('RPUSHX', name, value)
def sort(self, name, start=None, num=None, by=None, get=None,
desc=False, alpha=False, store=None):
"""
Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
            sorted data itself. Use an "*" to indicate where in the key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = [name]
if by is not None:
pieces.append('BY')
pieces.append(by)
if start is not None and num is not None:
pieces.append('LIMIT')
pieces.append(start)
pieces.append(num)
if get is not None:
# If get is a string assume we want to get a single value.
            # Otherwise assume it's an iterable and we want to get multiple
# values. We can't just iterate blindly because strings are
# iterable.
if isinstance(get, basestring):
pieces.append('GET')
pieces.append(get)
else:
for g in get:
pieces.append('GET')
pieces.append(g)
if desc:
pieces.append('DESC')
if alpha:
pieces.append('ALPHA')
if store is not None:
pieces.append('STORE')
pieces.append(store)
return self.execute_command('SORT', *pieces)
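    # Hedged examples (illustrative only; the key names are assumptions):
    #
    #   client.sort('mylist', start=0, num=10, desc=True, alpha=True)
    #   client.sort('mylist', by='weight_*', get=['object_*', '#'])
    #
    # The second call weights each item by the external key 'weight_<item>' and
    # returns, per item, the value of 'object_<item>' followed by the item
    # itself ('#'), per the BY/GET semantics described in the docstring above.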
#### SET COMMANDS ####
def sadd(self, name, *values):
"Add ``value(s)`` to set ``name``"
return self.execute_command('SADD', name, *values)
def scard(self, name):
"Return the number of elements in set ``name``"
return self.execute_command('SCARD', name)
def sdiff(self, keys, *args):
"Return the difference of sets specified by ``keys``"
keys = list_or_args(keys, args)
return self.execute_command('SDIFF', *keys)
def sdiffstore(self, dest, keys, *args):
"""
Store the difference of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
keys = list_or_args(keys, args)
return self.execute_command('SDIFFSTORE', dest, *keys)
def sinter(self, keys, *args):
"Return the intersection of sets specified by ``keys``"
keys = list_or_args(keys, args)
return self.execute_command('SINTER', *keys)
def sinterstore(self, dest, keys, *args):
"""
Store the intersection of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
keys = list_or_args(keys, args)
return self.execute_command('SINTERSTORE', dest, *keys)
def sismember(self, name, value):
"Return a boolean indicating if ``value`` is a member of set ``name``"
return self.execute_command('SISMEMBER', name, value)
def smembers(self, name):
"Return all members of the set ``name``"
return self.execute_command('SMEMBERS', name)
def smove(self, src, dst, value):
"Move ``value`` from set ``src`` to set ``dst`` atomically"
return self.execute_command('SMOVE', src, dst, value)
def spop(self, name):
"Remove and return a random member of set ``name``"
return self.execute_command('SPOP', name)
def srandmember(self, name):
"Return a random member of set ``name``"
return self.execute_command('SRANDMEMBER', name)
def srem(self, name, *values):
"Remove ``values`` from set ``name``"
return self.execute_command('SREM', name, *values)
def sunion(self, keys, *args):
"Return the union of sets specifiued by ``keys``"
keys = list_or_args(keys, args)
return self.execute_command('SUNION', *keys)
def sunionstore(self, dest, keys, *args):
"""
Store the union of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
keys = list_or_args(keys, args)
return self.execute_command('SUNIONSTORE', dest, *keys)
#### SORTED SET COMMANDS ####
def zadd(self, name, *args, **kwargs):
"""
Set any number of score, element-name pairs to the key ``name``. Pairs
can be specified in two ways:
As *args, in the form of: score1, name1, score2, name2, ...
or as **kwargs, in the form of: name1=score1, name2=score2, ...
The following example would add four values to the 'my-key' key:
redis.zadd('my-key', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4)
"""
pieces = []
if args:
if len(args) % 2 != 0:
raise RedisError("ZADD requires an equal number of "
"values and scores")
pieces.extend(args)
for pair in kwargs.iteritems():
pieces.append(pair[1])
pieces.append(pair[0])
return self.execute_command('ZADD', name, *pieces)
def zcard(self, name):
"Return the number of elements in the sorted set ``name``"
return self.execute_command('ZCARD', name)
def zcount(self, name, min, max):
return self.execute_command('ZCOUNT', name, min, max)
def zincrby(self, name, value, amount=1):
"Increment the score of ``value`` in sorted set ``name`` by ``amount``"
return self.execute_command('ZINCRBY', name, amount, value)
def zinterstore(self, dest, keys, aggregate=None):
"""
Intersect multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)
def zrange(self, name, start, end, desc=False, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in ascending order.
``start`` and ``end`` can be negative, indicating the end of the range.
        ``desc`` a boolean indicating whether to sort the results in descending order
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if desc:
            return self.zrevrange(name, start, end, withscores, score_cast_func)
pieces = ['ZRANGE', name, start, end]
if withscores:
pieces.append('withscores')
options = {'withscores': withscores, 'score_cast_func': score_cast_func}
return self.execute_command(*pieces, **options)
def zrangebyscore(self, name, min, max,
start=None, num=None, withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
        ``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYSCORE', name, min, max]
if start is not None and num is not None:
pieces.extend(['LIMIT', start, num])
if withscores:
pieces.append('withscores')
options = {'withscores': withscores, 'score_cast_func': score_cast_func}
return self.execute_command(*pieces, **options)
def zrank(self, name, value):
"""
Returns a 0-based value indicating the rank of ``value`` in sorted set
``name``
"""
return self.execute_command('ZRANK', name, value)
def zrem(self, name, *values):
"Remove member ``values`` from sorted set ``name``"
return self.execute_command('ZREM', name, *values)
def zremrangebyrank(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with ranks between
``min`` and ``max``. Values are 0-based, ordered from smallest score
to largest. Values can be negative indicating the highest scores.
Returns the number of elements removed
"""
return self.execute_command('ZREMRANGEBYRANK', name, min, max)
def zremrangebyscore(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with scores
between ``min`` and ``max``. Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYSCORE', name, min, max)
def zrevrange(self, name, start, num, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``num`` sorted in descending order.
``start`` and ``num`` can be negative, indicating the end of the range.
``withscores`` indicates to return the scores along with the values
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
pieces = ['ZREVRANGE', name, start, num]
if withscores:
pieces.append('withscores')
options = {'withscores': withscores, 'score_cast_func': score_cast_func}
return self.execute_command(*pieces, **options)
def zrevrangebyscore(self, name, max, min,
start=None, num=None, withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max`` in descending order.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZREVRANGEBYSCORE', name, max, min]
if start is not None and num is not None:
pieces.extend(['LIMIT', start, num])
if withscores:
pieces.append('withscores')
options = {'withscores': withscores, 'score_cast_func': score_cast_func}
return self.execute_command(*pieces, **options)
def zrevrank(self, name, value):
"""
Returns a 0-based value indicating the descending rank of
``value`` in sorted set ``name``
"""
return self.execute_command('ZREVRANK', name, value)
def zscore(self, name, value):
"Return the score of element ``value`` in sorted set ``name``"
return self.execute_command('ZSCORE', name, value)
def zunionstore(self, dest, keys, aggregate=None):
"""
Union multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)
def _zaggregate(self, command, dest, keys, aggregate=None):
pieces = [command, dest, len(keys)]
if isinstance(keys, dict):
keys, weights = keys.keys(), keys.values()
else:
weights = None
pieces.extend(keys)
if weights:
pieces.append('WEIGHTS')
pieces.extend(weights)
if aggregate:
pieces.append('AGGREGATE')
pieces.append(aggregate)
return self.execute_command(*pieces)
#### HASH COMMANDS ####
def hdel(self, name, *keys):
"Delete ``keys`` from hash ``name``"
return self.execute_command('HDEL', name, *keys)
def hexists(self, name, key):
"Returns a boolean indicating if ``key`` exists within hash ``name``"
return self.execute_command('HEXISTS', name, key)
def hget(self, name, key):
"Return the value of ``key`` within the hash ``name``"
return self.execute_command('HGET', name, key)
def hgetall(self, name):
"Return a Python dict of the hash's name/value pairs"
return self.execute_command('HGETALL', name)
def hincrby(self, name, key, amount=1):
"Increment the value of ``key`` in hash ``name`` by ``amount``"
return self.execute_command('HINCRBY', name, key, amount)
def hkeys(self, name):
"Return the list of keys within hash ``name``"
return self.execute_command('HKEYS', name)
def hlen(self, name):
"Return the number of elements in hash ``name``"
return self.execute_command('HLEN', name)
def hset(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name``
Returns 1 if HSET created a new field, otherwise 0
"""
return self.execute_command('HSET', name, key, value)
def hsetnx(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
exist. Returns 1 if HSETNX created a field, otherwise 0.
"""
return self.execute_command("HSETNX", name, key, value)
def hmset(self, name, mapping):
"""
Sets each key in the ``mapping`` dict to its corresponding value
in the hash ``name``
"""
if not mapping:
raise DataError("'hmset' with 'mapping' of length 0")
items = []
for pair in mapping.iteritems():
items.extend(pair)
return self.execute_command('HMSET', name, *items)
def hmget(self, name, keys):
"Returns a list of values ordered identically to ``keys``"
return self.execute_command('HMGET', name, *keys)
def hvals(self, name):
"Return the list of values within hash ``name``"
return self.execute_command('HVALS', name)
def publish(self, channel, message):
"""
Publish ``message`` on ``channel``.
Returns the number of subscribers the message was delivered to.
"""
return self.execute_command('PUBLISH', channel, message)
def eval(self, script, numkeys, *keys_and_args):
"""
Execute the LUA ``script``, specifying the ``numkeys`` the script
will touch and the key names and argument values in ``keys_and_args``.
Returns the result of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
return self.execute_command('EVAL', script, numkeys, *keys_and_args)
def evalsha(self, sha, numkeys, *keys_and_args):
"""
Use the ``sha`` to execute a LUA script already registered via EVAL
or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the
key names and argument values in ``keys_and_args``. Returns the result
of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args)
def script_exists(self, *args):
"""
Check if a script exists in the script cache by specifying the SHAs of
        each script as ``args``. Returns a list of boolean values indicating
        whether each script already exists in the cache.
"""
options = {'parse': 'EXISTS'}
return self.execute_command('SCRIPT', 'EXISTS', *args, **options)
def script_flush(self):
"Flush all scripts from the script cache"
options = {'parse': 'FLUSH'}
return self.execute_command('SCRIPT', 'FLUSH', **options)
def script_kill(self):
"Kill the currently executing LUA script"
options = {'parse': 'KILL'}
return self.execute_command('SCRIPT', 'KILL', **options)
def script_load(self, script):
"Load a LUA ``script`` into the script cache. Returns the SHA."
options = {'parse': 'LOAD'}
return self.execute_command('SCRIPT', 'LOAD', script, **options)
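# --- Usage sketch (illustrative only, not part of the client) ---
# A minimal example of the sorted-set and hash commands defined above. It
# assumes a Redis server is reachable with StrictRedis()'s default connection
# settings; the key names are arbitrary.
def _example_strict_redis_usage():
    r = StrictRedis()
    # StrictRedis.zadd expects score1, name1, score2, name2, ...
    r.zadd('example:scores', 1.0, 'alice', 2.5, 'bob')
    # ZRANGEBYSCORE with scores attached returns a list of (value, score)
    ranked = r.zrangebyscore('example:scores', 0, 10, withscores=True)
    # ZINTERSTORE/ZUNIONSTORE accept a dict mapping source keys to weights
    r.zunionstore('example:merged', {'example:scores': 2, 'example:extra': 1})
    # hash commands: store a mapping and read it back as a dict
    r.hmset('example:hash', {'field1': 'a', 'field2': 'b'})
    fields = r.hgetall('example:hash')
    return ranked, fields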
class Redis(StrictRedis):
"""
Provides backwards compatibility with older versions of redis-py that
changed arguments to some commands to be more Pythonic, sane, or by
accident.
"""
def pipeline(self, transaction=False, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return Pipeline(
self.connection_pool,
self.response_callbacks,
transaction,
shard_hint)
def setex(self, name, value, time):
"""
Set the value of key ``name`` to ``value``
that expires in ``time`` seconds
"""
return self.execute_command('SETEX', name, time, value)
def lrem(self, name, value, num=0):
"""
Remove the first ``num`` occurrences of elements equal to ``value``
from the list stored at ``name``.
The ``num`` argument influences the operation in the following ways:
num > 0: Remove elements equal to value moving from head to tail.
num < 0: Remove elements equal to value moving from tail to head.
num = 0: Remove all elements equal to value.
"""
return self.execute_command('LREM', name, num, value)
def zadd(self, name, *args, **kwargs):
"""
NOTE: The order of arguments differs from that of the official ZADD
        command. For backwards compatibility, this method accepts arguments
        in the form of name1, score1, name2, score2, while the official Redis
        documentation expects score1, name1, score2, name2.
If you're looking to use the standard syntax, consider using the
StrictRedis class. See the API Reference section of the docs for more
information.
Set any number of element-name, score pairs to the key ``name``. Pairs
can be specified in two ways:
As *args, in the form of: name1, score1, name2, score2, ...
or as **kwargs, in the form of: name1=score1, name2=score2, ...
The following example would add four values to the 'my-key' key:
redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4)
"""
pieces = []
if args:
if len(args) % 2 != 0:
raise RedisError("ZADD requires an equal number of "
"values and scores")
pieces.extend(reversed(args))
for pair in kwargs.iteritems():
pieces.append(pair[1])
pieces.append(pair[0])
return self.execute_command('ZADD', name, *pieces)
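# --- Usage sketch (illustrative only) ---
# The legacy Redis class above flips the zadd argument order relative to
# StrictRedis, as its docstring explains. Assumes a local Redis server with
# default connection settings; keys and members are arbitrary.
def _example_legacy_zadd():
    legacy = Redis()
    # Redis.zadd takes name1, score1, name2, score2 (and/or member=score kwargs)
    legacy.zadd('example:legacy', 'alice', 1.0, bob=2.5)
    strict = StrictRedis()
    # StrictRedis.zadd follows the official score1, name1, score2, name2 order
    strict.zadd('example:strict', 1.0, 'alice', 2.5, 'bob')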
class PubSub(object):
"""
PubSub provides publish, subscribe and listen support to Redis channels.
After subscribing to one or more channels, the listen() method will block
until a message arrives on one of the subscribed channels. That message
will be returned and it's safe to start listening again.
"""
def __init__(self, connection_pool, shard_hint=None):
self.connection_pool = connection_pool
self.shard_hint = shard_hint
self.connection = None
self.channels = set()
self.patterns = set()
self.subscription_count = 0
self.subscribe_commands = set(
('subscribe', 'psubscribe', 'unsubscribe', 'punsubscribe')
)
def execute_command(self, *args, **kwargs):
"Execute a publish/subscribe command"
if self.connection is None:
self.connection = self.connection_pool.get_connection(
'pubsub',
self.shard_hint
)
connection = self.connection
try:
connection.send_command(*args)
return self.parse_response()
except ConnectionError:
connection.disconnect()
# resubscribe to all channels and patterns before
# resending the current command
for channel in self.channels:
self.subscribe(channel)
for pattern in self.patterns:
self.psubscribe(pattern)
connection.send_command(*args)
return self.parse_response()
def parse_response(self):
"Parse the response from a publish/subscribe command"
response = self.connection.read_response()
if response[0] in self.subscribe_commands:
self.subscription_count = response[2]
# if we've just unsubscribed from the remaining channels,
# release the connection back to the pool
if not self.subscription_count:
self.connection_pool.release(self.connection)
self.connection = None
return response
def psubscribe(self, patterns):
"Subscribe to all channels matching any pattern in ``patterns``"
if isinstance(patterns, basestring):
patterns = [patterns]
for pattern in patterns:
self.patterns.add(pattern)
return self.execute_command('PSUBSCRIBE', *patterns)
def punsubscribe(self, patterns=[]):
"""
Unsubscribe from any channel matching any pattern in ``patterns``.
If empty, unsubscribe from all channels.
"""
if isinstance(patterns, basestring):
patterns = [patterns]
for pattern in patterns:
try:
self.patterns.remove(pattern)
except KeyError:
pass
return self.execute_command('PUNSUBSCRIBE', *patterns)
def subscribe(self, channels):
"Subscribe to ``channels``, waiting for messages to be published"
if isinstance(channels, basestring):
channels = [channels]
for channel in channels:
self.channels.add(channel)
return self.execute_command('SUBSCRIBE', *channels)
def unsubscribe(self, channels=[]):
"""
Unsubscribe from ``channels``. If empty, unsubscribe
from all channels
"""
if isinstance(channels, basestring):
channels = [channels]
for channel in channels:
try:
self.channels.remove(channel)
except KeyError:
pass
return self.execute_command('UNSUBSCRIBE', *channels)
def listen(self):
"Listen for messages on channels this client has been subscribed to"
while self.subscription_count:
r = self.parse_response()
if r[0] == 'pmessage':
msg = {
'type': r[0],
'pattern': r[1],
'channel': r[2],
'data': r[3]
}
else:
msg = {
'type': r[0],
'pattern': None,
'channel': r[1],
'data': r[2]
}
yield msg
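# --- Usage sketch (illustrative only) ---
# Wiring the PubSub class above to a client. A PubSub object is built directly
# from the client's connection_pool, which is what its constructor expects.
# Assumes a local Redis server; the channel name and payload are arbitrary,
# and some other client is expected to publish on the channel.
def _example_pubsub():
    r = StrictRedis()
    pubsub = PubSub(r.connection_pool)
    pubsub.subscribe('example-channel')
    # elsewhere: StrictRedis().publish('example-channel', 'hello')
    for message in pubsub.listen():
        # listen() yields dicts with 'type', 'pattern', 'channel' and 'data';
        # the first item is the 'subscribe' confirmation, so filter on type.
        if message['type'] == 'message':
            return message['channel'], message['data']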
class BasePipeline(object):
"""
Pipelines provide a way to transmit multiple commands to the Redis server
in one transmission. This is convenient for batch processing, such as
saving all the values in a list to Redis.
All commands executed within a pipeline are wrapped with MULTI and EXEC
calls. This guarantees all commands executed in the pipeline will be
executed atomically.
Any command raising an exception does *not* halt the execution of
subsequent commands in the pipeline. Instead, the exception is caught
and its instance is placed into the response list returned by execute().
Code iterating over the response list should be able to deal with an
instance of an exception as a potential value. In general, these will be
ResponseError exceptions, such as those raised when issuing a command
on a key of a different datatype.
"""
UNWATCH_COMMANDS = set(('DISCARD', 'EXEC', 'UNWATCH'))
def __init__(self, connection_pool, response_callbacks, transaction,
shard_hint):
self.connection_pool = connection_pool
self.connection = None
self.response_callbacks = response_callbacks
if transaction:
pass # TODO enable this when we're all nutcracker-d
# raise RedisError("Transactions are no longer supported")
self.transaction = transaction
self.shard_hint = shard_hint
self.watching = False
self.in_scatter_gather = False
self.reset()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.reset()
def reset(self):
self.command_stack = []
# make sure to reset the connection state in the event that we were
# watching something
if self.watching and self.connection:
try:
# call this manually since our unwatch or
# immediate_execute_command methods can call reset()
self.connection.send_command('UNWATCH')
self.connection.read_response()
except ConnectionError:
# disconnect will also remove any previous WATCHes
self.connection.disconnect()
# clean up the other instance attributes
self.watching = False
self.explicit_transaction = False
# we can safely return the connection to the pool here since we're
# sure we're no longer WATCHing anything
if self.connection:
self.connection_pool.release(self.connection)
self.connection = None
def multi(self):
"""
Start a transactional block of the pipeline after WATCH commands
are issued. End the transactional block with `execute`.
"""
if self.explicit_transaction:
raise RedisError('Cannot issue nested calls to MULTI')
if self.command_stack:
raise RedisError('Commands without an initial WATCH have already '
'been issued')
self.explicit_transaction = True
def execute_command(self, *args, **kwargs):
if (self.watching or args[0] == 'WATCH') and \
not self.explicit_transaction:
return self.immediate_execute_command(*args, **kwargs)
return self.pipeline_execute_command(*args, **kwargs)
def immediate_execute_command(self, *args, **options):
"""
Execute a command immediately, but don't auto-retry on a
ConnectionError if we're already WATCHing a variable. Used when
        issuing WATCH or subsequent commands that retrieve their values, but
        before MULTI is called.
"""
command_name = args[0]
conn = self.connection
# if this is the first call, we need a connection
if not conn:
conn = self.connection_pool.get_connection(command_name,
self.shard_hint)
self.connection = conn
try:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except ConnectionError:
conn.disconnect()
# if we're not already watching, we can safely retry the command
# assuming it was a connection timeout
if not self.watching:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
self.reset()
raise
def pipeline_execute_command(self, *args, **options):
"""
Stage a command to be executed when execute() is next called
Returns the current Pipeline object back so commands can be
chained together, such as:
pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
At some other point, you can then run: pipe.execute(),
which will execute all commands queued in the pipe.
"""
if self.in_scatter_gather:
raise RedisError("You must gather() your data before adding another command to the pipeline")
self.command_stack.append((args, options))
return self
def _execute_transaction(self, connection, commands):
all_cmds = ''.join(starmap(connection.pack_command,
[args for args, options in commands]))
connection.send_packed_command(all_cmds)
# we don't care about the multi/exec any longer
commands = commands[1:-1]
        # parse off the responses for MULTI and all commands prior to EXEC.
        # the only data we care about is the response to the EXEC,
        # which is the last command
for i in range(len(commands)+1):
self.parse_response(connection, '_')
# parse the EXEC.
response = self.parse_response(connection, '_')
if response is None:
raise WatchError("Watched variable changed.")
if len(response) != len(commands):
raise ResponseError("Wrong number of response items from "
"pipeline execution")
# We have to run response callbacks manually
data = []
for r, cmd in izip(response, commands):
if not isinstance(r, Exception):
args, options = cmd
command_name = args[0]
if command_name in self.response_callbacks:
r = self.response_callbacks[command_name](r, **options)
data.append(r)
return data
def _execute_pipeline(self, connection, commands, scatter_gather=False, retry=True):
# build up all commands into a single request to increase network perf
all_cmds = ''.join(starmap(connection.pack_command,
[args for args, options in commands]))
connection.send_packed_command(all_cmds)
def _gather():
if scatter_gather and not self.in_scatter_gather:
raise RedisError("Can't gather results twice")
try:
return [self.parse_response(connection, args[0], **options)
for args, options in commands]
except ConnectionError, e:
connection.disconnect()
self.in_scatter_gather = False
if retry:
return self._execute_pipeline(connection, commands, scatter_gather=False, retry=False)
else:
raise e
finally:
if self.in_scatter_gather:
self.in_scatter_gather = False
self.reset()
if scatter_gather:
self.in_scatter_gather = True
return _gather
else:
return _gather()
def parse_response(self, connection, command_name, **options):
result = StrictRedis.parse_response(
self, connection, command_name, **options)
if command_name in self.UNWATCH_COMMANDS:
self.watching = False
elif command_name == 'WATCH':
self.watching = True
return result
def execute(self, scatter_gather=False):
"Execute all the commands in the current pipeline"
stack = self.command_stack
if self.transaction or self.explicit_transaction:
            stack = [(('MULTI',), {})] + stack + [(('EXEC',), {})]
execute = self._execute_transaction
if scatter_gather:
raise RedisError("Transactions don't support scatter/gather")
else:
execute = self._execute_pipeline
conn = self.connection
if not conn:
conn = self.connection_pool.get_connection('MULTI', self.shard_hint)
# assign to self.connection so reset() releases the connection
# back to the pool after we're done
self.connection = conn
execute_kwargs = {'scatter_gather': scatter_gather} if scatter_gather else {}
try:
return execute(conn, stack, **execute_kwargs)
except ConnectionError:
conn.disconnect()
            # if we were watching a variable, the watch is no longer valid
            # since this connection has died. raise a WatchError, which
            # indicates the user should retry the transaction. If this is
            # more than a temporary failure, the next WATCH the user issues
            # will fail, propagating the real ConnectionError
            if self.watching:
                raise WatchError("A ConnectionError occurred while watching "
                                 "one or more keys")
# otherwise, it's safe to retry since the transaction isn't
# predicated on any state
return execute(conn, stack, **execute_kwargs)
finally:
if not scatter_gather:
self.reset()
def watch(self, *names):
"""
Watches the values at keys ``names``
"""
if self.explicit_transaction:
raise RedisError('Cannot issue a WATCH after a MULTI')
return self.execute_command('WATCH', *names)
def unwatch(self):
"""
Unwatches all previously specified keys
"""
return self.watching and self.execute_command('UNWATCH') or True
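# --- Usage sketch (illustrative only) ---
# Batching commands through the pipeline classes defined below
# (StrictPipeline/Pipeline are thin subclasses of BasePipeline). Assumes a
# local Redis server. Per the class docstring, a failing command does not
# abort the batch; its exception instance is returned in the result list.
def _example_pipeline():
    r = Redis()
    pipe = r.pipeline()
    # staged commands return the pipeline itself, so calls can be chained
    pipe.set('example:foo', 'bar').incr('example:counter')
    results = pipe.execute()
    for result in results:
        if isinstance(result, Exception):
            pass  # e.g. a ResponseError from running against the wrong datatype
    # this fork also allows deferred reads: execute(scatter_gather=True)
    # returns a gather() callable that collects the responses later.
    return results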
class StrictPipeline(BasePipeline, StrictRedis):
"Pipeline for the StrictRedis class"
pass
class Pipeline(BasePipeline, Redis):
"Pipeline for the Redis class"
pass
class LockError(RedisError):
"Errors thrown from the Lock"
pass
class Lock(object):
"""
A shared, distributed Lock. Using Redis for locking allows the Lock
to be shared across processes and/or machines.
It's left to the user to resolve deadlock issues and make sure
multiple clients play nicely together.
"""
LOCK_FOREVER = float(2**31+1) # 1 past max unix time
def __init__(self, redis, name, timeout=None, sleep=0.1):
"""
        Create a new Lock instance named ``name`` using the Redis client
supplied by ``redis``.
``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
Note: If using ``timeout``, you should make sure all the hosts
that are running clients have their time synchronized with a network time
service like ntp.
"""
self.redis = redis
self.name = name
self.acquired_until = None
self.timeout = timeout
self.sleep = sleep
if self.timeout and self.sleep > self.timeout:
raise LockError("'sleep' must be less than 'timeout'")
def __enter__(self):
return self.acquire()
def __exit__(self, exc_type, exc_value, traceback):
self.release()
def acquire(self, blocking=True):
"""
Use Redis to hold a shared, distributed lock named ``name``.
Returns True once the lock is acquired.
If ``blocking`` is False, always return immediately. If the lock
was acquired, return True, otherwise return False.
"""
sleep = self.sleep
timeout = self.timeout
while 1:
unixtime = int(time.time())
if timeout:
timeout_at = unixtime + timeout
else:
timeout_at = Lock.LOCK_FOREVER
timeout_at = float(timeout_at)
if self.redis.setnx(self.name, timeout_at):
self.acquired_until = timeout_at
return True
# We want blocking, but didn't acquire the lock
# check to see if the current lock is expired
existing = float(self.redis.get(self.name) or 1)
if existing < unixtime:
# the previous lock is expired, attempt to overwrite it
existing = float(self.redis.getset(self.name, timeout_at) or 1)
if existing < unixtime:
# we successfully acquired the lock
self.acquired_until = timeout_at
return True
if not blocking:
return False
time.sleep(sleep)
def release(self):
"Releases the already acquired lock"
if self.acquired_until is None:
raise ValueError("Cannot release an unlocked lock")
existing = float(self.redis.get(self.name) or 1)
# if the lock time is in the future, delete the lock
if existing >= self.acquired_until:
self.redis.delete(self.name)
self.acquired_until = None
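# --- Usage sketch (illustrative only) ---
# Acquiring the Lock defined above, either as a context manager or with a
# non-blocking attempt. Assumes a local Redis server; the lock name and
# timeout are arbitrary.
def _example_lock():
    r = StrictRedis()
    # blocking acquisition via the context-manager protocol
    with Lock(r, 'example-lock', timeout=10):
        pass  # do work while holding the lock
    # non-blocking attempt; release() only after a successful acquire()
    lock = Lock(r, 'example-lock', timeout=10)
    if lock.acquire(blocking=False):
        try:
            pass  # do work
        finally:
            lock.release()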
| {
"content_hash": "2ddb74ef223fd9470ffe852284ebda88",
"timestamp": "",
"source": "github",
"line_count": 1658,
"max_line_length": 106,
"avg_line_length": 37.51568154402895,
"alnum_prop": 0.5907782833073423,
"repo_name": "Instagram/redis-py",
"id": "43222d6dcb6fe010c490930047a72fd9901cf582",
"size": "62201",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "redis/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "136974"
}
],
"symlink_target": ""
} |
def n_(num, strng):
return strng if num == 1 else strng + 's'
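# Minimal usage sketch: the helper naively pluralizes by appending 's', so it
# only suits regular English nouns.
if __name__ == '__main__':
    assert n_(1, 'string') == 'string'
    assert n_(3, 'string') == 'strings'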
| {
"content_hash": "1bb8b0bccc9b061dd40c855f4296ae8a",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 45,
"avg_line_length": 33,
"alnum_prop": 0.5909090909090909,
"repo_name": "resmio/nslocapysation",
"id": "4b6a788b418d643aff6c592d027251d510d3953c",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nslocapysation/utils/n_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36854"
}
],
"symlink_target": ""
} |
import pygame, random
from pygame.locals import *
pygame.init()
pygame.mixer.init()
size = width, height = 500, 500
screen = pygame.display.set_mode((size))
pygame.display.set_caption("Run away Spidy")
bg = pygame.image.load("img/bg.png")
bg = bg.convert()
bgrect = bg.get_rect()
screen.blit(bg, (0,0))
song = pygame.mixer.Sound("wav/song.wav")
vl = pygame.mixer.Sound("wav/vl.wav")
#pygame.mixer.music.play(-1)
#song.play()
pygame.mouse.set_visible(False)
sm = pygame.image.load("img/sm.gif")
smrect = sm.get_rect()
ouch = pygame.image.load("img/ouch.jpg")
ouchR = ouch.get_rect()
v = pygame.image.load("img/v.gif")
vrect = v.get_rect()
web = pygame.image.load("img/web.jpg")
webrect = web.get_rect()
black = 0, 0, 0
white = 250,250,250
print(smrect)
smrect.move_ip(10,10)
vrect.move_ip(100,100)
while True:
for event in pygame.event.get():
if event.type == KEYDOWN and event.key == K_ESCAPE:
            pygame.quit()
            raise SystemExit
if event.type == KEYDOWN and event.key == K_DOWN:
smrect = smrect.move([0,10])
if event.type == KEYDOWN and event.key == K_UP:
smrect = smrect.move([0,-10])
if event.type == KEYDOWN and event.key == K_RIGHT:
smrect = smrect.move([10,0])
if event.type == KEYDOWN and event.key == K_LEFT:
smrect = smrect.move([-10,0])
screen.fill(white)
#screen.blit(bg, (0,0))
print("sm",smrect)
print("v ",vrect)
print("collide",vrect.collidelist([smrect]) )
if vrect.collidelist([smrect])==0:
vl.play()
screen.blit(ouch,ouchR)
pygame.display.flip()
pygame.time.delay(3000)
        pygame.quit()
        raise SystemExit
print("random",random.randint(-10,10))
x=random.randint(-1,1)
y=random.randint(-1,1)
speed=20
vrect = vrect.move([x*speed,y*speed])
print("contains",webrect.contains(smrect) )
screen.blit(web,webrect)
screen.blit(sm, smrect)
screen.blit(v, vrect)
pygame.display.flip()
pygame.time.delay(100)
pygame.quit()
# smrect = smrect.move(speed)
# if smrect.left < 0 or smrect.right > width:
# speed[0] = -speed[0]
# if smrect.top < 0 or smrect.bottom > height:
# speed[1] = -speed[1]
| {
"content_hash": "372376fc148f6be2125bf518fccd2c7a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 55,
"avg_line_length": 24.114942528735632,
"alnum_prop": 0.6506196377502383,
"repo_name": "bszcz/python",
"id": "e45dbcd582d274c73894399714ad5c6f4767b55c",
"size": "2170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_away_spidy_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "1248"
},
{
"name": "Python",
"bytes": "40016"
},
{
"name": "Shell",
"bytes": "805"
}
],
"symlink_target": ""
} |
'''
test for vm migration with an assigned host uuid in a newly added vcenter
@author: SyZhao
'''
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.vcenter_operations as vct_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstacklib.utils.ssh as ssh
import test_stub
import os
vcenter_uuid = None
another_host_uuid = None
vm = None
def test():
global vcenter_uuid, vm
vcenter1_name = os.environ['vcenter2_name']
vcenter1_domain_name = os.environ['vcenter2_ip']
vcenter1_username = os.environ['vcenter2_domain_name']
vcenter1_password = os.environ['vcenter2_password']
ova_image_name = os.environ['vcenter2_template_exist']
network_pattern1 = os.environ['vcenter2_network_pattern1']
zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
inv = vct_ops.add_vcenter(vcenter1_name, vcenter1_domain_name, vcenter1_username, vcenter1_password, True, zone_uuid)
vcenter_uuid = inv.uuid
if vcenter_uuid == None:
test_util.test_fail("vcenter_uuid is None")
    # this test assumes the user has already deployed a cluster with more than 2 hosts;
    # the vm is created on one of the hosts, then started with another host's uuid,
    # which triggers vm migration.
vm = test_stub.create_vm_in_vcenter(vm_name = 'vm-start-stop-test', image_name = ova_image_name, l3_name = network_pattern1)
vm.check()
vm.stop()
vm.check()
vm_host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
host_cond = res_ops.gen_query_conditions("status", '=', "Connected")
host_uuids = res_ops.query_resource_fields(res_ops.HOST, host_cond, None, fields=['uuid'])
    for host_uuid in host_uuids:
        if host_uuid.uuid != vm_host_uuid:
            another_host_uuid = host_uuid.uuid
            break
    test_stub.start_vm_with_host_uuid(vm.get_vm(), another_host_uuid)
vm.check()
vm.destroy()
vm.check()
vm.expunge()
vct_ops.delete_vcenter(vcenter_uuid)
test_util.test_pass("vm start and stop of vcenter test passed.")
def error_cleanup():
global vcenter_uuid, vm
if vm:
vm.destroy()
vm.expunge()
if vcenter_uuid:
vct_ops.delete_vcenter(vcenter_uuid)
| {
"content_hash": "94b92a0463a25f4bf08cad9c0c611a4e",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 128,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.6788,
"repo_name": "zstackorg/zstack-woodpecker",
"id": "ed9617746a27c3faea48af67e130fcd01b2ee340",
"size": "2500",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/vcenter/test_vcenter_vm_migrate_by_vm_start.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "46522"
},
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "2891030"
},
{
"name": "Shell",
"bytes": "54266"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from datetime import date
from django import forms
from django.contrib.admin import BooleanFieldListFilter, SimpleListFilter
from django.contrib.admin.options import (
HORIZONTAL, VERTICAL, ModelAdmin, TabularInline,
)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.core.checks import Error
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import SimpleTestCase, TestCase
from django.utils import six
from .models import (
Band, Concert, ValidationTestInlineModel, ValidationTestModel,
)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request)),
['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request, self.band)),
['name', 'bio', 'sign_date'])
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class BandAdmin(ModelAdmin):
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['name', 'bio']})]
ma = BandAdmin(Band, self.site)
form = ma.get_form(None)
self.assertEqual(form._meta.fields, ['name', 'bio'])
class InlineBandAdmin(TabularInline):
model = Concert
fk_name = 'main_band'
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['day', 'transport']})]
ma = InlineBandAdmin(Band, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['day', 'transport'])
def test_lookup_allowed_allows_nonexistent_lookup(self):
"""
Ensure that a lookup_allowed allows a parameter
whose field lookup doesn't exist.
Refs #21129.
"""
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
def test_field_arguments(self):
# If we specify the fields argument, fieldsets_add and fieldsets_change should
# just stick the fields into a formsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_fields(request)), ['name'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name'])
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, it should restrict the fields on the Form class
# to the fields specified. This may cause errors to be raised in the db layer if
# required model fields aren't in fields/fieldsets, but that's preferable to
# ghost errors where you have a field in your Form class that isn't being
# displayed because you forgot to add it to fields/fieldsets
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_formfield_override_readonly(self):
class AdminBandForm(forms.ModelForm):
name = forms.CharField()
class Meta:
exclude = tuple()
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
readonly_fields = ['name']
ma = BandAdmin(Band, self.site)
# `name` shouldn't appear in base_fields because it's part of
# readonly_fields.
self.assertEqual(
list(ma.get_form(request).base_fields),
['bio', 'sign_date']
)
# But it should appear in get_fields()/fieldsets() so it can be
# displayed as read-only.
self.assertEqual(
list(ma.get_fields(request)),
['bio', 'sign_date', 'name']
)
self.assertEqual(
list(ma.get_fieldsets(request)),
[(None, {'fields': ['bio', 'sign_date', 'name']})]
)
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['bio', 'sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE'])
def test_custom_form_validation(self):
# If we specify a form, it should use it allowing custom validation to work
# properly. This won't, however, break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(
type(ma.get_form(request).base_fields['sign_date'].widget),
AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `ModelAdmin.get_form()`
overrides all other declarations. Refs #8999.
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super(BandAdmin, self).get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations. Refs #8999.
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE'])
def test_queryset_override(self):
# If we need to override the queryset of a ModelChoiceField in our custom form
# make sure that RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
band2.save()
class ConcertAdmin(ModelAdmin):
pass
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band">'
'<option value="" selected="selected">---------</option>'
'<option value="%d">The Beatles</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % (band2.id, self.band.id))
class AdminConcertForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdminWithForm(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdminWithForm(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band">'
'<option value="" selected="selected">---------</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % self.band.id)
def test_regression_for_ticket_15820(self):
"""
Ensure that `obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
# and fields with choices specified ought to be a basic Select widget.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band'])
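# Illustrative sketch (not exercised by the tests): how the options covered
# above would typically be combined in a project's admin code, reusing the
# same Band/Concert models. The class names are hypothetical and nothing
# here is registered with an AdminSite.
class ExampleConcertInline(TabularInline):
    model = Concert
    fk_name = 'main_band'
    fields = ['day', 'transport']
class ExampleBandAdmin(ModelAdmin):
    fieldsets = [(None, {'fields': ['name', 'bio', 'sign_date']})]
    inlines = [ExampleConcertInline]
class ExampleConcertAdmin(ModelAdmin):
    raw_id_fields = ['main_band']
    radio_fields = {'day': VERTICAL, 'transport': HORIZONTAL}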
class CheckTestCase(SimpleTestCase):
def assertIsInvalid(self, model_admin, model, msg,
id=None, hint=None, invalid_obj=None):
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
expected = [
Error(
msg,
hint=hint,
obj=invalid_obj,
id=id,
)
]
self.assertEqual(errors, expected)
def assertIsInvalidRegexp(self, model_admin, model, msg,
id=None, hint=None, invalid_obj=None):
"""
Same as assertIsInvalid but treats the given msg as a regexp.
"""
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
self.assertEqual(len(errors), 1)
error = errors[0]
self.assertEqual(error.hint, hint)
self.assertEqual(error.obj, invalid_obj)
self.assertEqual(error.id, id)
six.assertRegex(self, error.msg, msg)
def assertIsValid(self, model_admin, model):
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
expected = []
self.assertEqual(errors, expected)
class RawIdCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields' must be a list or tuple.",
'admin.E001')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'raw_id_fields[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E002')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields[0]' must be a foreign key or a many-to-many field.",
'admin.E003')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FieldsetsCheckTests(CheckTestCase):
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets' must be a list or tuple.",
'admin.E007')
def test_non_iterable_item(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be a list or tuple.",
'admin.E008')
def test_item_not_a_pair(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be of length 2.",
'admin.E009')
def test_second_element_of_item_not_a_dict(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must be a dictionary.",
'admin.E010')
def test_missing_fields_key(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must contain the key 'fields'.",
'admin.E011')
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_specified_both_fields_and_fieldsets(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
fields = ['name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"Both 'fieldsets' and 'fields' are specified.",
'admin.E005')
def test_duplicate_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"There are duplicate field(s) in 'fieldsets[0][1]'.",
'admin.E012')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
class FieldsCheckTests(CheckTestCase):
def test_duplicate_fields_in_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fields = ['name', 'name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' contains duplicate field(s).",
'admin.E006')
def test_inline(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' must be a list or tuple.",
'admin.E004',
invalid_obj=ValidationTestInline)
class FormCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'form' must inherit from 'BaseModelForm'.",
'admin.E016')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
def test_valid_case(self):
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
self.assertIsValid(BandAdmin, Band)
class FilterVerticalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical' must be a list or tuple.",
'admin.E017')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_vertical[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical[0]' must be a many-to-many field.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FilterHorizontalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal' must be a list or tuple.",
'admin.E018')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_horizontal[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal[0]' must be a many-to-many field.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class RadioFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields' must be a dictionary.",
'admin.E021')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'non_existent_field': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E022')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'name': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'name', which is not an instance "
"of ForeignKey, and does not have a 'choices' definition."),
'admin.E023')
def test_invalid_value(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or admin.VERTICAL.",
'admin.E024')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class PrepopulatedFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields' must be a dictionary.",
'admin.E026')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {'non_existent_field': ("slug",)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E027')
def test_missing_field_again(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('non_existent_field',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields[\"slug\"][0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E030')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ('name',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'users', which must not be "
"a DateTimeField, a foreign key, or a many-to-many field."),
'admin.E028')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('name',)}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display' must be a list or tuple.",
'admin.E107')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'list_display[0]' refers to 'non_existent_field', which is not a callable, an attribute "
"of 'ValidationTestModelAdmin', or an attribute or method on 'modeladmin.ValidationTestModel'."),
'admin.E108')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display[0]' must not be a many-to-many field.",
'admin.E109')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayLinksCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links' must be a list, a tuple, or None.",
'admin.E110')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel, (
"The value of 'list_display_links[0]' refers to "
"'non_existent_field', which is not defined in 'list_display'."
), 'admin.E111'
)
def test_missing_in_list_display(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links[0]' refers to 'name', which is not defined in 'list_display'.",
'admin.E111')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_None_is_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = None
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListFilterTests(CheckTestCase):
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter' must be a list or tuple.",
'admin.E112')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' refers to 'non_existent_field', which does not refer to a Field.",
'admin.E116')
def test_not_filter(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (RandomClass,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must inherit from 'ListFilter'.",
'admin.E113')
def test_not_filter_again(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', RandomClass),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_filter_again_again(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', AwesomeFilter),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_associated_with_field_name(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (BooleanFieldListFilter,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must not inherit from 'FieldListFilter'.",
'admin.E114')
def test_valid_case(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListPerPageCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_per_page' must be an integer.",
'admin.E118')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListMaxShowAllCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_max_show_all' must be an integer.",
'admin.E119')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 200
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SearchFieldsCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'search_fields' must be a list or tuple.",
'admin.E126')
class DateHierarchyCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'date_hierarchy' refers to 'non_existent_field', which "
"is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E127')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' must be a DateField or DateTimeField.",
'admin.E128')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class OrderingCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' must be a list or tuple.",
'admin.E031'
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'.",
'admin.E033'
)
def test_random_marker_not_alone(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' has the random ordering marker '?', but contains "
"other fields as well.",
'admin.E032',
hint='Either remove the "?", or remove the other fields.'
)
def test_valid_random_marker_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_complex_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListSelectRelatedCheckTests(CheckTestCase):
def test_invalid_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_select_related' must be a boolean, tuple or list.",
'admin.E117')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveAsCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_as' must be a boolean.",
'admin.E101')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveOnTopCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_on_top' must be a boolean.",
'admin.E102')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class InlinesCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'inlines' must be a list or tuple.",
'admin.E103')
def test_not_model_admin(self):
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must inherit from 'BaseModelAdmin'\.",
'admin.E104')
def test_missing_model_field(self):
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must have a 'model' attribute\.",
'admin.E105')
def test_invalid_model_type(self):
""" Test if `model` attribute on inline model admin is a models.Model.
"""
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"The value of '.*\.ValidationTestInline.model' must be a Model\.",
'admin.E106')
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FkNameCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = 'non_existent_field'
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"'modeladmin.ValidationTestInlineModel' has no field named 'non_existent_field'.",
'admin.E202',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ExtraCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'extra' must be an integer.",
'admin.E203',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MaxNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'max_num' must be an integer.",
'admin.E204',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MinNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'min_num' must be an integer.",
'admin.E205',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FormsetCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'formset' must inherit from 'BaseModelFormSet'.",
'admin.E206',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayEditableTests(CheckTestCase):
def test_list_display_links_is_none(self):
"""
list_display and list_editable can contain the same values
when list_display_links is None
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = list_display
list_display_links = None
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_first_item(self):
"""
The first item in list_display can be the same as the first in
list_editable.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['name', 'slug']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_in_list_editable(self):
"""
The first item in list_display can be in list_editable as long as
list_display_links is defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be the same as the first item
in list_editable if list_display_links is not defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name']
list_editable = ['name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[0]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
def test_list_display_first_item_in_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be in list_editable if
list_display_links isn't defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[1]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
class ModelAdminPermissionTests(SimpleTestCase):
class MockUser(object):
def has_module_perms(self, app_label):
if app_label == "modeladmin":
return True
return False
class MockAddUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.add_band":
return True
return False
class MockChangeUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.change_band":
return True
return False
class MockDeleteUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.delete_band":
return True
return False
def test_has_add_permission(self):
"""
Ensure that has_add_permission returns True for users who can add
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_add_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_add_permission(request))
def test_has_change_permission(self):
"""
Ensure that has_change_permission returns True for users who can edit
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_change_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_change_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_change_permission(request))
def test_has_delete_permission(self):
"""
Ensure that has_delete_permission returns True for users who can delete
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_delete_permission(request))
def test_has_module_permission(self):
"""
Ensure that has_module_permission returns True for users who have any
permission for the module and False for users who don't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_module_permission(request))
original_app_label = ma.opts.app_label
ma.opts.app_label = 'anotherapp'
try:
request.user = self.MockAddUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_module_permission(request))
finally:
ma.opts.app_label = original_app_label
| {
"content_hash": "55d9757b26a4d390eb239cbe7e32d480",
"timestamp": "",
"source": "github",
"line_count": 1703,
"max_line_length": 116,
"avg_line_length": 34.50029359953024,
"alnum_prop": 0.607243762126834,
"repo_name": "gitaarik/django",
"id": "790d24f66d9342c25b00bbbfdaf5613e710a751d",
"size": "58754",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/modeladmin/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52372"
},
{
"name": "HTML",
"bytes": "170531"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11546984"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""
Demonstrates how to limit the extent of grid lines
"""
# Major library imports
from numpy import array, linspace, zeros
from scipy.special import jn
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import UItem, View
# Chaco imports
from chaco.api import ArrayPlotData, HPlotContainer, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some x-y data series to plot
x = linspace(-2.0, 10.0, 100)
pd = ArrayPlotData(index = x)
for i in range(5):
pd.set_data("y" + str(i), jn(i,x))
# Create some line plots of some of the data
plot = Plot(pd, title="Line Plot", padding=50, border_visible=True)
plot.legend.visible = True
plot.plot(("index", "y0", "y1", "y2"), name="j_n, n<3", color="auto")
plot.plot(("index", "y3"), name="j_3", color="auto")
plot.x_grid.line_color = "black"
plot.y_grid.line_color = "black"
xmin, xmax = 1.0, 6.0
ymin, ymax = 0.2, 0.80001
plot.x_grid.set(data_min = xmin, data_max = xmax,
transverse_bounds = (ymin, ymax),
transverse_mapper = plot.y_mapper)
plot.y_grid.set(data_min = ymin, data_max = ymax,
transverse_bounds = (xmin, xmax),
transverse_mapper = plot.x_mapper)
# Attach some tools to the plot
plot.tools.append(PanTool(plot))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
# A second plot whose vertical grid lines are clipped to the jn(3) function
def my_bounds_func(ticks):
""" Returns y_low and y_high for each grid tick in the array **ticks** """
tmp = array([zeros(len(ticks)),jn(3, ticks)]).T
return tmp
func_plot = Plot(pd, padding=50, border_visible=True)
func_plot.plot(("index", "y3"), color="red")
func_plot.x_grid.set(transverse_bounds = my_bounds_func,
transverse_mapper = func_plot.y_mapper,
line_color="black")
func_plot.tools.append(PanTool(func_plot))
container = HPlotContainer()
container.add(plot)
container.add(func_plot)
return container
#===============================================================================
# Attributes to use for the plot view.
size=(900,500)
title="Grids with bounded extents"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(UItem('plot', editor=ComponentEditor()),
width=size[0], height=size[1], resizable=True,
title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
| {
"content_hash": "cbfacc14b14f671c1d599d385630a65d",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 82,
"avg_line_length": 33.96808510638298,
"alnum_prop": 0.5530848731600376,
"repo_name": "tommy-u/chaco",
"id": "b3dddacb53fd901bfea6387f4fdbc257aefb6665",
"size": "3215",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/demo/basic/bounded_grids.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "57089"
},
{
"name": "C++",
"bytes": "9881"
},
{
"name": "Gnuplot",
"bytes": "611"
},
{
"name": "Python",
"bytes": "2475987"
}
],
"symlink_target": ""
} |
import numpy as np
from .spectrum import Spectrum, unify_mz
from .spectrum_io import hdf5_load
from .spectrum_utils import ThresholdedPeakFiltering
from copy import deepcopy
from sklearn.metrics import zero_one_loss, f1_score, precision_score, recall_score
def load_spectra(datafile):
"""
Loads the spectra from an hdf5 file into memory
:param datafile: the hdf5 file containing the spectra
:return: the spectra in an ndarray.
"""
spectra = hdf5_load(datafile)
thresher = ThresholdedPeakFiltering(threshold=250)
spectra = thresher.fit_transform(spectra)
return spectra
def spectrum_to_matrix(spectra):
"""
Convert an array of spectra to a ndarray
:param spectra: The spectra to extract
:return: ndarray of the peak intensities
"""
new_spectra = deepcopy(spectra)
unify_mz(new_spectra)
data = []
for s in new_spectra:
data.append(s.intensity_values)
return np.asarray(data)
def extract_tags(spectra):
tags = []
for s in spectra:
if("_Non_Infected_" in s.metadata["file"]):
tags.append(0)
else:
tags.append(1)
return np.asarray(tags)
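# Illustrative end-to-end use of the helpers above ("spectra.h5" is a
# placeholder path, not a file shipped with this code):
#     spectra = load_spectra("spectra.h5")
#     X = spectrum_to_matrix(spectra)  # (n_spectra, n_peaks) intensity matrix
#     y = extract_tags(spectra)        # 0 = non-infected, 1 = infected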
def evaluate_learner(y_true, y_pred):
results = {}
results["zero_one_loss"] = zero_one_loss(y_true, y_pred)
results["f1_score"] = f1_score(y_true, y_pred)
results["precision"] = precision_score(y_true, y_pred)
results["recall"] = recall_score(y_true, y_pred)
    return results
| {
"content_hash": "f92fb5dc02dda9d9e19ac0b440a2d375",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 82,
"avg_line_length": 28.88235294117647,
"alnum_prop": 0.6727766463000678,
"repo_name": "francisbrochu/microbiome-summer-school-2017_mass-spec",
"id": "09a089aab41012711e18f9e6756e0f64ecfbed2b",
"size": "1498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/tutorial_code/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17554"
},
{
"name": "C++",
"bytes": "39869"
},
{
"name": "CMake",
"bytes": "18489"
},
{
"name": "Jupyter Notebook",
"bytes": "16468"
},
{
"name": "Makefile",
"bytes": "32922"
},
{
"name": "Python",
"bytes": "37021"
},
{
"name": "Shell",
"bytes": "586"
}
],
"symlink_target": ""
} |
import math
from i2c_core import i2c_core
class HMC5883(object):
# Define registers values from datasheet
ConfigurationRegisterA = 0x00
ConfigurationRegisterB = 0x01
ModeRegister = 0x02
AxisXDataRegisterMSB = 0x03
AxisXDataRegisterLSB = 0x04
AxisZDataRegisterMSB = 0x05
AxisZDataRegisterLSB = 0x06
AxisYDataRegisterMSB = 0x07
AxisYDataRegisterLSB = 0x08
StatusRegister = 0x09
    IdentificationRegisterA = 0x0a  # datasheet register 10
    IdentificationRegisterB = 0x0b  # datasheet register 11
    IdentificationRegisterC = 0x0c  # datasheet register 12
MeasurementContinuous = 0x00
MeasurementSingleShot = 0x01
MeasurementIdle = 0x03
def __init__(self, address=0x1e, busnum=-1, gauss=1.3, debug=False):
self.debug = debug
self.i2c = i2c_core(address, busnum=busnum, debug=debug,)
self.i2c.write_8(self.ConfigurationRegisterA, 0b01110000) # Set to 8 samples @ 15Hz
self.set_scale(gauss, debug=debug)
self.set_continuous_mode() # Continuous sampling
# def read_word(self, reg):
# high = self.i2c.read_byte(address, reg)
# low = self.i2c.read_byte(address, reg+1)
# val = (high << 8) + low
# return val
# def read_word_2c(self, reg):
# val = read_word(reg)
# if (val >= 0x8000):
# return -((65535 - val) + 1)
# else:
# return val
def set_scale(self, gauss, debug=False):
if gauss == 0.88:
self.scale_reg = 0x00
self.scale = 0.73
elif gauss == 1.3:
self.scale_reg = 0x01
self.scale = 0.92
elif gauss == 1.9:
self.scale_reg = 0x02
self.scale = 1.22
elif gauss == 2.5:
self.scale_reg = 0x03
self.scale = 1.52
elif gauss == 4.0:
self.scale_reg = 0x04
self.scale = 2.27
elif gauss == 4.7:
self.scale_reg = 0x05
self.scale = 2.56
elif gauss == 5.6:
self.scale_reg = 0x06
self.scale = 3.03
elif gauss == 8.1:
self.scale_reg = 0x07
            self.scale = 4.35
        else:
            raise ValueError("Unsupported gauss value: %s" % gauss)
        self.scale_reg = self.scale_reg << 5
        self.set_option(self.ConfigurationRegisterB, self.scale_reg)
        if debug:
            print("HMC5883L set : gauss %s, scale %s" % (gauss, self.scale))
def set_option(self, register, *function_set):
options = 0x00
for function in function_set:
options = options | function
self.i2c.write_8(register, options)
def get_axes(self):
magno_x = self.i2c.read_word_2c(self.AxisXDataRegisterMSB)
magno_y = self.i2c.read_word_2c(self.AxisYDataRegisterMSB)
magno_z = self.i2c.read_word_2c(self.AxisZDataRegisterMSB)
if (magno_x == -4096):
magno_x = None
else:
magno_x = round(magno_x * self.scale, 4)
if (magno_y == -4096):
magno_y = None
else:
magno_y = round(magno_y * self.scale, 4)
if (magno_z == -4096):
magno_z = None
else:
magno_z = round(magno_z * self.scale, 4)
return (magno_x, magno_y, magno_z)
def get_heading(self):
(scaled_x, scaled_y, scaled_z) = self.get_axes()
heading_rad = math.atan2(scaled_y, scaled_x)
heading_rad += self.declination
# Correct for reversed heading
if(heading_rad < 0):
heading_rad += 2 * math.pi
# Check for wrap and compensate
if(heading_rad > 2 * math.pi):
heading_rad -= 2 * math.pi
# Convert to degrees from radians
heading_deg = heading_rad * 180 / math.pi
degrees = math.floor(heading_deg)
minutes = round(((heading_deg - degrees) * 60))
return (degrees, minutes)
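    # Worked example for get_heading() with invented axis readings (not taken
    # from a real sensor): with scaled_x = 120.0, scaled_y = -120.0 and a
    # declination of 0, atan2 gives -pi/4; the wrap-around correction adds
    # 2*pi, giving 7*pi/4, i.e. 315 degrees and 0 minutes.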
def set_declination(self, degree, min=0):
self.declinationDeg = degree
self.declinationMin = min
self.declination = (degree + min / 60) * (math.pi / 180)
def __str__(self):
ret_str = ""
(x, y, z) = self.get_axes()
ret_str += "Axis X: " + str(x) + "\n"
ret_str += "Axis Y: " + str(y) + "\n"
ret_str += "Axis Z: " + str(z) + "\n"
ret_str += "Declination: " + self.get_declination_string() + "\n"
ret_str += "Heading: " + self.get_heading_string() + "\n"
return ret_str
def get_declination_string(self):
return str(self.declinationDeg) + " deg, " + str(self.declinationMin) + " minutes"
def get_heading_string(self):
(degrees, minutes) = self.get_heading()
return str(degrees) + " deg, " + str(minutes) + " minutes"
def set_continuous_mode(self):
self.set_option(self.ModeRegister, self.MeasurementContinuous)
if __name__ == "__main__":
# constructor defaults : address=0x1e, gauss=1.3, debug=False
i2c_HMC5883l = HMC5883(gauss=1.3)
i2c_HMC5883l.set_declination(2, 18)
while True:
        print(i2c_HMC5883l.get_heading())
| {
"content_hash": "f3e2d51203b7e1c70c7e754d06369230",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 93,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.5738705738705738,
"repo_name": "CaptainStouf/python_libs",
"id": "19b6c4902bfd1fd2b52041b8b0196125ae72a8c5",
"size": "5556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i2c/i2c_hmc5883l.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42017"
}
],
"symlink_target": ""
} |
import luigi.worker
from luigi.worker import Worker
from luigi import Task, RemoteScheduler, Parameter
import unittest
import logging
import luigi.notifications
from mock import Mock
luigi.notifications.DEBUG = True
class DummyTask(Task):
param = Parameter()
def __init__(self, *args, **kwargs):
super(DummyTask, self).__init__(*args, **kwargs)
self.has_run = False
    def complete(self):
        # Report not-complete the first time this is checked and complete
        # afterwards, so the scheduler runs this task exactly once.
        old_value = self.has_run
        self.has_run = True
        return old_value
def run(self):
logging.debug("%s - setting has_run", self.task_id)
self.has_run = True
class MultiprocessWorkerTest(unittest.TestCase):
def setUp(self):
self.scheduler = RemoteScheduler()
self.scheduler.add_worker = Mock()
self.scheduler.add_task = Mock()
self.worker = Worker(scheduler=self.scheduler, worker_id='X', worker_processes=2)
def tearDown(self):
self.worker.stop()
def test_positive_path(self):
a = DummyTask("a")
b = DummyTask("b")
class MultipleRequirementTask(DummyTask):
def requires(self):
return [a, b]
c = MultipleRequirementTask("C")
self.assertTrue(self.worker.add(c))
self.scheduler.get_work = Mock(side_effect=[(3, str(a)), (2, str(b)), (1, str(c)), (0, None), (0, None)])
self.assertTrue(self.worker.run())
self.assertTrue(c.has_run)
def test_path_with_task_failures(self):
class FailingTask(DummyTask):
def run(self):
raise Exception("I am failing")
a = FailingTask("a")
b = FailingTask("b")
class MultipleRequirementTask(DummyTask):
def requires(self):
return [a, b]
c = MultipleRequirementTask("C")
self.assertTrue(self.worker.add(c))
self.scheduler.get_work = Mock(side_effect=[(3, str(a)), (2, str(b)), (1, str(c)), (0, None), (0, None)])
self.assertFalse(self.worker.run())
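    # Note on the mocks above: scheduler.get_work is given a side_effect list,
    # so each call returns the next tuple (apparently a pending-task count and
    # a task id in this luigi version); the trailing (0, None) entries signal
    # that no runnable work remains, which lets run() terminate.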
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "4f188c0adf4f6e376113ffb336b97ba0",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 113,
"avg_line_length": 26.44871794871795,
"alnum_prop": 0.5971885603490062,
"repo_name": "Mappy/luigi",
"id": "b4f68f3861a988d4f39bc22408a4515b44aacb1b",
"size": "2638",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/worker_multiprocess_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from oslo_log import log as logging
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import Integer, MetaData, String, Table, ForeignKey
from cinder.i18n import _
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
Table('snapshots', meta, autoload=True)
# New table
snapshot_metadata = Table(
'snapshot_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('snapshot_id', String(length=36), ForeignKey('snapshots.id'),
nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
mysql_engine='InnoDB'
)
try:
snapshot_metadata.create()
except Exception:
LOG.error(_("Table |%s| not created!"), repr(snapshot_metadata))
raise
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
snapshot_metadata = Table('snapshot_metadata',
meta,
autoload=True)
try:
snapshot_metadata.drop()
except Exception:
LOG.error(_("snapshot_metadata table not dropped"))
raise
| {
"content_hash": "7c4ee05543e5f0738bb3f04ff3eb2bc5",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 28.979166666666668,
"alnum_prop": 0.6096333572969087,
"repo_name": "tmenjo/cinder-2015.1.1",
"id": "b5377115e9de74110d6e5303fb9dfd0fa021401a",
"size": "1964",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10804398"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
"""
Implementation of optimized einsum.
"""
from __future__ import division, absolute_import, print_function
import itertools
from numpy.compat import basestring
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asanyarray, tensordot
from numpy.core.overrides import array_function_dispatch
__all__ = ['einsum', 'einsum_path']
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
einsum_symbols_set = set(einsum_symbols)
def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
"""
Computes the number of FLOPS in the contraction.
Parameters
----------
idx_contraction : iterable
The indices involved in the contraction
inner : bool
Does this contraction require an inner product?
num_terms : int
The number of terms in a contraction
size_dictionary : dict
The size of each of the indices in idx_contraction
Returns
-------
flop_count : int
The total number of FLOPS required for the contraction.
Examples
--------
>>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
30
>>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
60
"""
overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
op_factor = max(1, num_terms - 1)
if inner:
op_factor += 1
return overall_size * op_factor
def _compute_size_by_dict(indices, idx_dict):
"""
Computes the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
"""
ret = 1
for i in indices:
ret *= idx_dict[i]
return ret
def _find_contraction(positions, input_sets, output_set):
"""
Finds the contraction for a given set of input and output sets.
Parameters
----------
positions : iterable
Integer positions of terms used in the contraction.
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
Returns
-------
new_result : set
The indices of the resulting contraction
remaining : list
List of sets that have not been contracted, the new set is appended to
the end of this list
idx_removed : set
Indices removed from the entire contraction
idx_contraction : set
The indices used in the current contraction
Examples
--------
# A simple dot product test case
>>> pos = (0, 1)
>>> isets = [set('ab'), set('bc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
# A more complex case with additional terms in the contraction
>>> pos = (0, 2)
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
"""
idx_contract = set()
idx_remain = output_set.copy()
remaining = []
for ind, value in enumerate(input_sets):
if ind in positions:
idx_contract |= value
else:
remaining.append(value)
idx_remain |= value
new_result = idx_remain & idx_contract
idx_removed = (idx_contract - new_result)
remaining.append(new_result)
return (new_result, remaining, idx_removed, idx_contract)
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
"""
Computes all possible pair contractions, sieves the results based
on ``memory_limit`` and returns the lowest cost path. This algorithm
scales factorial with respect to the elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The optimal contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
full_results = [(0, [], input_sets)]
for iteration in range(len(input_sets) - 1):
iter_results = []
# Compute all unique pairs
for curr in full_results:
cost, positions, remaining = curr
for con in itertools.combinations(range(len(input_sets) - iteration), 2):
# Find the contraction
cont = _find_contraction(con, remaining, output_set)
new_result, new_input_sets, idx_removed, idx_contract = cont
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(new_result, idx_dict)
if new_size > memory_limit:
continue
# Build (total_cost, positions, indices_remaining)
total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
new_pos = positions + [con]
iter_results.append((total_cost, new_pos, new_input_sets))
# Update combinatorial list, if we did not find anything return best
# path + remaining contractions
if iter_results:
full_results = iter_results
else:
path = min(full_results, key=lambda x: x[0])[1]
path += [tuple(range(len(input_sets) - iteration))]
return path
# If we have not found anything return single einsum contraction
if len(full_results) == 0:
return [tuple(range(len(input_sets)))]
path = min(full_results, key=lambda x: x[0])[1]
return path
def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
"""Compute the cost (removed size + flops) and resultant indices for
performing the contraction specified by ``positions``.
Parameters
----------
positions : tuple of int
The locations of the proposed tensors to contract.
input_sets : list of sets
The indices found on each tensors.
output_set : set
The output indices of the expression.
idx_dict : dict
Mapping of each index to its size.
memory_limit : int
The total allowed size for an intermediary tensor.
path_cost : int
The contraction cost so far.
naive_cost : int
The cost of the unoptimized expression.
Returns
-------
cost : (int, int)
A tuple containing the size of any indices removed, and the flop cost.
positions : tuple of int
The locations of the proposed tensors to contract.
new_input_sets : list of sets
The resulting new list of indices if this proposed contraction is performed.
"""
# Find the contraction
contract = _find_contraction(positions, input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(idx_result, idx_dict)
if new_size > memory_limit:
return None
# Build sort tuple
old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
removed_size = sum(old_sizes) - new_size
# NB: removed_size used to be just the size of any removed indices i.e.:
# helpers.compute_size_by_dict(idx_removed, idx_dict)
cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
sort = (-removed_size, cost)
# Sieve based on total cost as well
if (path_cost + cost) > naive_cost:
return None
# Add contraction to possible choices
return [sort, positions, new_input_sets]
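# Illustrative call (all sizes invented): with input_sets = [set('abd'),
# set('ac'), set('bdc')], output_set = set('ac'), idx_dict = {'a': 2, 'b': 3,
# 'c': 4, 'd': 5} and generous memory/cost limits, positions (0, 2) contract
# terms 0 and 2 into 'ac' (8 elements), remove 82 elements worth of operands
# and cost 240 flops, so the function returns [(-82, 240), (0, 2), <new sets>].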
def _update_other_results(results, best):
"""Update the positions and provisional input_sets of ``results`` based on
performing the contraction result ``best``. Remove any involving the tensors
contracted.
Parameters
----------
results : list
List of contraction results produced by ``_parse_possible_contraction``.
best : list
The best contraction of ``results`` i.e. the one that will be performed.
Returns
-------
mod_results : list
The list of modified results, updated with outcome of ``best`` contraction.
"""
best_con = best[1]
bx, by = best_con
mod_results = []
for cost, (x, y), con_sets in results:
# Ignore results involving tensors just contracted
if x in best_con or y in best_con:
continue
# Update the input_sets
del con_sets[by - int(by > x) - int(by > y)]
del con_sets[bx - int(bx > x) - int(bx > y)]
con_sets.insert(-1, best[2][-1])
# Update the position indices
mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
mod_results.append((cost, mod_con, con_sets))
return mod_results
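# Small bookkeeping example (positions invented): if ``best`` merged tensors
# 0 and 2, a surviving candidate stored as positions (1, 3) is re-labelled to
# (0, 1), because each remaining operand index is shifted down by the number
# of removed tensors that preceded it.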
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
"""
Finds the path by contracting the best pair until the input list is
exhausted. The best pair is found by minimizing the tuple
``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
matrix multiplication or inner product operations, then Hadamard like
operations, and finally outer operations. Outer products are limited by
``memory_limit``. This algorithm scales cubically with respect to the
number of elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
    memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The greedy contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _greedy_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
# Handle trivial cases that leaked through
if len(input_sets) == 1:
return [(0,)]
elif len(input_sets) == 2:
return [(0, 1)]
# Build up a naive cost
contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
# Initially iterate over all pairs
comb_iter = itertools.combinations(range(len(input_sets)), 2)
known_contractions = []
path_cost = 0
path = []
for iteration in range(len(input_sets) - 1):
# Iterate over all pairs on first step, only previously found pairs on subsequent steps
for positions in comb_iter:
# Always initially ignore outer products
if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
continue
result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
naive_cost)
if result is not None:
known_contractions.append(result)
        # If we do not have an inner contraction, rescan pairs including outer products
if len(known_contractions) == 0:
# Then check the outer products
for positions in itertools.combinations(range(len(input_sets)), 2):
result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
path_cost, naive_cost)
if result is not None:
known_contractions.append(result)
# If we still did not find any remaining contractions, default back to einsum like behavior
if len(known_contractions) == 0:
path.append(tuple(range(len(input_sets))))
break
# Sort based on first index
best = min(known_contractions, key=lambda x: x[0])
# Now propagate as many unused contractions as possible to next iteration
known_contractions = _update_other_results(known_contractions, best)
# Next iteration only compute contractions with the new tensor
# All other contractions have been accounted for
input_sets = best[2]
new_tensor_pos = len(input_sets) - 1
comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
# Update path and total cost
path.append(best[1])
path_cost += best[0][1]
return path
def _can_dot(inputs, result, idx_removed):
"""
Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.
Parameters
----------
inputs : list of str
Specifies the subscripts for summation.
result : str
Resulting summation.
idx_removed : set
Indices that are removed in the summation
Returns
-------
type : bool
Returns true if BLAS should and can be used, else False
Notes
-----
If the operations is BLAS level 1 or 2 and is not already aligned
we default back to einsum as the memory movement to copy is more
costly than the operation itself.
Examples
--------
# Standard GEMM operation
>>> _can_dot(['ij', 'jk'], 'ik', set('j'))
True
# Can use the standard BLAS, but requires odd data movement
>>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
False
# DDOT where the memory is not aligned
>>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
False
"""
# All `dot` calls remove indices
if len(idx_removed) == 0:
return False
# BLAS can only handle two operands
if len(inputs) != 2:
return False
input_left, input_right = inputs
for c in set(input_left + input_right):
# can't deal with repeated indices on same input or more than 2 total
nl, nr = input_left.count(c), input_right.count(c)
if (nl > 1) or (nr > 1) or (nl + nr > 2):
return False
# can't do implicit summation or dimension collapse e.g.
# "ab,bc->c" (implicitly sum over 'a')
# "ab,ca->ca" (take diagonal of 'a')
if nl + nr - 1 == int(c in result):
return False
# Build a few temporaries
set_left = set(input_left)
set_right = set(input_right)
keep_left = set_left - idx_removed
keep_right = set_right - idx_removed
rs = len(idx_removed)
# At this point we are a DOT, GEMV, or GEMM operation
# Handle inner products
# DDOT with aligned data
if input_left == input_right:
return True
# DDOT without aligned data (better to use einsum)
if set_left == set_right:
return False
# Handle the 4 possible (aligned) GEMV or GEMM cases
# GEMM or GEMV no transpose
if input_left[-rs:] == input_right[:rs]:
return True
# GEMM or GEMV transpose both
if input_left[:rs] == input_right[-rs:]:
return True
# GEMM or GEMV transpose right
if input_left[-rs:] == input_right[-rs:]:
return True
# GEMM or GEMV transpose left
if input_left[:rs] == input_right[:rs]:
return True
# Einsum is faster than GEMV if we have to copy data
if not keep_left or not keep_right:
return False
# We are a matrix-matrix product, but we need to copy data
return True
def _parse_einsum_input(operands):
"""
A reproduction of einsum c side einsum parsing in python.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> np.random.seed(123)
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
>>> _parse_einsum_input(('...a,...a->...', a, b))
('za,xza', 'xz', [a, b]) # may vary
>>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('za,xza', 'xz', [a, b]) # may vary
"""
if len(operands) == 0:
raise ValueError("No input operands")
if isinstance(operands[0], basestring):
subscripts = operands[0].replace(" ", "")
operands = [asanyarray(v) for v in operands[1:]]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = [asanyarray(v) for v in operand_list]
subscripts = ""
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += "..."
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
if num != last:
subscripts += ","
if output_list is not None:
subscripts += "->"
for s in output_list:
if s is Ellipsis:
subscripts += "..."
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
# Check for proper "->"
if ("-" in subscripts) or (">" in subscripts):
invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
if invalid or (subscripts.count("->") != 1):
raise ValueError("Subscripts can only contain one '->'.")
# Parse ellipses
if "." in subscripts:
used = subscripts.replace(".", "").replace(",", "").replace("->", "")
unused = list(einsum_symbols_set - set(used))
ellipse_inds = "".join(unused)
longest = 0
if "->" in subscripts:
input_tmp, output_sub = subscripts.split("->")
split_subscripts = input_tmp.split(",")
out_sub = True
else:
split_subscripts = subscripts.split(',')
out_sub = False
for num, sub in enumerate(split_subscripts):
if "." in sub:
if (sub.count(".") != 3) or (sub.count("...") != 1):
raise ValueError("Invalid Ellipses.")
# Take into account numerical values
if operands[num].shape == ():
ellipse_count = 0
else:
ellipse_count = max(operands[num].ndim, 1)
ellipse_count -= (len(sub) - 3)
if ellipse_count > longest:
longest = ellipse_count
if ellipse_count < 0:
raise ValueError("Ellipses lengths do not match.")
elif ellipse_count == 0:
split_subscripts[num] = sub.replace('...', '')
else:
rep_inds = ellipse_inds[-ellipse_count:]
split_subscripts[num] = sub.replace('...', rep_inds)
subscripts = ",".join(split_subscripts)
if longest == 0:
out_ellipse = ""
else:
out_ellipse = ellipse_inds[-longest:]
if out_sub:
subscripts += "->" + output_sub.replace("...", out_ellipse)
else:
# Special care for outputless ellipses
output_subscript = ""
tmp_subscripts = subscripts.replace(",", "")
for s in sorted(set(tmp_subscripts)):
if s not in (einsum_symbols):
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
normal_inds = ''.join(sorted(set(output_subscript) -
set(out_ellipse)))
subscripts += "->" + out_ellipse + normal_inds
    # Build the output string if it does not exist
if "->" in subscripts:
input_subscripts, output_subscript = subscripts.split("->")
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(",", "")
output_subscript = ""
for s in sorted(set(tmp_subscripts)):
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError("Output character %s did not appear in the input"
% char)
# Make sure number operands is equivalent to the number of terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError("Number of einsum subscripts must be equal to the "
"number of operands.")
return (input_subscripts, output_subscript, operands)
def _einsum_path_dispatcher(*operands, **kwargs):
# NOTE: technically, we should only dispatch on array-like arguments, not
# subscripts (given as strings). But separating operands into
# arrays/subscripts is a little tricky/slow (given einsum's two supported
# signatures), so as a practical shortcut we dispatch on everything.
# Strings will be ignored for dispatching since they don't define
# __array_function__.
return operands
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, **kwargs):
"""
einsum_path(subscripts, *operands, optimize='greedy')
Evaluates the lowest cost contraction order for an einsum expression by
considering the creation of intermediate arrays.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
*operands : list of array_like
These are the arrays for the operation.
optimize : {bool, list, tuple, 'greedy', 'optimal'}
Choose the type of path. If a tuple is provided, the second argument is
assumed to be the maximum intermediate size created. If only a single
argument is provided the largest input or output array size is used
as a maximum intermediate size.
* if a list is given that starts with ``einsum_path``, uses this as the
contraction path
* if False no optimization is taken
* if True defaults to the 'greedy' algorithm
* 'optimal' An algorithm that combinatorially explores all possible
          ways of contracting the listed tensors and chooses the least costly
path. Scales exponentially with the number of terms in the
contraction.
* 'greedy' An algorithm that chooses the best pair contraction
at each step. Effectively, this algorithm searches the largest inner,
Hadamard, and then outer products at each step. Scales cubically with
the number of terms in the contraction. Equivalent to the 'optimal'
path for most contractions.
Default is 'greedy'.
Returns
-------
path : list of tuples
A list representation of the einsum path.
string_repr : str
A printable representation of the einsum path.
Notes
-----
The resulting path indicates which terms of the input contraction should be
contracted first, the result of this contraction is then appended to the
end of the contraction list. This list can then be iterated over until all
intermediate contractions are complete.
See Also
--------
einsum, linalg.multi_dot
Examples
--------
We can begin with a chain dot example. In this case, it is optimal to
contract the ``b`` and ``c`` tensors first as represented by the first
element of the path ``(1, 2)``. The resulting tensor is added to the end
of the contraction and the remaining contraction ``(0, 1)`` is then
completed.
>>> np.random.seed(123)
>>> a = np.random.rand(2, 2)
>>> b = np.random.rand(2, 5)
>>> c = np.random.rand(5, 2)
>>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
>>> print(path_info[0])
['einsum_path', (1, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ij,jk,kl->il # may vary
Naive scaling: 4
Optimized scaling: 3
Naive FLOP count: 1.600e+02
Optimized FLOP count: 5.600e+01
Theoretical speedup: 2.857
Largest intermediate: 4.000e+00 elements
-------------------------------------------------------------------------
scaling current remaining
-------------------------------------------------------------------------
3 kl,jk->jl ij,jl->il
3 jl,ij->il il->il
A more complex index transformation example.
>>> I = np.random.rand(10, 10, 10, 10)
>>> C = np.random.rand(10, 10)
>>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
... optimize='greedy')
>>> print(path_info[0])
['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary
Naive scaling: 8
Optimized scaling: 5
Naive FLOP count: 8.000e+08
Optimized FLOP count: 8.000e+05
Theoretical speedup: 1000.000
Largest intermediate: 1.000e+04 elements
--------------------------------------------------------------------------
scaling current remaining
--------------------------------------------------------------------------
5 abcd,ea->bcde fb,gc,hd,bcde->efgh
5 bcde,fb->cdef gc,hd,cdef->efgh
5 cdef,gc->defg hd,defg->efgh
5 defg,hd->efgh efgh->efgh
"""
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize', 'einsum_call']
unknown_kwargs = [k for (k, v) in kwargs.items() if k
not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs:"
" %s" % unknown_kwargs)
# Figure out what the path really is
path_type = kwargs.pop('optimize', True)
if path_type is True:
path_type = 'greedy'
if path_type is None:
path_type = False
memory_limit = None
# No optimization or a named path algorithm
if (path_type is False) or isinstance(path_type, basestring):
pass
# Given an explicit path
elif len(path_type) and (path_type[0] == 'einsum_path'):
pass
# Path tuple with memory limit
elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and
isinstance(path_type[1], (int, float))):
memory_limit = int(path_type[1])
path_type = path_type[0]
else:
raise TypeError("Did not understand the path: %s" % str(path_type))
# Hidden option, only einsum should call this
einsum_call_arg = kwargs.pop("einsum_call", False)
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
# Build a few useful list and sets
input_list = input_subscripts.split(',')
input_sets = [set(x) for x in input_list]
output_set = set(output_subscript)
indices = set(input_subscripts.replace(',', ''))
# Get length of each unique dimension and ensure all dimensions are correct
dimension_dict = {}
broadcast_indices = [[] for x in range(len(input_list))]
for tnum, term in enumerate(input_list):
sh = operands[tnum].shape
if len(sh) != len(term):
raise ValueError("Einstein sum subscript %s does not contain the "
"correct number of indices for operand %d."
% (input_subscripts[tnum], tnum))
for cnum, char in enumerate(term):
dim = sh[cnum]
# Build out broadcast indices
if dim == 1:
broadcast_indices[tnum].append(char)
if char in dimension_dict.keys():
# For broadcasting cases we always want the largest dim size
if dimension_dict[char] == 1:
dimension_dict[char] = dim
elif dim not in (1, dimension_dict[char]):
raise ValueError("Size of label '%s' for operand %d (%d) "
"does not match previous terms (%d)."
% (char, tnum, dimension_dict[char], dim))
else:
dimension_dict[char] = dim
# Convert broadcast inds to sets
broadcast_indices = [set(x) for x in broadcast_indices]
# Compute size of each input array plus the output array
size_list = [_compute_size_by_dict(term, dimension_dict)
for term in input_list + [output_subscript]]
max_size = max(size_list)
if memory_limit is None:
memory_arg = max_size
else:
memory_arg = memory_limit
# Compute naive cost
# This isn't quite right, need to look into exactly how einsum does this
inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
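    # For example (illustrative): for 'ij,jk->ik' the term lengths sum to 4
    # while there are only 3 unique indices, so the difference is positive and
    # an inner (summed) index is detected.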
naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
# Compute the path
if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
# Nothing to be optimized, leave it to einsum
path = [tuple(range(len(input_list)))]
elif path_type == "greedy":
path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type == "optimal":
path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type[0] == 'einsum_path':
path = path_type[1:]
else:
        raise KeyError("Path name %s not found" % path_type)
cost_list, scale_list, size_list, contraction_list = [], [], [], []
# Build contraction tuple (positions, gemm, einsum_str, remaining)
for cnum, contract_inds in enumerate(path):
# Make sure we remove inds from right to left
contract_inds = tuple(sorted(list(contract_inds), reverse=True))
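        # For example (illustrative): popping positions (3, 1) in descending
        # order leaves the earlier position untouched, whereas popping 1 first
        # would shift the operand that used to sit at position 3.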
contract = _find_contraction(contract_inds, input_sets, output_set)
out_inds, input_sets, idx_removed, idx_contract = contract
cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
cost_list.append(cost)
scale_list.append(len(idx_contract))
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
bcast = set()
tmp_inputs = []
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
bcast |= broadcast_indices.pop(x)
new_bcast_inds = bcast - idx_removed
# If we're broadcasting, nix blas
if not len(idx_removed & bcast):
do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
else:
do_blas = False
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
else:
sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
idx_result = "".join([x[1] for x in sorted(sort_result)])
input_list.append(idx_result)
broadcast_indices.append(new_bcast_inds)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
if einsum_call_arg:
return (operands, contraction_list)
# Return the path along with a nice string representation
overall_contraction = input_subscripts + "->" + output_subscript
header = ("scaling", "current", "remaining")
speedup = naive_cost / opt_cost
max_i = max(size_list)
path_print = " Complete contraction: %s\n" % overall_contraction
path_print += " Naive scaling: %d\n" % len(indices)
path_print += " Optimized scaling: %d\n" % max(scale_list)
path_print += " Naive FLOP count: %.3e\n" % naive_cost
path_print += " Optimized FLOP count: %.3e\n" % opt_cost
path_print += " Theoretical speedup: %3.3f\n" % speedup
path_print += " Largest intermediate: %.3e elements\n" % max_i
path_print += "-" * 74 + "\n"
path_print += "%6s %24s %40s\n" % header
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
path = ['einsum_path'] + path
return (path, path_print)
def _einsum_dispatcher(*operands, **kwargs):
    # Arguably we dispatch on more arguments than we really should; see note in
# _einsum_path_dispatcher for why.
for op in operands:
yield op
yield kwargs.get('out')
# Rewrite einsum to handle different cases
@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output shape is not provided in this format `einsum` will be
calculated in implicit mode, otherwise it will be performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
.. versionadded:: 1.12.0
Added the ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
Typically a 'greedy' algorithm is applied which empirical tests have shown
returns the optimal path in the majority of cases. In some cases 'optimal'
    will find a better path through a more expensive, exhaustive search.
For iterative calculations it may be advisable to calculate the optimal path
once and reuse that path by supplying it as an argument. An example is given
below.
See :py:func:`numpy.einsum_path` for more details.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
Chained array operations. For more complicated contractions, speed ups
might be achieved by repeatedly computing a 'greedy' path or pre-computing the
'optimal' path and repeatedly applying it, using an
`einsum_path` insertion (since version 1.12.0). Performance improvements can be
particularly significant with larger arrays:
>>> a = np.ones(64).reshape(2,4,8)
Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
Greedy `einsum` (faster optimal path approximation): ~160ms
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
Optimal `einsum` (best usage pattern in some use cases): ~110ms
>>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
"""
# Grab non-einsum kwargs; do not optimize by default.
optimize_arg = kwargs.pop('optimize', False)
# If no optimization, run pure einsum
if optimize_arg is False:
return c_einsum(*operands, **kwargs)
valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
einsum_kwargs = {k: v for (k, v) in kwargs.items() if
k in valid_einsum_kwargs}
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
unknown_kwargs = [k for (k, v) in kwargs.items() if
k not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs: %s"
% unknown_kwargs)
    # Special handling if out is specified
specified_out = False
out_array = einsum_kwargs.pop('out', None)
if out_array is not None:
specified_out = True
# Build the contraction list and operand
operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
einsum_call=True)
handle_out = False
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
tmp_operands = [operands.pop(x) for x in inds]
# Do we need to deal with the output?
handle_out = specified_out and ((num + 1) == len(contraction_list))
# Call tensordot if still possible
if blas:
# Checks have already been handled
input_str, results_index = einsum_str.split('->')
input_left, input_right = input_str.split(',')
tensor_result = input_left + input_right
for s in idx_rm:
tensor_result = tensor_result.replace(s, "")
# Find indices to contract over
left_pos, right_pos = [], []
for s in sorted(idx_rm):
left_pos.append(input_left.find(s))
right_pos.append(input_right.find(s))
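            # Worked example (illustrative): for einsum_str 'ij,jk->ik' we get
            # idx_rm == {'j'}, tensor_result == 'ik', left_pos == [1] and
            # right_pos == [0], i.e. a tensordot over the shared 'j' axis.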
# Contract!
new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
# Build a new view if needed
if (tensor_result != results_index) or handle_out:
if handle_out:
einsum_kwargs["out"] = out_array
new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs)
# Call einsum
else:
# If out was specified
if handle_out:
einsum_kwargs["out"] = out_array
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
# Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
if specified_out:
return out_array
else:
return operands[0]
| {
"content_hash": "7d3ccbd56d4c2d74f6c252e5f47c6e10",
"timestamp": "",
"source": "github",
"line_count": 1432,
"max_line_length": 118,
"avg_line_length": 35.80377094972067,
"alnum_prop": 0.5862963468627489,
"repo_name": "MSeifert04/numpy",
"id": "3412c3fd5a7e16cebd666e8cef4aee59fd58c986",
"size": "51271",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "numpy/core/einsumfunc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9050105"
},
{
"name": "C++",
"bytes": "189464"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8251054"
},
{
"name": "Shell",
"bytes": "8345"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
} |
import os
import unittest
from quickbooks.client import QuickBooks
from quickbooks.objects.taxrate import TaxRate
class TaxRateTest(unittest.TestCase):
def setUp(self):
self.qb_client = QuickBooks(
sandbox=True,
consumer_key=os.environ.get('CONSUMER_KEY'),
consumer_secret=os.environ.get('CONSUMER_SECRET'),
access_token=os.environ.get('ACCESS_TOKEN'),
access_token_secret=os.environ.get('ACCESS_TOKEN_SECRET'),
company_id=os.environ.get('COMPANY_ID')
)
def test_read(self):
tax_rates = TaxRate.all(max_results=1, qb=self.qb_client)
        self.assertEqual(len(tax_rates), 1)
| {
"content_hash": "165d3b74e46e9c31c4ae4333ebc111b7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 70,
"avg_line_length": 29.91304347826087,
"alnum_prop": 0.6482558139534884,
"repo_name": "porn/python-quickbooks",
"id": "30da1ff4413104562e0a921b425eac5932aa6dc3",
"size": "688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_taxrate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "208523"
}
],
"symlink_target": ""
} |
from pybind11_tests import chrono as m
import datetime
def test_chrono_system_clock():
# Get the time from both c++ and datetime
date1 = m.test_chrono1()
date2 = datetime.datetime.today()
# The returned value should be a datetime
assert isinstance(date1, datetime.datetime)
# The numbers should vary by a very small amount (time it took to execute)
diff = abs(date1 - date2)
# There should never be a days/seconds difference
assert diff.days == 0
assert diff.seconds == 0
# We test that no more than about 0.5 seconds passes here
# This makes sure that the dates created are very close to the same
# but if the testing system is incredibly overloaded this should still pass
assert diff.microseconds < 500000
def test_chrono_system_clock_roundtrip():
date1 = datetime.datetime.today()
# Roundtrip the time
date2 = m.test_chrono2(date1)
# The returned value should be a datetime
assert isinstance(date2, datetime.datetime)
# They should be identical (no information lost on roundtrip)
diff = abs(date1 - date2)
assert diff.days == 0
assert diff.seconds == 0
assert diff.microseconds == 0
def test_chrono_system_clock_roundtrip_date():
date1 = datetime.date.today()
# Roundtrip the time
datetime2 = m.test_chrono2(date1)
date2 = datetime2.date()
time2 = datetime2.time()
# The returned value should be a datetime
assert isinstance(datetime2, datetime.datetime)
assert isinstance(date2, datetime.date)
assert isinstance(time2, datetime.time)
# They should be identical (no information lost on roundtrip)
diff = abs(date1 - date2)
assert diff.days == 0
assert diff.seconds == 0
assert diff.microseconds == 0
# Year, Month & Day should be the same after the round trip
assert date1.year == date2.year
assert date1.month == date2.month
assert date1.day == date2.day
# There should be no time information
assert time2.hour == 0
assert time2.minute == 0
assert time2.second == 0
assert time2.microsecond == 0
def test_chrono_system_clock_roundtrip_time():
time1 = datetime.datetime.today().time()
# Roundtrip the time
datetime2 = m.test_chrono2(time1)
date2 = datetime2.date()
time2 = datetime2.time()
# The returned value should be a datetime
assert isinstance(datetime2, datetime.datetime)
assert isinstance(date2, datetime.date)
assert isinstance(time2, datetime.time)
# Hour, Minute, Second & Microsecond should be the same after the round trip
assert time1.hour == time2.hour
assert time1.minute == time2.minute
assert time1.second == time2.second
assert time1.microsecond == time2.microsecond
# There should be no date information (i.e. date = python base date)
assert date2.year == 1970
assert date2.month == 1
assert date2.day == 1
def test_chrono_duration_roundtrip():
# Get the difference between two times (a timedelta)
date1 = datetime.datetime.today()
date2 = datetime.datetime.today()
diff = date2 - date1
# Make sure this is a timedelta
assert isinstance(diff, datetime.timedelta)
cpp_diff = m.test_chrono3(diff)
assert cpp_diff.days == diff.days
assert cpp_diff.seconds == diff.seconds
assert cpp_diff.microseconds == diff.microseconds
def test_chrono_duration_subtraction_equivalence():
date1 = datetime.datetime.today()
date2 = datetime.datetime.today()
diff = date2 - date1
cpp_diff = m.test_chrono4(date2, date1)
assert cpp_diff.days == diff.days
assert cpp_diff.seconds == diff.seconds
assert cpp_diff.microseconds == diff.microseconds
def test_chrono_duration_subtraction_equivalence_date():
date1 = datetime.date.today()
date2 = datetime.date.today()
diff = date2 - date1
cpp_diff = m.test_chrono4(date2, date1)
assert cpp_diff.days == diff.days
assert cpp_diff.seconds == diff.seconds
assert cpp_diff.microseconds == diff.microseconds
def test_chrono_steady_clock():
time1 = m.test_chrono5()
assert isinstance(time1, datetime.timedelta)
def test_chrono_steady_clock_roundtrip():
time1 = datetime.timedelta(days=10, seconds=10, microseconds=100)
time2 = m.test_chrono6(time1)
assert isinstance(time2, datetime.timedelta)
# They should be identical (no information lost on roundtrip)
assert time1.days == time2.days
assert time1.seconds == time2.seconds
assert time1.microseconds == time2.microseconds
def test_floating_point_duration():
# Test using a floating point number in seconds
time = m.test_chrono7(35.525123)
assert isinstance(time, datetime.timedelta)
assert time.seconds == 35
assert 525122 <= time.microseconds <= 525123
diff = m.test_chrono_float_diff(43.789012, 1.123456)
assert diff.seconds == 42
assert 665556 <= diff.microseconds <= 665557
def test_nano_timepoint():
time = datetime.datetime.now()
time1 = m.test_nano_timepoint(time, datetime.timedelta(seconds=60))
    assert time1 == time + datetime.timedelta(seconds=60)
| {
"content_hash": "de874944e9bfef6a29ae37111b4a22d1",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 80,
"avg_line_length": 29.21590909090909,
"alnum_prop": 0.6960326721120187,
"repo_name": "BYVoid/OpenCC",
"id": "55c95440655145f9b3c0c77d9e2d878a29f7fb45",
"size": "5142",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "deps/pybind11-2.5.0/tests/test_chrono.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1195"
},
{
"name": "C",
"bytes": "689"
},
{
"name": "C++",
"bytes": "215008"
},
{
"name": "CMake",
"bytes": "15826"
},
{
"name": "JavaScript",
"bytes": "7133"
},
{
"name": "Makefile",
"bytes": "2528"
},
{
"name": "Python",
"bytes": "21569"
},
{
"name": "Shell",
"bytes": "2247"
},
{
"name": "TypeScript",
"bytes": "912"
}
],
"symlink_target": ""
} |
from lib.ConfigLoader import ConfigLoader
from resources.MySql import MySql
class Resources(object):
@staticmethod
def configs():
return [MySql.config()]
def __init__(self, logger, configs):
self.logger = logger
mysql_config = ConfigLoader.get_config_by_type(configs, type(MySql.config()))
        self.db_loader = lambda: MySql(logger, mysql_config)
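        # Note (added for clarity): db_loader defers construction, so each call
        # builds a fresh MySql resource from the loaded mysql_config.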
| {
"content_hash": "3fe03a2648a788dc8a22ebbe26f66aed",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 32.5,
"alnum_prop": 0.6794871794871795,
"repo_name": "JimboMonkey1234/pushserver",
"id": "99db3828e0f5e571cb8f47b1244079c01f7541b9",
"size": "413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/Resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24373"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
} |
import ctypes
import faulthandler
import io
import itertools
import logging
import multiprocessing
import os
import pickle
import sys
import textwrap
import unittest
from importlib import import_module
from io import StringIO
from django.core.management import call_command
from django.db import connections
from django.test import SimpleTestCase, TestCase
from django.test.utils import (
NullTimeKeeper, TimeKeeper, setup_databases as _setup_databases,
setup_test_environment, teardown_databases as _teardown_databases,
teardown_test_environment,
)
from django.utils.datastructures import OrderedSet
from django.utils.version import PY37
try:
import ipdb as pdb
except ImportError:
import pdb
try:
import tblib.pickling_support
except ImportError:
tblib = None
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger('django.db.backends')
self.logger.setLevel(logging.DEBUG)
super().__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super().startTest(test)
def stopTest(self, test):
super().stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super().addError(test, err)
self.debug_sql_stream.seek(0)
self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def addSubTest(self, test, subtest, err):
super().addSubTest(test, subtest, err)
if err is not None:
self.debug_sql_stream.seek(0)
errors = self.failures if issubclass(err[0], test.failureException) else self.errors
errors[-1] = errors[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
self.stream.writeln(self.separator2)
self.stream.writeln(sql_debug)
class PDBDebugResult(unittest.TextTestResult):
"""
Custom result class that triggers a PDB session when an error or failure
occurs.
"""
def addError(self, test, err):
super().addError(test, err)
self.debug(err)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug(err)
def debug(self, error):
exc_type, exc_value, traceback = error
print("\nOpening PDB: %r" % exc_value)
pdb.post_mortem(traceback)
class RemoteTestResult:
"""
Record information about which tests have succeeded and which have failed.
The sole purpose of this class is to record events in the child processes
so they can be replayed in the master process. As a consequence it doesn't
inherit unittest.TestResult and doesn't attempt to implement all its API.
The implementation matches the unpythonic coding style of unittest2.
"""
def __init__(self):
if tblib is not None:
tblib.pickling_support.install()
self.events = []
self.failfast = False
self.shouldStop = False
self.testsRun = 0
@property
def test_index(self):
return self.testsRun - 1
def _confirm_picklable(self, obj):
"""
Confirm that obj can be pickled and unpickled as multiprocessing will
need to pickle the exception in the child process and unpickle it in
the parent process. Let the exception rise, if not.
"""
pickle.loads(pickle.dumps(obj))
def _print_unpicklable_subtest(self, test, subtest, pickle_exc):
print("""
Subtest failed:
test: {}
subtest: {}
Unfortunately, the subtest that failed cannot be pickled, so the parallel
test runner cannot handle it cleanly. Here is the pickling error:
> {}
You should re-run this test with --parallel=1 to reproduce the failure
with a cleaner failure message.
""".format(test, subtest, pickle_exc))
def check_picklable(self, test, err):
# Ensure that sys.exc_info() tuples are picklable. This displays a
# clear multiprocessing.pool.RemoteTraceback generated in the child
# process instead of a multiprocessing.pool.MaybeEncodingError, making
# the root cause easier to figure out for users who aren't familiar
# with the multiprocessing module. Since we're in a forked process,
# our best chance to communicate with them is to print to stdout.
try:
self._confirm_picklable(err)
except Exception as exc:
original_exc_txt = repr(err[1])
original_exc_txt = textwrap.fill(original_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
pickle_exc_txt = repr(exc)
pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
if tblib is None:
print("""
{} failed:
{}
Unfortunately, tracebacks cannot be pickled, making it impossible for the
parallel test runner to handle this exception cleanly.
In order to see the traceback, you should install tblib:
python -m pip install tblib
""".format(test, original_exc_txt))
else:
print("""
{} failed:
{}
Unfortunately, the exception it raised cannot be pickled, making it impossible
for the parallel test runner to handle it cleanly.
Here's the error encountered while trying to pickle the exception:
{}
You should re-run this test with the --parallel=1 option to reproduce the
failure and get a correct traceback.
""".format(test, original_exc_txt, pickle_exc_txt))
raise
def check_subtest_picklable(self, test, subtest):
try:
self._confirm_picklable(subtest)
except Exception as exc:
self._print_unpicklable_subtest(test, subtest, exc)
raise
def stop_if_failfast(self):
if self.failfast:
self.stop()
def stop(self):
self.shouldStop = True
def startTestRun(self):
self.events.append(('startTestRun',))
def stopTestRun(self):
self.events.append(('stopTestRun',))
def startTest(self, test):
self.testsRun += 1
self.events.append(('startTest', self.test_index))
def stopTest(self, test):
self.events.append(('stopTest', self.test_index))
def addError(self, test, err):
self.check_picklable(test, err)
self.events.append(('addError', self.test_index, err))
self.stop_if_failfast()
def addFailure(self, test, err):
self.check_picklable(test, err)
self.events.append(('addFailure', self.test_index, err))
self.stop_if_failfast()
def addSubTest(self, test, subtest, err):
# Follow Python 3.5's implementation of unittest.TestResult.addSubTest()
# by not doing anything when a subtest is successful.
if err is not None:
# Call check_picklable() before check_subtest_picklable() since
# check_picklable() performs the tblib check.
self.check_picklable(test, err)
self.check_subtest_picklable(test, subtest)
self.events.append(('addSubTest', self.test_index, subtest, err))
self.stop_if_failfast()
def addSuccess(self, test):
self.events.append(('addSuccess', self.test_index))
def addSkip(self, test, reason):
self.events.append(('addSkip', self.test_index, reason))
def addExpectedFailure(self, test, err):
# If tblib isn't installed, pickling the traceback will always fail.
# However we don't want tblib to be required for running the tests
# when they pass or fail as expected. Drop the traceback when an
# expected failure occurs.
if tblib is None:
err = err[0], err[1], None
self.check_picklable(test, err)
self.events.append(('addExpectedFailure', self.test_index, err))
def addUnexpectedSuccess(self, test):
self.events.append(('addUnexpectedSuccess', self.test_index))
self.stop_if_failfast()
class RemoteTestRunner:
"""
Run tests and record everything but don't display anything.
The implementation matches the unpythonic coding style of unittest2.
"""
resultclass = RemoteTestResult
def __init__(self, failfast=False, resultclass=None):
self.failfast = failfast
if resultclass is not None:
self.resultclass = resultclass
def run(self, test):
result = self.resultclass()
unittest.registerResult(result)
result.failfast = self.failfast
test(result)
return result
def default_test_processes():
"""Default number of test processes when using the --parallel option."""
# The current implementation of the parallel test runner requires
# multiprocessing to start subprocesses with fork().
if multiprocessing.get_start_method() != 'fork':
return 1
try:
return int(os.environ['DJANGO_TEST_PROCESSES'])
except KeyError:
return multiprocessing.cpu_count()
_worker_id = 0
def _init_worker(counter):
"""
Switch to databases dedicated to this worker.
This helper lives at module-level because of the multiprocessing module's
requirements.
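    For illustration (exact naming is backend-specific): with a test database
    named ``test_myproj``, the first worker switches to a clone such as
    ``test_myproj_1``, as produced by ``get_test_db_clone_settings('1')``.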
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
for alias in connections:
connection = connections[alias]
settings_dict = connection.creation.get_test_db_clone_settings(str(_worker_id))
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close()
def _run_subsuite(args):
"""
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult.
This helper lives at module-level and its arguments are wrapped in a tuple
because of the multiprocessing module's requirements.
"""
runner_class, subsuite_index, subsuite, failfast = args
runner = runner_class(failfast=failfast)
result = runner.run(subsuite)
return subsuite_index, result.events
class ParallelTestSuite(unittest.TestSuite):
"""
Run a series of tests in parallel in several processes.
While the unittest module's documentation implies that orchestrating the
execution of tests is the responsibility of the test runner, in practice,
it appears that TestRunner classes are more concerned with formatting and
displaying test results.
Since there are fewer use cases for customizing TestSuite than TestRunner,
implementing parallelization at the level of the TestSuite improves
interoperability with existing custom test runners. A single instance of a
test runner can still collect results from all tests without being aware
that they have been run in parallel.
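    A minimal usage sketch (module name and process count are illustrative;
    this mirrors what ``DiscoverRunner.build_suite()`` constructs when
    ``--parallel`` is greater than one)::
        suite = unittest.defaultTestLoader.discover('myapp.tests')
        parallel_suite = ParallelTestSuite(suite, processes=4, failfast=False)
        result = unittest.TextTestRunner(verbosity=2).run(parallel_suite)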
"""
# In case someone wants to modify these in a subclass.
init_worker = _init_worker
run_subsuite = _run_subsuite
runner_class = RemoteTestRunner
def __init__(self, suite, processes, failfast=False):
self.subsuites = partition_suite_by_case(suite)
self.processes = processes
self.failfast = failfast
super().__init__()
def run(self, result):
"""
Distribute test cases across workers.
Return an identifier of each test case with its result in order to use
imap_unordered to show results as soon as they're available.
To minimize pickling errors when getting results from workers:
- pass back numeric indexes in self.subsuites instead of tests
- make tracebacks picklable with tblib, if available
Even with tblib, errors may still occur for dynamically created
exception classes which cannot be unpickled.
"""
counter = multiprocessing.Value(ctypes.c_int, 0)
pool = multiprocessing.Pool(
processes=self.processes,
initializer=self.init_worker.__func__,
initargs=[counter],
)
args = [
(self.runner_class, index, subsuite, self.failfast)
for index, subsuite in enumerate(self.subsuites)
]
test_results = pool.imap_unordered(self.run_subsuite.__func__, args)
while True:
if result.shouldStop:
pool.terminate()
break
try:
subsuite_index, events = test_results.next(timeout=0.1)
except multiprocessing.TimeoutError:
continue
except StopIteration:
pool.close()
break
tests = list(self.subsuites[subsuite_index])
for event in events:
event_name = event[0]
handler = getattr(result, event_name, None)
if handler is None:
continue
test = tests[event[1]]
args = event[2:]
handler(test, *args)
pool.join()
return result
def __iter__(self):
return iter(self.subsuites)
class DiscoverRunner:
"""A Django test runner that uses unittest2 test discovery."""
test_suite = unittest.TestSuite
parallel_test_suite = ParallelTestSuite
test_runner = unittest.TextTestRunner
test_loader = unittest.defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(self, pattern=None, top_level=None, verbosity=1,
interactive=True, failfast=False, keepdb=False,
reverse=False, debug_mode=False, debug_sql=False, parallel=0,
tags=None, exclude_tags=None, test_name_patterns=None,
pdb=False, buffer=False, enable_faulthandler=True,
timing=False, **kwargs):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_mode = debug_mode
self.debug_sql = debug_sql
self.parallel = parallel
self.tags = set(tags or [])
self.exclude_tags = set(exclude_tags or [])
if not faulthandler.is_enabled() and enable_faulthandler:
try:
faulthandler.enable(file=sys.stderr.fileno())
except (AttributeError, io.UnsupportedOperation):
faulthandler.enable(file=sys.__stderr__.fileno())
self.pdb = pdb
if self.pdb and self.parallel > 1:
raise ValueError('You cannot use --pdb with parallel tests; pass --parallel=1 to use it.')
self.buffer = buffer
if self.buffer and self.parallel > 1:
raise ValueError(
'You cannot use -b/--buffer with parallel tests; pass '
'--parallel=1 to use it.'
)
self.test_name_patterns = None
self.time_keeper = TimeKeeper() if timing else NullTimeKeeper()
if test_name_patterns:
# unittest does not export the _convert_select_pattern function
# that converts command-line arguments to patterns.
self.test_name_patterns = {
pattern if '*' in pattern else '*%s*' % pattern
for pattern in test_name_patterns
}
@classmethod
def add_arguments(cls, parser):
parser.add_argument(
'-t', '--top-level-directory', dest='top_level',
help='Top level of project for unittest discovery.',
)
parser.add_argument(
'-p', '--pattern', default="test*.py",
help='The test matching pattern. Defaults to test*.py.',
)
parser.add_argument(
'--keepdb', action='store_true',
help='Preserves the test DB between runs.'
)
parser.add_argument(
'-r', '--reverse', action='store_true',
help='Reverses test cases order.',
)
parser.add_argument(
'--debug-mode', action='store_true',
help='Sets settings.DEBUG to True.',
)
parser.add_argument(
'-d', '--debug-sql', action='store_true',
help='Prints logged SQL queries on failure.',
)
parser.add_argument(
'--parallel', nargs='?', default=1, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', action='append', dest='tags',
help='Run only tests with the specified tag. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', action='append', dest='exclude_tags',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
parser.add_argument(
'--pdb', action='store_true',
help='Runs a debugger (pdb, or ipdb if installed) on error or failure.'
)
parser.add_argument(
'-b', '--buffer', action='store_true',
help='Discard output from passing tests.',
)
parser.add_argument(
'--no-faulthandler', action='store_false', dest='enable_faulthandler',
help='Disables the Python faulthandler module during tests.',
)
parser.add_argument(
'--timing', action='store_true',
help=(
'Output timings, including database set up and total run time.'
),
)
if PY37:
parser.add_argument(
'-k', action='append', dest='test_name_patterns',
help=(
'Only run test methods and classes that match the pattern '
'or substring. Can be used multiple times. Same as '
'unittest -k option.'
),
)
def setup_test_environment(self, **kwargs):
setup_test_environment(debug=self.debug_mode)
unittest.installHandler()
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
suite = self.test_suite()
test_labels = test_labels or ['.']
extra_tests = extra_tests or []
self.test_loader.testNamePatterns = self.test_name_patterns
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs['pattern'] = self.pattern
if self.top_level is not None:
discover_kwargs['top_level_dir'] = self.top_level
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = self.test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path) and not self.top_level:
# Try to be a bit smarter than unittest about finding the
# default top-level for a given directory path, to avoid
# breaking relative imports. (Unittest's default is to set
# top-level equal to the path, which means relative imports
# will result in "Attempted relative import in non-package.").
# We'd be happy to skip this and require dotted module paths
# (which don't cause this problem) instead of file paths (which
# do), but in the case of a directory in the cwd, which would
# be equally valid if considered as a top-level module or as a
# directory path, unittest unfortunately prefers the latter.
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
suite.addTests(tests)
for test in extra_tests:
suite.addTest(test)
if self.tags or self.exclude_tags:
if self.verbosity >= 2:
if self.tags:
print('Including test tag(s): %s.' % ', '.join(sorted(self.tags)))
if self.exclude_tags:
print('Excluding test tag(s): %s.' % ', '.join(sorted(self.exclude_tags)))
suite = filter_tests_by_tags(suite, self.tags, self.exclude_tags)
suite = reorder_suite(suite, self.reorder_by, self.reverse)
if self.parallel > 1:
parallel_suite = self.parallel_test_suite(suite, self.parallel, self.failfast)
# Since tests are distributed across processes on a per-TestCase
# basis, there's no need for more processes than TestCases.
parallel_units = len(parallel_suite.subsuites)
self.parallel = min(self.parallel, parallel_units)
# If there's only one TestCase, parallelization isn't needed.
if self.parallel > 1:
suite = parallel_suite
return suite
def setup_databases(self, **kwargs):
return _setup_databases(
self.verbosity, self.interactive, time_keeper=self.time_keeper, keepdb=self.keepdb,
debug_sql=self.debug_sql, parallel=self.parallel, **kwargs
)
def get_resultclass(self):
if self.debug_sql:
return DebugSQLTextTestResult
elif self.pdb:
return PDBDebugResult
def get_test_runner_kwargs(self):
return {
'failfast': self.failfast,
'resultclass': self.get_resultclass(),
'verbosity': self.verbosity,
'buffer': self.buffer,
}
def run_checks(self, databases):
# Checks are run after database creation since some checks require
# database access.
call_command('check', verbosity=self.verbosity, databases=databases)
def run_suite(self, suite, **kwargs):
kwargs = self.get_test_runner_kwargs()
runner = self.test_runner(**kwargs)
return runner.run(suite)
def teardown_databases(self, old_config, **kwargs):
"""Destroy all the non-mirror databases."""
_teardown_databases(
old_config,
verbosity=self.verbosity,
parallel=self.parallel,
keepdb=self.keepdb,
)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def _get_databases(self, suite):
databases = set()
for test in suite:
if isinstance(test, unittest.TestCase):
test_databases = getattr(test, 'databases', None)
if test_databases == '__all__':
return set(connections)
if test_databases:
databases.update(test_databases)
else:
databases.update(self._get_databases(test))
return databases
def get_databases(self, suite):
databases = self._get_databases(suite)
if self.verbosity >= 2:
unused_databases = [alias for alias in connections if alias not in databases]
if unused_databases:
print('Skipping setup of unused database(s): %s.' % ', '.join(sorted(unused_databases)))
return databases
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Return the number of tests that failed.
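        A sketch of a direct call (app and test names are hypothetical)::
            failures = DiscoverRunner(verbosity=2).run_tests(
                ['myapp.tests.MyTests.test_foo'])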
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
databases = self.get_databases(suite)
with self.time_keeper.timed('Total database setup'):
old_config = self.setup_databases(aliases=databases)
run_failed = False
try:
self.run_checks(databases)
result = self.run_suite(suite)
except Exception:
run_failed = True
raise
finally:
try:
with self.time_keeper.timed('Total database teardown'):
self.teardown_databases(old_config)
self.teardown_test_environment()
except Exception:
# Silence teardown exceptions if an exception was raised during
# runs to avoid shadowing it.
if not run_failed:
raise
self.time_keeper.print_results()
return self.suite_result(suite, result)
def is_discoverable(label):
"""
Check if a test label points to a Python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
def reorder_suite(suite, classes, reverse=False):
"""
Reorder a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, sort tests within classes in opposite order but
don't reverse test classes.
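    A toy sketch (the test objects are hypothetical)::
        suite = unittest.TestSuite([simple_case, db_case])
        reorder_suite(suite, (TestCase, SimpleTestCase))
        # returns a new suite ordered [db_case, simple_case], because db_case
        # is a TestCase and TestCase is listed first in `classes`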
"""
class_count = len(classes)
suite_class = type(suite)
bins = [OrderedSet() for i in range(class_count + 1)]
partition_suite_by_type(suite, classes, bins, reverse=reverse)
reordered_suite = suite_class()
for i in range(class_count + 1):
reordered_suite.addTests(bins[i])
return reordered_suite
def partition_suite_by_type(suite, classes, bins, reverse=False):
"""
Partition a test suite by test type. Also prevent duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
suite_class = type(suite)
if reverse:
suite = reversed(tuple(suite))
for test in suite:
if isinstance(test, suite_class):
partition_suite_by_type(test, classes, bins, reverse=reverse)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].add(test)
break
else:
bins[-1].add(test)
def partition_suite_by_case(suite):
"""Partition a test suite by test case, preserving the order of tests."""
groups = []
suite_class = type(suite)
for test_type, test_group in itertools.groupby(suite, type):
if issubclass(test_type, unittest.TestCase):
groups.append(suite_class(test_group))
else:
for item in test_group:
groups.extend(partition_suite_by_case(item))
return groups
def filter_tests_by_tags(suite, tags, exclude_tags):
suite_class = type(suite)
filtered_suite = suite_class()
for test in suite:
if isinstance(test, suite_class):
filtered_suite.addTests(filter_tests_by_tags(test, tags, exclude_tags))
else:
test_tags = set(getattr(test, 'tags', set()))
test_fn_name = getattr(test, '_testMethodName', str(test))
test_fn = getattr(test, test_fn_name, test)
test_fn_tags = set(getattr(test_fn, 'tags', set()))
all_tags = test_tags.union(test_fn_tags)
matched_tags = all_tags.intersection(tags)
if (matched_tags or not tags) and not all_tags.intersection(exclude_tags):
filtered_suite.addTest(test)
return filtered_suite
| {
"content_hash": "17839f5d99e2cd18909f9aae2d8cb8e9",
"timestamp": "",
"source": "github",
"line_count": 833,
"max_line_length": 115,
"avg_line_length": 35.88475390156062,
"alnum_prop": 0.6136089923725412,
"repo_name": "googleinterns/django",
"id": "83f06c72f307d284a26676870c4a2743c09d4eac",
"size": "29892",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/test/runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "79183"
},
{
"name": "HTML",
"bytes": "228941"
},
{
"name": "JavaScript",
"bytes": "136792"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "14076970"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
import os
import os.path as op
import warnings
import numpy as np
from scipy import sparse
from ..externals.six import string_types
from ..utils import verbose, logger
from ..io.pick import (channel_type, pick_info, pick_types,
_check_excludes_includes)
from ..io.constants import FIFF
def _get_meg_system(info):
"""Educated guess for the helmet type based on channels"""
system = '306m'
for ch in info['chs']:
if ch['kind'] == FIFF.FIFFV_MEG_CH:
coil_type = ch['coil_type'] & 0xFFFF
if coil_type == FIFF.FIFFV_COIL_NM_122:
system = '122m'
break
elif coil_type // 1000 == 3: # All Vectorview coils are 30xx
system = '306m'
break
elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH
for c in info['chs']])
system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
break
elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
system = 'CTF_275'
break
elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
system = 'KIT'
break
elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
system = 'BabySQUID'
break
return system
def _contains_ch_type(info, ch_type):
"""Check whether a certain channel type is in an info object
Parameters
    ----------
info : instance of mne.io.meas_info.Info
The measurement information.
ch_type : str
the channel type to be checked for
Returns
-------
has_ch_type : bool
Whether the channel type is present or not.
"""
if not isinstance(ch_type, string_types):
raise ValueError('`ch_type` is of class {actual_class}. It must be '
'`str`'.format(actual_class=type(ch_type)))
valid_channel_types = ['grad', 'mag', 'planar1', 'planar2', 'eeg', 'stim',
'eog', 'emg', 'ecg', 'ref_meg', 'resp', 'exci',
'ias', 'syst', 'seeg', 'misc']
if ch_type not in valid_channel_types:
raise ValueError('ch_type must be one of %s, not "%s"'
% (valid_channel_types, ch_type))
if info is None:
raise ValueError('Cannot check for channels of type "%s" because info '
'is None' % (ch_type,))
return ch_type in [channel_type(info, ii) for ii in range(info['nchan'])]
def _get_ch_type(inst, ch_type):
"""Helper to choose a single channel type (usually for plotting)
Usually used in plotting to plot a single datatype, e.g. look for mags,
then grads, then ... to plot.
"""
if ch_type is None:
for type_ in ['mag', 'grad', 'planar1', 'planar2', 'eeg']:
if type_ in inst:
ch_type = type_
break
else:
raise RuntimeError('No plottable channel types found')
return ch_type
@verbose
def equalize_channels(candidates, verbose=None):
"""Equalize channel picks for a collection of MNE-Python objects
Parameters
----------
candidates : list
        List of Raw, Epochs, or Evoked objects.
verbose : None | bool
whether to be verbose or not.
Notes
-----
This function operates inplace.
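    Examples
    --------
    A minimal sketch; the file names are hypothetical.
    >>> raw = mne.io.read_raw_fif('sub01_raw.fif')  # doctest: +SKIP
    >>> epochs = mne.read_epochs('sub01-epo.fif')  # doctest: +SKIP
    >>> equalize_channels([raw, epochs])  # doctest: +SKIP
    Afterwards both objects retain only the channels they have in common.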
"""
from ..io.base import _BaseRaw
from ..epochs import _BaseEpochs
from ..evoked import Evoked
from ..time_frequency import AverageTFR
if not all(isinstance(c, (_BaseRaw, _BaseEpochs, Evoked, AverageTFR))
for c in candidates):
valid = ['Raw', 'Epochs', 'Evoked', 'AverageTFR']
raise ValueError('candidates must be ' + ' or '.join(valid))
chan_max_idx = np.argmax([c.info['nchan'] for c in candidates])
chan_template = candidates[chan_max_idx].ch_names
    logger.info('Identifying common channels ...')
channels = [set(c.ch_names) for c in candidates]
common_channels = set(chan_template).intersection(*channels)
dropped = list()
for c in candidates:
drop_them = list(set(c.ch_names) - common_channels)
if drop_them:
c.drop_channels(drop_them)
dropped.extend(drop_them)
if dropped:
dropped = list(set(dropped))
logger.info('Dropped the following channels:\n%s' % dropped)
else:
        logger.info('All channels already match; nothing to do.')
class ContainsMixin(object):
"""Mixin class for Raw, Evoked, Epochs
"""
def __contains__(self, ch_type):
"""Check channel type membership"""
if ch_type == 'meg':
has_ch_type = (_contains_ch_type(self.info, 'mag') or
_contains_ch_type(self.info, 'grad'))
else:
has_ch_type = _contains_ch_type(self.info, ch_type)
return has_ch_type
class SetChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs
"""
def _get_channel_positions(self, picks=None):
"""Gets channel locations from info
Parameters
----------
picks : array-like of int | None
Indices of channels to include. If None (default), all meg and eeg
channels that are available are returned (bad channels excluded).
Notes
-----
.. versionadded:: 0.9.0
"""
if picks is None:
picks = pick_types(self.info, meg=True, eeg=True)
chs = self.info['chs']
pos = np.array([chs[k]['loc'][:3] for k in picks])
n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
if n_zero > 1: # XXX some systems have origin (0, 0, 0)
raise ValueError('Could not extract channel positions for '
'{} channels'.format(n_zero))
return pos
def _set_channel_positions(self, pos, names):
"""Update channel locations in info
Parameters
----------
pos : array-like | np.ndarray, shape (n_points, 3)
The channel positions to be set.
names : list of str
The names of the channels to be set.
Notes
-----
.. versionadded:: 0.9.0
"""
if len(pos) != len(names):
raise ValueError('Number of channel positions not equal to '
'the number of names given.')
        pos = np.asarray(pos, dtype=float)  # np.float alias is deprecated
if pos.shape[-1] != 3 or pos.ndim != 2:
msg = ('Channel positions must have the shape (n_points, 3) '
'not %s.' % (pos.shape,))
raise ValueError(msg)
for name, p in zip(names, pos):
if name in self.ch_names:
idx = self.ch_names.index(name)
self.info['chs'][idx]['loc'][:3] = p
else:
msg = ('%s was not found in the info. Cannot be updated.'
% name)
raise ValueError(msg)
def set_channel_types(self, mapping):
"""Define the sensor type of channels.
Note: The following sensor types are accepted:
ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst
Parameters
----------
mapping : dict
            A dictionary mapping a channel name to a sensor type (str),
            e.g. {'EEG061': 'eog'}.
Notes
-----
.. versionadded:: 0.9.0
"""
human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,
'eeg': FIFF.FIFFV_EEG_CH,
'emg': FIFF.FIFFV_EMG_CH,
'eog': FIFF.FIFFV_EOG_CH,
'exci': FIFF.FIFFV_EXCI_CH,
'ias': FIFF.FIFFV_IAS_CH,
'misc': FIFF.FIFFV_MISC_CH,
'resp': FIFF.FIFFV_RESP_CH,
'seeg': FIFF.FIFFV_SEEG_CH,
'stim': FIFF.FIFFV_STIM_CH,
'syst': FIFF.FIFFV_SYST_CH}
human2unit = {'ecg': FIFF.FIFF_UNIT_V,
'eeg': FIFF.FIFF_UNIT_V,
'emg': FIFF.FIFF_UNIT_V,
'eog': FIFF.FIFF_UNIT_V,
'exci': FIFF.FIFF_UNIT_NONE,
'ias': FIFF.FIFF_UNIT_NONE,
'misc': FIFF.FIFF_UNIT_V,
'resp': FIFF.FIFF_UNIT_NONE,
'seeg': FIFF.FIFF_UNIT_V,
'stim': FIFF.FIFF_UNIT_NONE,
'syst': FIFF.FIFF_UNIT_NONE}
unit2human = {FIFF.FIFF_UNIT_V: 'V',
FIFF.FIFF_UNIT_NONE: 'NA'}
ch_names = self.info['ch_names']
# first check and assemble clean mappings of index and name
for ch_name, ch_type in mapping.items():
if ch_name not in ch_names:
raise ValueError("This channel name (%s) doesn't exist in "
"info." % ch_name)
c_ind = ch_names.index(ch_name)
if ch_type not in human2fiff:
raise ValueError('This function cannot change to this '
'channel type: %s. Accepted channel types '
'are %s.' % (ch_type,
", ".join(human2unit.keys())))
# Set sensor type
self.info['chs'][c_ind]['kind'] = human2fiff[ch_type]
unit_old = self.info['chs'][c_ind]['unit']
unit_new = human2unit[ch_type]
if unit_old != human2unit[ch_type]:
warnings.warn("The unit for Channel %s has changed "
"from %s to %s." % (ch_name,
unit2human[unit_old],
unit2human[unit_new]))
self.info['chs'][c_ind]['unit'] = human2unit[ch_type]
if ch_type in ['eeg', 'seeg']:
self.info['chs'][c_ind]['coil_type'] = FIFF.FIFFV_COIL_EEG
else:
self.info['chs'][c_ind]['coil_type'] = FIFF.FIFFV_COIL_NONE
def rename_channels(self, mapping):
"""Rename channels.
Parameters
----------
mapping : dict | callable
a dictionary mapping the old channel to a new channel name
e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
that takes and returns a string (new in version 0.10.0).
Notes
-----
.. versionadded:: 0.9.0
"""
rename_channels(self.info, mapping)
def set_montage(self, montage):
"""Set EEG sensor configuration
Parameters
----------
montage : instance of Montage or DigMontage
Notes
-----
Operates in place.
.. versionadded:: 0.9.0
"""
from .montage import _set_montage
_set_montage(self.info, montage)
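# -- Hedged usage sketch (not part of the original module) --
# Rough illustration of the SetChannelsMixin API defined above. The channel
# names ('EEG 061', 'EOG 061') are made up for the example, so treat this as
# a sketch rather than a recipe; a real recording may use different names.
def _example_set_channels(raw):
    """Sketch: retype a mislabeled EOG channel and then rename it."""
    raw.set_channel_types({'EEG 061': 'eog'})
    raw.rename_channels({'EEG 061': 'EOG 061'})
    return raw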
class UpdateChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs, AverageTFR
"""
def pick_types(self, meg=True, eeg=False, stim=False, eog=False,
ecg=False, emg=False, ref_meg='auto', misc=False,
resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=False, include=[], exclude='bads', selection=None,
copy=False):
"""Pick some channels by type and names
Parameters
----------
meg : bool | str
            If True include all MEG channels. If False include no MEG
            channels. If string it can be 'mag', 'grad', 'planar1' or
            'planar2' to select only magnetometers, all gradiometers, or a
            specific type of gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
        ref_meg : bool | str
If True include CTF / 4D reference channels. If 'auto', the
reference channels are only included if compensations are present.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If True include response-trigger channel. For some MEG systems this
is separate from the stim channel.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
            Flux excitation channel (this used to be a stimulus channel).
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
include : list of string
List of additional channels to include. If empty do not include
any.
exclude : list of string | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of string
Restrict sensor channels (MEG, EEG) to this list of channel names.
copy : bool
If True, returns new instance. Else, modifies in place. Defaults to
False.
Notes
-----
.. versionadded:: 0.9.0
"""
inst = self.copy() if copy else self
idx = pick_types(
self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
ias=ias, syst=syst, seeg=seeg, include=include, exclude=exclude,
selection=selection)
inst._pick_drop_channels(idx)
return inst
def pick_channels(self, ch_names, copy=False):
"""Pick some channels
Parameters
----------
ch_names : list
The list of channels to select.
copy : bool
If True, returns new instance. Else, modifies in place. Defaults to
False.
See Also
--------
drop_channels
Notes
-----
.. versionadded:: 0.9.0
"""
inst = self.copy() if copy else self
_check_excludes_includes(ch_names)
idx = [inst.ch_names.index(c) for c in ch_names if c in inst.ch_names]
inst._pick_drop_channels(idx)
return inst
def drop_channels(self, ch_names, copy=False):
"""Drop some channels
Parameters
----------
ch_names : list
The list of channels to remove.
copy : bool
If True, returns new instance. Else, modifies in place. Defaults to
False.
See Also
--------
pick_channels
Notes
-----
.. versionadded:: 0.9.0
"""
inst = self.copy() if copy else self
bad_idx = [inst.ch_names.index(c) for c in ch_names
if c in inst.ch_names]
idx = np.setdiff1d(np.arange(len(inst.ch_names)), bad_idx)
inst._pick_drop_channels(idx)
return inst
def _pick_drop_channels(self, idx):
# avoid circular imports
from ..io.base import _BaseRaw
from ..epochs import _BaseEpochs
from ..evoked import Evoked
from ..time_frequency import AverageTFR
if isinstance(self, (_BaseRaw, _BaseEpochs)):
if not self.preload:
raise RuntimeError('If Raw or Epochs, data must be preloaded '
'to drop or pick channels')
def inst_has(attr):
return getattr(self, attr, None) is not None
if inst_has('picks'):
self.picks = self.picks[idx]
if inst_has('_cals'):
self._cals = self._cals[idx]
self.info = pick_info(self.info, idx, copy=False)
if inst_has('_projector'):
self._projector = self._projector[idx][:, idx]
if isinstance(self, _BaseRaw) and inst_has('_data'):
self._data = self._data.take(idx, axis=0)
elif isinstance(self, _BaseEpochs) and inst_has('_data'):
self._data = self._data.take(idx, axis=1)
elif isinstance(self, AverageTFR) and inst_has('data'):
self.data = self.data.take(idx, axis=0)
elif isinstance(self, Evoked):
self.data = self.data.take(idx, axis=0)
def add_channels(self, add_list, copy=False):
"""Append new channels to the instance.
Parameters
----------
add_list : list
            A list of objects to append to self. Must contain only objects
            of the same type as the current object.
        copy : bool
            Whether to return a new instance or modify in place.
Returns
-------
out : MNE object of type(self)
An object with new channels appended (will be the same
object if copy==False)
"""
# avoid circular imports
from ..io.base import _BaseRaw
from ..epochs import _BaseEpochs
from ..io.meas_info import _merge_info
if not isinstance(add_list, (list, tuple)):
raise AssertionError('Input must be a list or tuple of objs')
# Object-specific checks
if isinstance(self, (_BaseRaw, _BaseEpochs)):
if not all([inst.preload for inst in add_list] + [self.preload]):
raise AssertionError('All data must be preloaded')
data_name = '_data'
if isinstance(self, _BaseRaw):
con_axis = 0
comp_class = _BaseRaw
elif isinstance(self, _BaseEpochs):
con_axis = 1
comp_class = _BaseEpochs
else:
data_name = 'data'
con_axis = 0
comp_class = type(self)
if not all(isinstance(inst, comp_class) for inst in add_list):
raise AssertionError('All input data must be of same type')
data = [getattr(inst, data_name) for inst in [self] + add_list]
# Make sure that all dimensions other than channel axis are the same
compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
shapes = np.array([dat.shape for dat in data])[:, compare_axes]
if not ((shapes[0] - shapes) == 0).all():
raise AssertionError('All dimensions except channels must match')
# Create final data / info objects
data = np.concatenate(data, axis=con_axis)
infos = [self.info] + [inst.info for inst in add_list]
new_info = _merge_info(infos)
# Now update the attributes
if copy is True:
out = self.copy()
else:
out = self
setattr(out, data_name, data)
out.info = new_info
if isinstance(self, _BaseRaw):
out._cals = np.concatenate([getattr(inst, '_cals')
for inst in [self] + add_list])
return out
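# -- Hedged usage sketch (not part of the original module) --
# The UpdateChannelsMixin methods above can be chained to narrow an instance
# to a channel subset. `raw` is a hypothetical preloaded Raw object and
# `bad_names` is an illustrative default, not a channel guaranteed to exist.
def _example_pick_and_drop(raw, bad_names=('MEG 2443',)):
    """Sketch: keep gradiometers plus EEG, then drop named channels."""
    subset = raw.pick_types(meg='grad', eeg=True, copy=True)
    subset.drop_channels([name for name in bad_names
                          if name in subset.ch_names])
    return subset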
class InterpolationMixin(object):
"""Mixin class for Raw, Evoked, Epochs
"""
def interpolate_bads(self, reset_bads=True, mode='accurate'):
"""Interpolate bad MEG and EEG channels.
Operates in place.
Parameters
----------
reset_bads : bool
If True, remove the bads from info.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used for interpolation of MEG
channels.
Returns
-------
self : mne.io.Raw, mne.Epochs or mne.Evoked
The interpolated data.
Notes
-----
.. versionadded:: 0.9.0
"""
from .interpolation import _interpolate_bads_eeg, _interpolate_bads_meg
if getattr(self, 'preload', None) is False:
raise ValueError('Data must be preloaded.')
_interpolate_bads_eeg(self)
_interpolate_bads_meg(self, mode=mode)
if reset_bads is True:
self.info['bads'] = []
return self
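# -- Hedged usage sketch (not part of the original module) --
# `interpolate_bads` above requires preloaded data. `inst` is a hypothetical
# Raw/Epochs/Evoked instance whose `info['bads']` is already populated.
def _example_interpolate(inst):
    """Sketch: interpolate bad channels, keeping the old bad list around."""
    bads_before = list(inst.info['bads'])
    inst.interpolate_bads(reset_bads=True, mode='accurate')
    return bads_before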
def rename_channels(info, mapping):
"""Rename channels.
Parameters
----------
info : dict
Measurement info.
mapping : dict | callable
a dictionary mapping the old channel to a new channel name
e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
that takes and returns a string (new in version 0.10.0).
"""
info._check_consistency()
bads = list(info['bads']) # make our own local copies
ch_names = list(info['ch_names'])
# first check and assemble clean mappings of index and name
if isinstance(mapping, dict):
orig_names = sorted(list(mapping.keys()))
missing = [orig_name not in ch_names for orig_name in orig_names]
if any(missing):
raise ValueError("Channel name(s) in mapping missing from info: "
"%s" % np.array(orig_names)[np.array(missing)])
new_names = [(ch_names.index(ch_name), new_name)
for ch_name, new_name in mapping.items()]
elif callable(mapping):
new_names = [(ci, mapping(ch_name))
for ci, ch_name in enumerate(ch_names)]
else:
raise ValueError('mapping must be callable or dict, not %s'
% (type(mapping),))
# check we got all strings out of the mapping
if any(not isinstance(new_name[1], string_types)
for new_name in new_names):
raise ValueError('New channel mapping must only be to strings')
# do the remapping locally
for c_ind, new_name in new_names:
for bi, bad in enumerate(bads):
if bad == ch_names[c_ind]:
bads[bi] = new_name
ch_names[c_ind] = new_name
# check that all the channel names are unique
if len(ch_names) != len(np.unique(ch_names)):
raise ValueError('New channel names are not unique, renaming failed')
    # do the remapping in info
info['bads'] = bads
info['ch_names'] = ch_names
for ch, ch_name in zip(info['chs'], ch_names):
ch['ch_name'] = ch_name
info._check_consistency()
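# -- Hedged usage sketch (not part of the original module) --
# `rename_channels` also accepts a callable; this hypothetical normalization
# strips spaces from every channel name. Note it raises if the resulting
# names are no longer unique.
def _example_rename_with_callable(info):
    """Sketch: rename all channels by removing embedded spaces."""
    rename_channels(info, lambda name: name.replace(' ', ''))
    return info['ch_names']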
def _recursive_flatten(cell, dtype):
"""Helper to unpack mat files in Python"""
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
def read_ch_connectivity(fname, picks=None):
"""Parse FieldTrip neighbors .mat file
More information on these neighbor definitions can be found on the
related FieldTrip documentation pages:
http://fieldtrip.fcdonders.nl/template/neighbours
Parameters
----------
fname : str
The file name. Example: 'neuromag306mag', 'neuromag306planar',
'ctf275', 'biosemi64', etc.
picks : array-like of int, shape (n_channels,)
The indices of the channels to include. Must match the template.
Defaults to None.
Returns
-------
ch_connectivity : scipy.sparse matrix
The connectivity matrix.
ch_names : list
The list of channel names present in connectivity matrix.
"""
from scipy.io import loadmat
if not op.isabs(fname):
templates_dir = op.realpath(op.join(op.dirname(__file__),
'data', 'neighbors'))
templates = os.listdir(templates_dir)
for f in templates:
if f == fname:
break
if f == fname + '_neighb.mat':
fname += '_neighb.mat'
break
else:
raise ValueError('I do not know about this neighbor '
'template: "{}"'.format(fname))
fname = op.join(templates_dir, fname)
nb = loadmat(fname)['neighbours']
ch_names = _recursive_flatten(nb['label'], string_types)
neighbors = [_recursive_flatten(c, string_types) for c in
nb['neighblabel'].flatten()]
assert len(ch_names) == len(neighbors)
if picks is not None:
if max(picks) >= len(ch_names):
raise ValueError('The picks must be compatible with '
'channels. Found a pick ({}) which exceeds '
'the channel range ({})'
.format(max(picks), len(ch_names)))
connectivity = _ch_neighbor_connectivity(ch_names, neighbors)
if picks is not None:
# picking before constructing matrix is buggy
connectivity = connectivity[picks][:, picks]
ch_names = [ch_names[p] for p in picks]
return connectivity, ch_names
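# -- Hedged usage sketch (not part of the original module) --
# `read_ch_connectivity` resolves bundled FieldTrip templates by name. The
# template name used here ('neuromag306mag') mirrors the docstring example;
# whether it resolves depends on the data files shipped with the package.
def _example_read_connectivity(template='neuromag306mag'):
    """Sketch: load a neighbor template and report its size."""
    connectivity, ch_names = read_ch_connectivity(template)
    return connectivity.shape, len(ch_names)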
def _ch_neighbor_connectivity(ch_names, neighbors):
"""Compute sensor connectivity matrix
Parameters
----------
ch_names : list of str
The channel names.
neighbors : list of list
A list of list of channel names. The neighbors to
which the channels in ch_names are connected with.
Must be of the same length as ch_names.
Returns
-------
ch_connectivity : scipy.sparse matrix
The connectivity matrix.
"""
if len(ch_names) != len(neighbors):
raise ValueError('`ch_names` and `neighbors` must '
'have the same length')
set_neighbors = set([c for d in neighbors for c in d])
rest = set(ch_names) - set_neighbors
if len(rest) > 0:
raise ValueError('Some of your neighbors are not present in the '
'list of channel names')
for neigh in neighbors:
if (not isinstance(neigh, list) and
not all(isinstance(c, string_types) for c in neigh)):
raise ValueError('`neighbors` must be a list of lists of str')
ch_connectivity = np.eye(len(ch_names), dtype=bool)
    for ii, neighbs in enumerate(neighbors):
        ch_connectivity[ii, [ch_names.index(i) for i in neighbs]] = True
ch_connectivity = sparse.csr_matrix(ch_connectivity)
return ch_connectivity
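# -- Hedged usage sketch (not part of the original module) --
# A self-contained toy call to `_ch_neighbor_connectivity` with three fake
# channels arranged in a chain; the dense result has True on the diagonal
# and between declared neighbors.
def _example_toy_connectivity():
    """Sketch: build a 3-channel chain connectivity matrix."""
    ch_names = ['A', 'B', 'C']
    neighbors = [['B'], ['A', 'C'], ['B']]
    return _ch_neighbor_connectivity(ch_names, neighbors).toarray()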
| {
"content_hash": "21ff9671c78570cb208c40f541feab20",
"timestamp": "",
"source": "github",
"line_count": 738,
"max_line_length": 79,
"avg_line_length": 35.356368563685635,
"alnum_prop": 0.544897098838769,
"repo_name": "lorenzo-desantis/mne-python",
"id": "aae1a48597e0542298d359e9db324afb77a19994",
"size": "26403",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mne/channels/channels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4322690"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |