import tensorrt as trt
import os
import pycuda.driver as cuda
import pycuda.autoinit  # noqa: F401 -- initializes the CUDA context on import
import numpy as np
from collections import namedtuple
from PIL import Image
_ModelData = namedtuple('_ModelData', ['MODEL_PATH', 'INPUT_SHAPE', 'DTYPE'])
ModelData = _ModelData(MODEL_PATH = "resnet50v1/resnet50v1.onnx",
INPUT_SHAPE = (1,3, 224, 224),
DTYPE = trt.float32 )
def center_crop(img, output_size):
image_width, image_height = img.size
crop_height, crop_width = output_size
crop_top = int(round((image_height - crop_height) / 2.))
crop_left = int(round((image_width - crop_width) / 2.))
def crop(img, top, left, height, width):
return img.crop((left, top, left + width, top + height))
return crop(img, crop_top, crop_left, crop_height, crop_width)
def resize_with_aspectratio(img, out_size, interpolation = Image.BILINEAR):
w, h = img.size
size = out_size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
img = img.resize((ow,oh), interpolation)
return img
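# Illustration (hedged, not part of the original file): together, the two
# helpers above implement the standard ImageNet evaluation preprocessing --
# resize the short side to 256, then take a centered 224x224 crop. For
# example, a 640x480 input is resized to 341x256 (int(256 * 640 / 480) == 341)
# and then cropped to 224x224.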
class RN50Calibrator(trt.IInt8EntropyCalibrator2):
def __init__(self, calib_batch_size=1, calib_max_batches=500, force_calibration=False,
cache_file="code/resnet50/tensorrt/calibrator.cache",
image_dir="build/data/imagenet",
calib_data_map="data_maps/imagenet/cal_map.txt"
):
# Whenever you specify a custom constructor for a TensorRT class,
# you MUST call the constructor of the parent explicitly.
trt.IInt8EntropyCalibrator2.__init__(self)
self.calib_batch_size = calib_batch_size
self.calib_max_batches = calib_max_batches
self.force_calibration = force_calibration
self.cache_file = cache_file
# Build the set of calibration image names listed in the data map.
image_names = set()
with open(calib_data_map) as f:
for line in f:
image_names.add(line.split()[0])
# Collect the matching image files from the image directory.
_batch_files = []
for f in os.listdir(image_dir):
if f.endswith('.JPEG') and f in image_names:
_batch_files.append(os.path.join(image_dir, f))
self.batch_files = np.array(_batch_files)
# Find out the shape of a batch and then allocate a device buffer of that size.
self.shape = ModelData.INPUT_SHAPE
# Each element of the calibration data is a float32.
self.device_input = cuda.mem_alloc(trt.volume(self.shape) * trt.float32.itemsize)
# Create a generator that will give us batches. We can use next() to iterate over the result.
def load_batches():
for f in self.batch_files:
shape, data = self.read_batch_file(f)
yield shape, data
self.batches = load_batches()
# This function is used to load calibration data from the calibration batch files.
# In this implementation, one file corresponds to one batch, but it is also possible to use
# aggregate data from multiple files, or use only data from portions of a file.
def read_batch_file(self, filename):
data = self.normalize_image(filename)
shape = ModelData.INPUT_SHAPE
return shape, data
def get_batch_size(self):
return self.shape[0]
# TensorRT passes along the names of the engine bindings to the get_batch function.
# You don't necessarily have to use them, but they can be useful to understand the order of
# the inputs. The bindings list is expected to have the same ordering as 'names'.
def get_batch(self, names):
try:
# Get a single batch.
_, data = next(self.batches)
# Copy to device, then return a list containing pointers to input device buffers.
cuda.memcpy_htod(self.device_input, np.ascontiguousarray(data))
return [int(self.device_input)]
except StopIteration:
# When we're out of batches, we return either [] or None.
# This signals to TensorRT that there is no calibration data remaining.
return None
def read_calibration_cache(self):
# If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
if os.path.exists(self.cache_file):
print('reading calibration file')
with open(self.cache_file, "rb") as f:
return f.read()
def write_calibration_cache(self, cache):
if not os.path.exists(self.cache_file):
print('writing calibration file')
with open(self.cache_file, "wb") as f:
f.write(cache)
def clear_cache(self):
self.cache = None
def __del__(self):
self.device_input.free()
def normalize_image(self, img):
img = Image.open(img)
img = img.convert('RGB')
img = resize_with_aspectratio(img, 256)
img = center_crop(img, (224, 224))
img = np.asarray(img, dtype='float32')
img /= 255.0
mean = np.array([0.485,0.456,0.406], dtype=np.float32)
std = np.array([0.229,0.224,0.225], dtype=np.float32)
img = (img - mean) / std
# HWC -> CHW to match the network's input layout.
img = img.transpose([2, 0, 1])
return img
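# --- Usage sketch (hedged) ---------------------------------------------------
# An illustration of how this calibrator could plug into TensorRT engine
# building; it is not part of the original file. It assumes a TensorRT 7-era
# builder API and that ModelData.MODEL_PATH points at a valid ONNX file.
#
#     TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
#     builder = trt.Builder(TRT_LOGGER)
#     flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
#     network = builder.create_network(flags)
#     parser = trt.OnnxParser(network, TRT_LOGGER)
#     with open(ModelData.MODEL_PATH, 'rb') as f:
#         parser.parse(f.read())
#     config = builder.create_builder_config()
#     config.set_flag(trt.BuilderFlag.INT8)
#     config.int8_calibrator = RN50Calibrator()
#     engine = builder.build_engine(network, config)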
|
{
"content_hash": "690084dc8f49dafe0e0abd4d178d7a90",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 102,
"avg_line_length": 37.38562091503268,
"alnum_prop": 0.6166083916083916,
"repo_name": "mlperf/inference_results_v0.7",
"id": "b0d9e6b733d26b2921dbd63f8054c5108e5cf432",
"size": "6331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open/Inspur/code/resnet50/tensorrt/calibrator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148628"
},
{
"name": "C++",
"bytes": "14551146"
},
{
"name": "CMake",
"bytes": "380597"
},
{
"name": "Cuda",
"bytes": "3604332"
},
{
"name": "Dockerfile",
"bytes": "32985"
},
{
"name": "Makefile",
"bytes": "103953"
},
{
"name": "Objective-C",
"bytes": "5470"
},
{
"name": "Python",
"bytes": "11627827"
},
{
"name": "Roff",
"bytes": "153"
},
{
"name": "Shell",
"bytes": "349257"
}
],
"symlink_target": ""
}
|
"""
The abstract :py:class:`Task` class.
It is a central concept of Luigi and represents the state of the workflow.
See :doc:`/tasks` for an overview.
"""
try:
from itertools import imap as map  # Python 2: prefer the lazy map
except ImportError:
pass  # Python 3: the builtin map is already lazy
import logging
import traceback
import warnings
import json
import hashlib
import re
from luigi import six
from luigi import parameter
from luigi.task_register import Register
Parameter = parameter.Parameter
logger = logging.getLogger('luigi-interface')
def namespace(namespace=None):
"""
Call to set namespace of tasks declared after the call.
If called without arguments or with ``None`` as the namespace, the namespace
is reset, which is recommended to do at the end of any file where the
namespace is set to avoid unintentionally setting namespace on tasks outside
of the scope of the current file.
The namespace of a Task can also be changed by specifying the property
``task_namespace``. This solution has the advantage that the namespace
doesn't have to be restored.
.. code-block:: python
class Task2(luigi.Task):
task_namespace = 'namespace2'
"""
Register._default_namespace = namespace
class BulkCompleteNotImplementedError(NotImplementedError):
"""This is here to trick pylint.
pylint thinks anything raising NotImplementedError needs to be implemented
in any subclass. bulk_complete isn't like that. This tricks pylint into
thinking that the default implementation is a valid implementation and not
an abstract method."""
pass
@six.add_metaclass(Register)
class Task(object):
"""
This is the base class of all Luigi Tasks, the base unit of work in Luigi.
A Luigi Task describes a unit of work.
The key methods of a Task, which must be implemented in a subclass are:
* :py:meth:`run` - the computation done by this task.
* :py:meth:`requires` - the list of Tasks that this Task depends on.
* :py:meth:`output` - the output :py:class:`Target` that this Task creates.
Each :py:class:`~luigi.Parameter` of the Task should be declared as members:
.. code:: python
class MyTask(luigi.Task):
count = luigi.IntParameter()
second_param = luigi.Parameter()
In addition to any declared properties and methods, there are a few
non-declared properties, which are created by the :py:class:`Register`
metaclass:
``Task.task_namespace``
optional string which is prepended to the task name for the sake of
scheduling. If it isn't overridden in a Task, whatever was last declared
using `luigi.namespace` will be used.
"""
_event_callbacks = {}
#: Priority of the task: the scheduler should favor available
#: tasks with higher priority values first.
#: See :ref:`Task.priority`
priority = 0
disabled = False
#: Resources used by the task. Should be formatted like {"scp": 1} to indicate that the
#: task requires 1 unit of the scp resource.
resources = {}
#: Number of seconds after which to time out the run function.
#: No timeout if set to 0.
#: Defaults to 0 or worker-timeout value in config file
#: Only works when using multiple workers.
worker_timeout = None
@property
def owner_email(self):
'''
Override this to send out additional error emails to task owner, in addition to the one
defined in `core`.`error-email`. This should return a string or a list of strings. e.g.
'[email protected]' or ['[email protected]', '[email protected]']
'''
return None
@property
def use_cmdline_section(self):
''' Property used by core config such as `--workers` etc.
These will be exposed without the class name as a prefix.'''
return True
@classmethod
def event_handler(cls, event):
"""
Decorator for adding event handlers.
"""
def wrapped(callback):
cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback)
return callback
return wrapped
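# Example (hedged sketch, not part of the original file): registering a
# callback on a Task subclass; the event name is illustrative.
#
#     @MyTask.event_handler('event.core.success')
#     def on_success(task):
#         logger.info("%r finished", task)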
def trigger_event(self, event, *args, **kwargs):
"""
Trigger all of the callbacks registered for the given event on this class.
"""
for event_class, event_callbacks in six.iteritems(self._event_callbacks):
if not isinstance(self, event_class):
continue
for callback in event_callbacks.get(event, []):
try:
# Callbacks must not crash the task; errors are logged below.
callback(*args, **kwargs)
except KeyboardInterrupt:
return
except BaseException:
logger.exception("Error in event callback for %r", event)
@property
def task_module(self):
''' Returns what Python module to import to get access to this class. '''
# TODO(erikbern): we should think about a language-agnostic mechanism
return self.__class__.__module__
@property
def task_family(self):
"""
Convenience method since a property on the metaclass isn't directly accessible through the class instances.
"""
return self.__class__.task_family
@classmethod
def get_params(cls):
"""
Returns all of the Parameters for this Task.
"""
# We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically
params = []
for param_name in dir(cls):
param_obj = getattr(cls, param_name)
if not isinstance(param_obj, Parameter):
continue
params.append((param_name, param_obj))
# The order the parameters are created matters. See Parameter class
params.sort(key=lambda t: t[1]._counter)
return params
@classmethod
def get_param_values(cls, params, args, kwargs):
"""
Get the values of the parameters from the args and kwargs.
:param params: list of (param_name, Parameter).
:param args: positional arguments
:param kwargs: keyword arguments.
:returns: list of `(name, value)` tuples, one for each parameter.
"""
result = {}
params_dict = dict(params)
task_name = cls.task_family
# In case any exceptions are thrown, create a helpful description of how the Task was invoked
# TODO: should we detect non-reprable arguments? These will lead to mysterious errors
exc_desc = '%s[args=%s, kwargs=%s]' % (task_name, args, kwargs)
# Fill in the positional arguments
positional_params = [(n, p) for n, p in params if p.positional]
for i, arg in enumerate(args):
if i >= len(positional_params):
raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args)))
param_name, param_obj = positional_params[i]
result[param_name] = param_obj.normalize(arg)
# Then the keyword arguments
for param_name, arg in six.iteritems(kwargs):
if param_name in result:
raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name))
if param_name not in params_dict:
raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name))
result[param_name] = params_dict[param_name].normalize(arg)
# Then use the defaults for anything not filled in
for param_name, param_obj in params:
if param_name not in result:
if not param_obj.has_task_value(task_name, param_name):
raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name))
result[param_name] = param_obj.task_value(task_name, param_name)
def list_to_tuple(x):
""" Make tuples out of lists and sets to allow hashing """
if isinstance(x, list) or isinstance(x, set):
return tuple(x)
else:
return x
# Sort it by the correct order and make a list
return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params]
def __init__(self, *args, **kwargs):
params = self.get_params()
param_values = self.get_param_values(params, args, kwargs)
# Set all values on class instance
for key, value in param_values:
setattr(self, key, value)
# Register args and kwargs as an attribute on the class. Might be useful
self.param_args = tuple(value for key, value in param_values)
self.param_kwargs = dict(param_values)
# task_id is a concatenation of the task family, the values of the first 3
# parameters sorted by parameter name, and an md5 hash of the parameters
# serialized as canonicalised JSON.
TASK_ID_INCLUDE_PARAMS = 3
TASK_ID_TRUNCATE_PARAMS = 16
TASK_ID_TRUNCATE_HASH = 10
TASK_ID_INVALID_CHAR_REGEX = r'[^A-Za-z0-9_]'
params = self.to_str_params(only_significant=True)
param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS]
for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS]))
param_summary = re.sub(TASK_ID_INVALID_CHAR_REGEX, '_', param_summary)
self.task_id = '{}_{}_{}'.format(self.task_family, param_summary, param_hash[:TASK_ID_TRUNCATE_HASH])
self.__hash = hash(self.task_id)
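# Illustration (hedged): a resulting id looks roughly like
# "MyTask_2014_09_20_data_dump_500b66b2b8" -- the family, up to three
# truncated parameter values, and a 10-character hash suffix.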
def initialized(self):
"""
Returns ``True`` if the Task is initialized and ``False`` otherwise.
"""
return hasattr(self, 'task_id')
@classmethod
def from_str_params(cls, params_str):
"""
Creates an instance from a str->str hash.
:param params_str: dict of param name -> value as string.
"""
kwargs = {}
for param_name, param in cls.get_params():
if param_name in params_str:
kwargs[param_name] = param.parse(params_str[param_name])
return cls(**kwargs)
def to_str_params(self, only_significant=False):
"""
Convert all parameters to a str->str hash.
"""
params_str = {}
params = dict(self.get_params())
for param_name, param_value in six.iteritems(self.param_kwargs):
if (not only_significant) or params[param_name].significant:
params_str[param_name] = params[param_name].serialize(param_value)
return params_str
def clone(self, cls=None, **kwargs):
"""
Creates a new instance from an existing instance where some of the args have changed.
There are at least two scenarios where this is useful (see test/clone_test.py):
* remove a lot of boilerplate when you have recursive dependencies and lots of args
* there's task inheritance and some logic is on the base class
:param cls:
:param kwargs:
:return:
"""
k = self.param_kwargs.copy()
k.update(six.iteritems(kwargs))
if cls is None:
cls = self.__class__
new_k = {}
for param_name, param_class in cls.get_params():
if param_name in k:
new_k[param_name] = k[param_name]
return cls(**new_k)
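# Example (hedged, names are illustrative): given
# report = DailyReport(date=some_date, fmt='csv'), calling
# report.clone(fmt='json') returns DailyReport(date=some_date, fmt='json'),
# carrying every other parameter over unchanged.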
def __hash__(self):
return self.__hash
def __repr__(self):
"""
Build a task representation like `MyTask(param1=1.5, param2='5')`
"""
params = self.get_params()
param_values = self.get_param_values(params, [], self.param_kwargs)
# Build up task id
repr_parts = []
param_objs = dict(params)
for param_name, param_value in param_values:
if param_objs[param_name].significant:
repr_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value)))
task_str = '{}({})'.format(self.task_family, ', '.join(repr_parts))
return task_str
def __eq__(self, other):
return self.__class__ == other.__class__ and self.param_args == other.param_args
def complete(self):
"""
If the task has any outputs, return ``True`` if all outputs exist.
Otherwise, return ``False``.
However, you may freely override this method with custom logic.
"""
outputs = flatten(self.output())
if len(outputs) == 0:
warnings.warn(
"Task %r without outputs has no custom complete() method" % self,
stacklevel=2
)
return False
return all(map(lambda output: output.exists(), outputs))
@classmethod
def bulk_complete(cls, parameter_tuples):
"""
Returns those of parameter_tuples for which this Task is complete.
Override (with an efficient implementation) for efficient scheduling
with range tools. Keep the logic consistent with that of complete().
"""
raise BulkCompleteNotImplementedError()
def output(self):
"""
The output that this Task produces.
The output of the Task determines if the Task needs to be run--the task
is considered finished iff the outputs all exist. Subclasses should
override this method to return a single :py:class:`Target` or a list of
:py:class:`Target` instances.
Implementation note
If running multiple workers, the output must be a resource that is accessible
by all workers, such as a DFS or database. Otherwise, workers might compute
the same output since they don't see the work done by other workers.
See :ref:`Task.output`
"""
return [] # default impl
def requires(self):
"""
The Tasks that this Task depends on.
A Task will only run if all of the Tasks that it requires are completed.
If your Task does not require any other Tasks, then you don't need to
override this method. Otherwise, a subclass can override this method
to return a single Task, a list of Task instances, or a dict whose
values are Task instances.
See :ref:`Task.requires`
"""
return [] # default impl
def _requires(self):
"""
Override in "template" tasks which themselves are supposed to be
subclassed and thus have their requires() overridden (name preserved to
provide consistent end-user experience), yet need to introduce
(non-input) dependencies.
Must return an iterable which among others contains the _requires() of
the superclass.
"""
return flatten(self.requires()) # base impl
def process_resources(self):
"""
Override in "template" tasks which provide common resource functionality
but allow subclasses to specify additional resources while preserving
the name for consistent end-user experience.
"""
return self.resources # default impl
def input(self):
"""
Returns the outputs of the Tasks returned by :py:meth:`requires`
See :ref:`Task.input`
:return: a list of :py:class:`Target` objects which are specified as
outputs of all required Tasks.
"""
return getpaths(self.requires())
def deps(self):
"""
Internal method used by the scheduler.
Returns the flattened list of requires.
"""
# used by scheduler
return flatten(self._requires())
def run(self):
"""
The task run method, to be overridden in a subclass.
See :ref:`Task.run`
"""
pass # default impl
def on_failure(self, exception):
"""
Override for custom error handling.
This method gets called if an exception is raised in :py:meth:`run`.
The returned value of this method is json encoded and sent to the scheduler
as the `expl` argument. Its string representation will be used as the
body of the error email sent out if any.
Default behavior is to return a string representation of the stack trace.
"""
traceback_string = traceback.format_exc()
return "Runtime error:\n%s" % traceback_string
def on_success(self):
"""
Override for doing custom completion handling for a larger class of tasks
This method gets called when :py:meth:`run` completes without raising any exceptions.
The returned value is json encoded and sent to the scheduler as the `expl` argument.
Default behavior is to send a None value."""
pass
class MixinNaiveBulkComplete(object):
"""
Enables a Task to be efficiently scheduled with e.g. range tools, by providing a bulk_complete implementation which checks completeness in a loop.
Applicable to tasks whose completeness checking is cheap.
This doesn't exploit output-location-specific APIs for a speed advantage, but it still removes redundant scheduler roundtrips.
"""
@classmethod
def bulk_complete(cls, parameter_tuples):
return [t for t in parameter_tuples if cls(t).complete()]
def externalize(task):
"""
Returns an externalized version of the Task.
See :py:class:`ExternalTask`.
"""
task.run = NotImplemented
return task
class ExternalTask(Task):
"""
Subclass for references to external dependencies.
An ExternalTask does not have a ``run`` implementation, which signifies to
the framework that this Task's :py:meth:`output` is generated outside of
Luigi.
"""
run = NotImplemented
class WrapperTask(Task):
"""
Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist.
"""
def complete(self):
return all(r.complete() for r in flatten(self.requires()))
class Config(Task):
"""
Class for configuration. See :ref:`ConfigClasses`.
"""
# TODO: let's refactor Task & Config so that it inherits from a common
# ParamContainer base class
pass
def getpaths(struct):
"""
Maps all Tasks in a structured data object to their .output().
"""
if isinstance(struct, Task):
return struct.output()
elif isinstance(struct, dict):
r = {}
for k, v in six.iteritems(struct):
r[k] = getpaths(v)
return r
else:
# Remaining case: assume struct is iterable...
try:
s = list(struct)
except TypeError:
raise Exception('Cannot map %s to Task/dict/list' % str(struct))
return [getpaths(r) for r in s]
def flatten(struct):
"""
Creates a flat list of all items in structured output (dicts, lists, items):
.. code-block:: python
>>> sorted(flatten({'a': 'foo', 'b': 'bar'}))
['bar', 'foo']
>>> sorted(flatten(['foo', ['bar', 'troll']]))
['bar', 'foo', 'troll']
>>> flatten('foo')
['foo']
>>> flatten(42)
[42]
"""
if struct is None:
return []
flat = []
if isinstance(struct, dict):
for _, result in six.iteritems(struct):
flat += flatten(result)
return flat
if isinstance(struct, six.string_types):
return [struct]
try:
# if iterable
iterator = iter(struct)
except TypeError:
return [struct]
for result in iterator:
flat += flatten(result)
return flat
def flatten_output(task):
"""
Lists all output targets by recursively walking output-less (wrapper) tasks.
FIXME order consistently.
"""
r = flatten(task.output())
if not r:
for dep in flatten(task.requires()):
r += flatten_output(dep)
return r
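# Example (hedged sketch, not part of this module): a minimal end-to-end Task
# wiring together Parameter, output() and run(); names are illustrative.
#
#     import luigi
#
#     class WordCount(luigi.Task):
#         path = luigi.Parameter()
#
#         def output(self):
#             return luigi.LocalTarget(self.path + '.count')
#
#         def run(self):
#             with open(self.path) as src, self.output().open('w') as dst:
#                 dst.write(str(len(src.read().split())))
#
#     # luigi.build([WordCount(path='book.txt')], local_scheduler=True)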
|
{
"content_hash": "c31f7730798a128f4a38bb8c0ab9b333",
"timestamp": "",
"source": "github",
"line_count": 597,
"max_line_length": 151,
"avg_line_length": 33.81909547738694,
"alnum_prop": 0.6207033184744923,
"repo_name": "bmaggard/luigi",
"id": "c48360df43c37f9008e1302f0dbbd6f7ec620193",
"size": "20793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "luigi/task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2162"
},
{
"name": "HTML",
"bytes": "30362"
},
{
"name": "JavaScript",
"bytes": "79874"
},
{
"name": "Python",
"bytes": "1244002"
},
{
"name": "Shell",
"bytes": "2464"
}
],
"symlink_target": ""
}
|
from cms.api import add_plugin
from django.template import RequestContext
from djangocms_helper.base_test import BaseTestCase
class PluginTest(BaseTestCase):
_pages_data = (
{'en': {'title': 'Page title', 'template': 'page.html', 'publish': True},
'fr': {'title': 'Titre', 'publish': True},
'it': {'title': 'Titolo pagina', 'publish': False}},
)
def test_plugin_render(self):
pages = self.get_pages()
placeholder = pages[0].placeholders.get(slot='content')
plugin = add_plugin(placeholder, 'MarkItUpPlugin', 'en', body='**Bold**')
request = self.get_page_request(pages[0], self.user, r'/en/', lang='en', edit=True)
context = RequestContext(request, {})
rendered = plugin.render_plugin(context, placeholder)
self.assertTrue('<strong>Bold</strong>' in rendered)
def test_plugin_render_empty(self):
# Whitespace-only input should render as an empty paragraph.
test_string = '''
'''
pages = self.get_pages()
placeholder = pages[0].placeholders.get(slot='content')
plugin = add_plugin(placeholder, 'MarkItUpPlugin', 'en', body=test_string)
request = self.get_page_request(pages[0], self.user, r'/en/', lang='en', edit=True)
context = RequestContext(request, {})
rendered = plugin.render_plugin(context, placeholder)
self.assertTrue('<p></p>' in rendered)
|
{
"content_hash": "d4819bf49fa38c1326adbd9c99291e93",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 91,
"avg_line_length": 40.14705882352941,
"alnum_prop": 0.6227106227106227,
"repo_name": "nephila/djangocms-markitup",
"id": "df6f75d821be548bf21dae763824722471136fdf",
"size": "1389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "7783"
},
{
"name": "Python",
"bytes": "12438"
},
{
"name": "Shell",
"bytes": "6531"
}
],
"symlink_target": ""
}
|
from django.db.models import Q
from django.conf import settings
from requests.exceptions import MissingSchema, InvalidSchema, InvalidURL
from django.utils.module_loading import import_string
from doh.models import Hook
from .mixins import DelivererMixin
import datetime
import ujson
import requests
import celery
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
class HooksDeliverer(celery.task.Task, DelivererMixin):
DEFAULT_DUMP = "{}"
def dump_payload(self, payload):
if isinstance(payload, basestring):
return payload
return ujson.dumps(payload)
def deliver_to_target(self, target, data):
if not hasattr(self, '_deliverer'):
self._deliverer = import_string(getattr(settings,
"HOOK_ELEMENT_DELIVERER", "doh.deliverers.deliver_hook"
))
return self._deliverer(target, data)
def delivering(self, hooks, payload=None):
if payload is not None:
dump = self.dump_payload(payload)
for each in hooks:
if payload is None:
instance = each.content_object
if hasattr(instance, 'get_static_payload'):
payload = instance.get_static_payload(each)
dump = self.dump_payload(payload)
elif hasattr(instance, 'get_dynamic_payload'):
dump = self.dump_payload(instance.get_dynamic_payload(each))
else:
dump = self.DEFAULT_DUMP
yield self.deliver_to_target(each.target, dump)
def deliver_hooks(self, hooks, payload=None):
# Drain the generator so that every hook is actually delivered.
for each in self.delivering(hooks, payload):
continue
def filter_hooks(self, app_label, object_name, instance_pk, action):
model = get_model(app_label, object_name)
return Hook.objects.fetch(
model=model, object_id=instance_pk, action=action
)
def after_deliver(self, hooks):
return
def run(self, app_label, object_name, instance_pk, action, payload=None):
hooks = self.filter_hooks(app_label, object_name, instance_pk, action)
if payload is not None:
self.deliver_hooks(hooks, payload=payload)
else:
self.deliver_hooks(hooks)
self.after_deliver(hooks)
class HookDeliverer(celery.task.Task, DelivererMixin):
def after_deliver(self, response):
# 410 Gone: the receiver no longer wants this hook, so remove it.
if response.status_code == 410:
Hook.objects.filter(target=response.url).delete()
def run(self, target, payload):
try:
response = requests.post(target, data=payload)
except (MissingSchema, InvalidSchema, InvalidURL):
# The target URL is unusable, so its hooks can never be delivered.
Hook.objects.filter(target=target).delete()
else:
self.after_deliver(response)
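# Usage sketch (hedged, not part of the original file): these class-based
# Celery tasks would typically be triggered from application code, e.g. a
# post_save signal handler; argument values are illustrative.
#
#     HooksDeliverer.delay('blog', 'Article', article.pk, 'updated',
#                          payload={'id': article.pk})
#
# Dead hooks are pruned automatically: a 410 Gone response (see
# HookDeliverer.after_deliver) or an unusable target URL (see
# HookDeliverer.run) deletes the matching Hook rows.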
|
{
"content_hash": "c79411c1c09ad89c753a493143f0724a",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 82,
"avg_line_length": 35.09411764705882,
"alnum_prop": 0.6265504525645323,
"repo_name": "laginha/django-object-hooks",
"id": "d21199f17e253aad55f89d43ae8f593fddf8dc4e",
"size": "2983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/doh/deliverers/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17896"
}
],
"symlink_target": ""
}
|
import time
from typing import Optional, Union
from urllib.parse import urlparse
import requests
class FreenomSession(requests.Session):
last_request_time: float
previous_url: Optional[str] = None
request_cooldown = 3.0
retry = 3
def __init__(self):
super().__init__()
self.last_request_time = time.monotonic()
def request(self, method, url, *args, **kwargs) -> requests.Response:
# Throttle: keep at least `request_cooldown` seconds between requests.
if abs(self.last_request_time - time.monotonic()) < self.request_cooldown:
time.sleep(self.request_cooldown)
res: Optional[requests.Response] = None
for _ in range(self.retry):
res = super().request(method, url, *args, **kwargs)
self.last_request_time = time.monotonic()
self.previous_url = url
retry = False
if res.status_code == 503:
if "Back-end server is at capacity" in self._decode_reason(res.reason):
retry = True
if res.status_code == 504:
retry = True
if retry:
time.sleep(self.request_cooldown)
continue
return res
assert res is not None # nosec
return res
def _decode_reason(self, reason: Union[str, bytes]) -> str:
if isinstance(reason, bytes):
try:
reason = reason.decode('utf-8')
except UnicodeDecodeError:
# Fall back to latin-1, which can decode any byte sequence.
reason = reason.decode('iso-8859-1')
return reason
def prepare_request(self, request: requests.Request):
request = self._inject_headers(request)
return super().prepare_request(request)
def _inject_headers(self, request: requests.Request) -> requests.Request:
if request.headers is None:
request.headers = dict()
if 'Host' not in request.headers:
url = urlparse(request.url)
request.headers['Host'] = url.hostname
if 'Referer' not in request.headers:
if self.previous_url is not None:
request.headers['Referer'] = self.previous_url
else:
request.headers['Referer'] = request.url
return request
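# Usage sketch (hedged, not part of the original file): the session is a
# drop-in requests.Session that adds a request cooldown, retries on 503/504
# responses, and Host/Referer header injection; the URL is illustrative.
#
#     session = FreenomSession()
#     resp = session.get('https://my.freenom.com/domains.php')
#     resp.raise_for_status()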
|
{
"content_hash": "a075fc44acaa250c8b7d7c94559fa953",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 87,
"avg_line_length": 34.13636363636363,
"alnum_prop": 0.5752330226364847,
"repo_name": "maxisoft/Freenom-dns-updater",
"id": "f115ee24601bb2652d5e8659e1b2cbaa87801c14",
"size": "2253",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/add-v2-config-file",
"path": "freenom_dns_updater/freenom_session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "41179"
},
{
"name": "Python",
"bytes": "50728"
}
],
"symlink_target": ""
}
|
class PlumbcaConfigNotFound(Exception):
"Raised when the plumbca program not found the configure file."
class MessageFormatError(Exception):
"""hello world."""
|
{
"content_hash": "2f9b03614875a5bddf5ffe53d4ba0695",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 67,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.7529411764705882,
"repo_name": "JasonLai256/plumbca",
"id": "b283d3b97a499078158763059f825da2f8c75dba",
"size": "276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plumbca/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "109561"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
version = '0.1a'
here = os.path.dirname(__file__)
with open(os.path.join(here, 'README.rst')) as fp:
longdesc = fp.read()
with open(os.path.join(here, 'CHANGELOG.rst')) as fp:
longdesc += "\n\n" + fp.read()
setup(
name='jobcontrol',
version=version,
packages=find_packages(),
url='https://github.com/rshk/jobcontrol',
license='Apache 2.0 License',
author='Samuele Santi',
author_email='[email protected]',
description='Job scheduling and tracking library',
long_description=longdesc,
install_requires=[
'PrettyTable', # For creating tables
'celery[redis]', # For async tasks
'click', # For the CLI
'colorama', # For color stuff
'docutils', # For rendering docstrings in the web UI
'flask', # For the webapp; utils used around the place
'humanize', # For nicer display of information
'nicelog', # For colorful logging
'psycopg2', # For postgresql storage
'pygments', # For colorizing source code in the web UI
'pyyaml', # For loading YAML configuration
'werkzeug', # Flask dependency, but utils used around too
],
# tests_require=tests_require,
# test_suite='tests',
classifiers=[
'License :: OSI Approved :: Apache Software License',
# 'Development Status :: 1 - Planning',
'Development Status :: 2 - Pre-Alpha',
# 'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
package_data={'': ['README.rst', 'CHANGELOG.rst']},
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'jobcontrol-cli = jobcontrol.cli:main'
]
})
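# Usage note (hedged, not part of the original file): after `pip install -e .`,
# the console_scripts entry point above exposes the CLI as:
#
#     $ jobcontrol-cli --help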
|
{
"content_hash": "7babeb9efd0f41cf802232613e8ef6c2",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 66,
"avg_line_length": 32.96774193548387,
"alnum_prop": 0.601761252446184,
"repo_name": "rshk/jobcontrol",
"id": "c952e2ecd87e504ed025792f86b4fe65a42135c7",
"size": "2044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "29368"
},
{
"name": "JavaScript",
"bytes": "2906"
},
{
"name": "Makefile",
"bytes": "8036"
},
{
"name": "Python",
"bytes": "208862"
},
{
"name": "Shell",
"bytes": "6718"
}
],
"symlink_target": ""
}
|
import os
import platform
import warnings
import ctypes as ct
import numpy as np
MAX_ONES = 1024*256
if platform.system() == 'Windows':
_cudamat = ct.cdll.LoadLibrary('libcudamat.dll')
else:
_cudamat = ct.cdll.LoadLibrary('libcudamat.so')
_cudamat.get_last_cuda_error.restype = ct.c_char_p
_cudamat.cublas_init.restype = ct.c_int
_cudamat.cublas_shutdown.restype = ct.c_int
_cudamat.cuda_set_device.restype = ct.c_int
_cudamat.init_random.restype = ct.c_int
_cudamat.init_empty.restype = ct.c_int
_cudamat.reshape.restype = ct.c_int
_cudamat.copy_to_host.restype = ct.c_int
_cudamat.allocate_device_memory.restype = ct.c_int
_cudamat.copy_to_device.restype = ct.c_int
_cudamat.copy_on_device.restype = ct.c_int
_cudamat.free_device_memory.restype = ct.c_int
_cudamat.get_slice.restype = ct.c_int
_cudamat.get_row_slice.restype = ct.c_int
_cudamat.set_row_slice.restype = ct.c_int
_cudamat.copy_transpose.restype = ct.c_int
_cudamat.get_vector_slice.restype = ct.c_int
_cudamat.fill_with_rand.restype = ct.c_int
_cudamat.fill_with_randn.restype = ct.c_int
_cudamat.add_col_vec.restype = ct.c_int
_cudamat.add_col_mult.restype = ct.c_int
_cudamat.add_row_vec.restype = ct.c_int
_cudamat.mult_by_col_vec.restype = ct.c_int
_cudamat.mult_by_row_vec.restype = ct.c_int
_cudamat.less_than.restype = ct.c_int
_cudamat.less_than_scalar.restype = ct.c_int
_cudamat.greater_than.restype = ct.c_int
_cudamat.greater_than_scalar.restype = ct.c_int
_cudamat.equals.restype = ct.c_int
_cudamat.equals_scalar.restype = ct.c_int
_cudamat.max_by_axis.restype = ct.c_int
_cudamat.sign.restype = ct.c_int
_cudamat.apply_sigmoid.restype = ct.c_int
_cudamat.apply_tanh.restype = ct.c_int
_cudamat.apply_abs.restype = ct.c_int
_cudamat.apply_log_1_plus_exp.restype = ct.c_int
_cudamat.apply_log.restype = ct.c_int
_cudamat.apply_exp.restype = ct.c_int
_cudamat.apply_gamma.restype = ct.c_int
_cudamat.apply_lgamma.restype = ct.c_int
_cudamat.apply_sqrt.restype = ct.c_int
_cudamat.apply_pow.restype = ct.c_int
_cudamat.apply_pow_matrix.restype = ct.c_int
_cudamat.reciprocal.restype = ct.c_int
_cudamat.add_elementwise.restype = ct.c_int
_cudamat.subtract_elementwise.restype = ct.c_int
_cudamat.divide_elementwise.restype = ct.c_int
_cudamat.mult_elementwise.restype = ct.c_int
_cudamat.assign_scalar.restype = ct.c_int
_cudamat.mult_by_scalar.restype = ct.c_int
_cudamat.divide_by_scalar.restype = ct.c_int
_cudamat.add_scalar.restype = ct.c_int
_cudamat.euclid_norm.restype = ct.c_float
_cudamat.selectRows.restype = ct.c_int
_cudamat.setSelectedRows.restype = ct.c_int
_cudamat.vdot.restype = ct.c_float
_cudamat.dot.restype = ct.c_int
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
class CUDAMatException(Exception):
pass
def get_last_cuda_error():
return str(_cudamat.get_last_cuda_error())
def generate_exception(err_code):
"""
Return a CUDAMatException object based on the error code err_code.
"""
if err_code == -1:
return CUDAMatException("Incompatible matrix dimensions.")
elif err_code == -2:
return CUDAMatException("CUBLAS error.")
elif err_code == -3:
return CUDAMatException("CUDA error: " + get_last_cuda_error())
elif err_code == -4:
return CUDAMatException("Operation not supported on views.")
elif err_code == -5:
return CUDAMatException("Operation not supported on transposed matrices.")
elif err_code == -6:
return CUDAMatException("")
elif err_code == -7:
return CUDAMatException("Incompatible transposedness.")
elif err_code == -8:
return CUDAMatException("Matrix is not in device memory.")
elif err_code == -9:
return CUDAMatException("Operation not supported.")
class cudamat(ct.Structure):
_fields_ = [('data_host', ct.POINTER(ct.c_float)),
('data_device', ct.POINTER(ct.c_float)),
('on_device', ct.c_int),
('on_host', ct.c_int),
('size', ct.c_int * 2),
('is_trans', ct.c_int),
('owns_data', ct.c_int)]
class rnd_struct(ct.Structure):
_fields_ = [('dev_rnd_mults', ct.POINTER(ct.c_uint)),
('dev_rnd_words', ct.POINTER(ct.c_longlong))]
class TransposedCUDAMatrix(object):
def __init__(self, mat):
self.mat = cudamat()
ct.memmove(ct.pointer(self.mat), ct.pointer(mat), ct.sizeof(self.mat))
self.mat.is_trans = 1
self.p_mat = ct.pointer(self.mat)
class CUDAMatrix(object):
"""
A CUDAMatrix object represents a matrix of single precision floating point
numbers on a GPU.
"""
def __init__(self, array, copy_to_device = True):
"""
Initializes a new matrix object in one of two ways. If array is a numpy
ndarray, memory for a matrix with the same dimensions is allocated on
the GPU. If the copy_to_device flag is set to True, the GPU matrix is
initialized with the given ndarray. If array is not an ndarray, it must
be a cudamat structure (typically the user will never use this way of
calling __init__).
"""
if type(array) == np.ndarray:
# Convert array to float32 in FORTRAN order
array = reformat(array)
# Initialize as a ndarray-tied matrix.
self.mat = cudamat()
self.size = self.mat.size
self.p_mat = ct.pointer(self.mat)
self.numpy_array = array
_cudamat.init_from_array(self.p_mat, array.ctypes.data_as(ct.POINTER(ct.c_float)), ct.c_int(array.shape[0]), ct.c_int(array.shape[1]))
if copy_to_device:
err_code = _cudamat.copy_to_device(self.p_mat)
if err_code:
raise generate_exception(err_code)
else:
# Initialize based on existing cudamat structure.
mat = array
self.mat = mat
self.p_mat = ct.pointer(self.mat)
self.T = TransposedCUDAMatrix(self.mat)
# Keep a reference to free device memory in case of a crash.
self.__free_device_memory = _cudamat.free_device_memory
def __del__(self):
try:
if 'p_mat' in self.__dict__:
err_code = self.__free_device_memory(self.p_mat)
if err_code:
raise generate_exception(err_code)
except AttributeError:
pass
@staticmethod
def init_random(seed = 0):
"""
Initialize and seed the random number generator.
"""
NUM_RND_STREAMS = 96*128
CUDAMatrix.rndInitialized = 1
CUDAMatrix.rnd_state = rnd_struct()
CUDAMatrix.rnd_state_p = ct.pointer(CUDAMatrix.rnd_state)
cudamat_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rnd_multipliers_32bit.txt')
err_code = _cudamat.init_random(CUDAMatrix.rnd_state_p, ct.c_int(seed), cudamat_path)
if err_code:
raise generate_exception(err_code)
@property
def shape(self):
return (self.mat.size[0], self.mat.size[1])
def reshape(self, shape):
"""
Reshapes self to have the given shape. The number of elements cannot
change as this only changes how the contents are interpreted.
"""
m = ct.c_uint(shape[0])
n = ct.c_uint(shape[1])
err_code = _cudamat.reshape(self.p_mat, m, n)
if err_code:
raise generate_exception(err_code)
return self
def asarray(self):
"""
Copies the matrix to an ndarray on the CPU and returns it.
"""
self.copy_to_host()
return self.numpy_array
def copy_to_device(self):
"""
Copy the matrix to the GPU.
"""
err_code = _cudamat.copy_to_device(self.p_mat)
if err_code:
raise generate_exception(err_code)
def copy_to_host(self):
"""
Copy the matrix to the CPU.
"""
if not self.mat.on_host:
# allocate host storage if necessary
m = self.mat.size[0]
n = self.mat.size[1]
self.numpy_array = np.empty((m, n), dtype=np.float32, order = 'F')
self.mat.data_host = self.numpy_array.ctypes.data_as(ct.POINTER(ct.c_float))
self.mat.on_host = 1
err_code = _cudamat.copy_to_host(self.p_mat)
if err_code:
raise generate_exception(err_code)
def assign(self, val):
"""Assign val to self, where val can be a scalar or a CUDAMatrix
with the same dimensions as self. """
if isinstance(val, CUDAMatrix):
err_code = _cudamat.copy_on_device(val.p_mat, self.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.assign_scalar(self.p_mat, ct.c_float(val))
else:
raise ValueError, "Assigned value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return self
def free_device_memory(self):
"""
Free memory used up by the matrix on the GPU.
"""
err_code = _cudamat.free_device_memory(self.p_mat)
if err_code:
raise generate_exception(err_code)
def set_trans(self, is_trans):
"""
Set the transposedness flag to is_trans.
"""
_cudamat.set_transpose(self.p_mat, ct.c_int(1 * is_trans))
def slice(self, first_col, last_col):
mat = cudamat()
if self.mat.size[0] == 1 or self.mat.size[1] == 1:
err_code = _cudamat.get_vector_slice(self.p_mat, ct.pointer(mat), ct.c_int(first_col), ct.c_int(last_col))
else:
err_code = _cudamat.get_slice(self.p_mat, ct.pointer(mat), ct.c_int(first_col), ct.c_int(last_col))
if err_code:
raise generate_exception(err_code)
new_mat = CUDAMatrix(mat)
try:
new_mat.sliceof = self.sliceof
except AttributeError:
new_mat.sliceof = self
return new_mat
def get_col_slice(self, first_col, last_col, target = None):
col_slice = self.slice(first_col, last_col)
if target:
target.assign(col_slice)
return target
else:
return col_slice
def set_col_slice(self, first_col, last_col, mat):
self.slice(first_col, last_col).assign(mat)
return self
def get_row_slice(self, start, end, target = None):
"""
Get the rows with indices start through end. If target is not provided
memory for a new matrix will be allocated.
"""
width = self.shape[1]
if not target:
target = empty((end-start, width))
err_code = _cudamat.get_row_slice(self.p_mat, target.p_mat, ct.c_int(start), ct.c_int(end))
if err_code:
raise generate_exception(err_code)
return target
def set_row_slice(self, start, end, mat):
"""
Assign the contents of mat to the rows with indices start through end.
"""
err_code = _cudamat.set_row_slice(mat.p_mat, self.p_mat, ct.c_int(start), ct.c_int(end))
if err_code:
raise generate_exception(err_code)
return self
def transpose(self, target = None):
"""
Return a transposed copy of the matrix.
"""
if not target:
target = empty((self.shape[1], self.shape[0]))
err_code = _cudamat.copy_transpose(self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def fill_with_rand(self):
"""
Fill matrix on the GPU with random numbers drawn from the uniform
distribution over the (0,1) interval.
"""
err_code = _cudamat.fill_with_rand(CUDAMatrix.rnd_state_p, self.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def fill_with_randn(self):
"""
Fill matrix on the GPU with random numbers drawn from the standard normal
distribution.
"""
err_code = _cudamat.fill_with_randn(CUDAMatrix.rnd_state_p, self.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def add_col_vec(self, vec, target = None):
"""
Add vector vec to every column of the matrix. If a target is provided,
it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.add_col_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def add_col_mult(self, vec, mult, target = None):
"""
Add a multiple of vector vec to every column of the matrix. If a target
is provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.add_col_mult(self.p_mat, vec.p_mat, target.p_mat, ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return target
def add_row_vec(self, vec, target = None):
"""
Add vector vec to every row of the matrix. If a target is provided,
it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.add_row_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def mult_by_col(self, vec, target = None):
"""
Multiply vector vec into every column of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.mult_by_col_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def mult_by_row(self, vec, target = None):
"""
Multiply vector vec into every row of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.mult_by_row_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def sum(self, axis, target = None):
"""
Sum the matrix along the given dimension, where 0 represents the leading
dimension and 1 represents the non-leading dimension. If a target is
not provided, a new vector is created for storing the result.
"""
return sum(self, axis, target)
def add_sums(self, mat, axis, mult = 1.):
"""
Add a multiple of the sums of the matrix mat along the given dimension
to self.
"""
m = _cudamat.get_leading_dimension(mat.p_mat)
n = _cudamat.get_nonleading_dimension(mat.p_mat)
if axis == 0:
# sum along leading dimension
left = CUDAMatrix.ones.slice(0, m)
left.set_trans(True)
right = mat
elif axis == 1:
# sum along non-leading dimension
left = mat
right = CUDAMatrix.ones.slice(0, n)
err_code = _cudamat.dot(left.p_mat, right.p_mat, self.p_mat, ct.c_float(1.), ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return self
def less_than(self, val, target = None):
"""
Perform the operation target = 1. * (self < val), where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _cudamat.less_than_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _cudamat.less_than(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def greater_than(self, val, target = None):
"""
Perform the operation target = 1. * (self > val), where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _cudamat.greater_than_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _cudamat.greater_than(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def equals(self, val, target = None):
"""
Perform the operation target = 1. * (self == val), where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _cudamat.equals_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _cudamat.equals(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def max(self, axis, target = None):
"""
Find the maximum value along the given dimension, where 0 represents the
leading dimension and 1 represents the non-leading dimension. If a target
is not provided, a new vector is created for storing the result.
"""
m, n = self.shape
if axis == 0:
if not target:
target = empty((1, n))
elif axis == 1:
if not target:
target = empty((m, 1))
err_code = _cudamat.max_by_axis(self.p_mat, target.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target
def sign(self, target = None):
"""
Find the sign of each element of the matrix.
"""
if not target:
target = empty((self.mat.size[0], self.mat.size[1]))
err_code = _cudamat.sign(self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def apply_sigmoid(self, target = None):
"""
Apply the logistic sigmoid to each element of the matrix.
"""
return sigmoid(self, target)
def reciprocal(self, target = None):
"""
Find the reciprocal of each element of the matrix.
"""
if not target:
target = self
err_code = _cudamat.reciprocal(self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def dot(self, mat2, target = None):
"""
Multiply the matrix by mat2 from the right.
"""
return dot(self, mat2, target)
def add_dot(self, m1, m2):
"""
Add the dot product of m1 and m2 to the matrix.
"""
err_code = _cudamat.dot(m1.p_mat, m2.p_mat, self.p_mat, ct.c_float(1.), ct.c_float(1.))
if err_code:
raise generate_exception(err_code)
return self
def subtract_dot(self, m1, m2):
"""
Subtract the dot product of m1 and m2 from the matrix.
"""
err_code = _cudamat.dot(m1.p_mat, m2.p_mat, self.p_mat, ct.c_float(1.), ct.c_float(-1.))
if err_code:
raise generate_exception(err_code)
return self
def add_mult(self, mat2, alpha = 1.):
"""
Add multiple of mat2 to the matrix.
"""
err_code = _cudamat.add_mult(self.p_mat, mat2.p_mat, ct.c_float(alpha))
if err_code:
raise generate_exception(err_code)
return self
def subtract_mult(self, mat2, alpha = 1.):
"""
Subtract a multiple of mat2 from the matrix.
"""
err_code = _cudamat.add_mult(self.p_mat, mat2.p_mat, ct.c_float(-1. * alpha))
if err_code:
raise generate_exception(err_code)
return self
def add(self, val, target = None):
"""Add val to self, where val can be a scalar or a CUDAMatrix with the
same dimensions as self. """
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.add_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.add_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def subtract(self, val, target = None):
"""Subtract val from self, where val can be a scalar or a CUDAMatrix with
the same dimensions as self. """
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.subtract_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.add_scalar(self.p_mat, ct.c_float(-1*val), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def divide(self, val, target = None):
"""Divide self by val, where val can be a scalar or a CUDAMatrix with the
same dimensions as self. """
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.divide_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.divide_by_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def mult(self, val, target = None):
"""Multiply self by val, where val can be a scalar or a CUDAMatrix with
the same dimensions as self. """
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.mult_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.mult_by_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
@deprecated
def assign_scalar(self, alpha):
"""
Assign scalar alpha to every element of the matrix.
"""
err_code = _cudamat.assign_scalar(self.p_mat, ct.c_float(alpha))
if err_code:
raise generate_exception(err_code)
return self
@deprecated
def mult_by_scalar(self, alpha, target = None):
"""
Multiply the matrix by a scalar.
"""
if not target:
target = self
err_code = _cudamat.mult_by_scalar(self.p_mat, ct.c_float(alpha), target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
@deprecated
def div_by_scalar(self, alpha, target = None):
"""
Divide the matrix by a scalar.
"""
if not target:
target = self
err_code = _cudamat.divide_by_scalar(self.p_mat, ct.c_float(alpha), target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
@deprecated
def add_scalar(self, alpha, target = None):
"""
Increment the matrix by a scalar.
"""
if not target:
target = self
err_code = _cudamat.add_scalar(self.p_mat, ct.c_float(alpha), target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def euclid_norm(self):
err_code = ct.c_int(0)
res = _cudamat.euclid_norm(self.p_mat, ct.byref(err_code))
if err_code:
raise generate_exception(err_code.value)
return res
def select_columns(self, indices, target):
"""
copies some columns of self into target.
<indices> must be a row vector. Its elements are float32's representing integers, e.g. "34.0" means the integer "34".
after this call, for all r,c, target[r,c]=self[r,indices[c]].
This returns target.
Negative indices are interpreted in the usual Python way: all elements of <indices> had better be in the range [-self.shape[1], self.shape[1]-1].
This does bounds checking, but out of bounds indices do not raise an exception (because the programmer was lazy). Instead, they result in NaN values in <target>.
"""
err_code = _cudamat.selectRows(self.p_mat, target.p_mat, indices.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def set_selected_columns(self, indices, source):
"""
copies all columns of source into some columns of self.
<indices> must be a row vector. Its elements are float32's representing
integers, e.g. "34.0" means the integer "34". after this call, for all
r,c, self[r,indices[c]]=source[r,c]. This returns self.
Negative indices are interpreted in the usual Python way: all elements
of <indices> had better be in the range [-self.shape[1], self.shape[1]-1].
This does bounds checking, but out of bounds indices do not raise an
exception (because the programmer was lazy). Instead, they result in NaN
values in <self>.
"""
err_code = _cudamat.setSelectedRows(self.p_mat, source.p_mat, indices.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def empty(shape):
"""
Creates and returns a new CUDAMatrix with the given shape.
"""
mat = cudamat()
err_code = _cudamat.init_empty(ct.pointer(mat), ct.c_int(shape[0]), ct.c_int(shape[1]))
if err_code:
raise generate_exception(err_code)
return CUDAMatrix(mat)
def sum(mat, axis, target = None):
"""
Sum the matrix along the given dimension, where 0 represents the leading
dimension and 1 represents the non-leading dimension. If a target is
not provided, a new vector is created for storing the result.
"""
m = _cudamat.get_leading_dimension(mat.p_mat)
n = _cudamat.get_nonleading_dimension(mat.p_mat)
if axis == 0:
# sum along leading dimension
left = CUDAMatrix.ones.slice(0, m)
left.set_trans(True)
right = mat
if not target:
target = empty((1, n))
elif axis == 1:
# sum along non-leading dimension
left = mat
right = CUDAMatrix.ones.slice(0, n)
if not target:
target = empty((m, 1))
err_code = _cudamat.dot(left.p_mat, right.p_mat, target.p_mat, ct.c_float(0.), ct.c_float(1.))
if err_code:
raise generate_exception(err_code)
return target
def dot(m1, m2, target = None):
"""
Find the dot product between m1 and m2.
"""
if not target:
m = _cudamat.get_leading_dimension(m1.p_mat)
n = _cudamat.get_nonleading_dimension(m2.p_mat)
target = empty((m, n))
err_code = _cudamat.dot(m1.p_mat, m2.p_mat, target.p_mat, ct.c_float(0.), ct.c_float(1.))
if err_code:
raise generate_exception(err_code)
return target
def vdot(m1, m2):
"""
Compute the vector dot product of matrices m1 and m2.
"""
err_code = ct.c_int(0)
res = _cudamat.vdot(m1.p_mat, m2.p_mat, ct.byref(err_code))
if err_code:
raise generate_exception(err_code.value)
return res
def sigmoid(mat, target = None):
"""
Apply the logistic sigmoid to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_sigmoid(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def tanh(mat, target = None):
"""
Apply the tanh to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_tanh(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def abs(mat, target = None):
"""
Apply abs to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_abs(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def log_1_plus_exp(mat, target = None):
"""
Apply log(1+exp(x)) to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_log_1_plus_exp(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def log(mat, target = None):
"""
Find the natural logarithm of each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_log(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def exp(mat, target = None):
"""
Apply the exponential function to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_exp(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def gamma(mat, target = None):
"""
Apply the gamma function to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_gamma(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def lgamma(mat, target = None):
"""
Apply the log gamma function to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_lgamma(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def sqrt(mat, target = None):
"""
Compute the square root of each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_sqrt(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def pow(mat, p, target = None):
"""
If p is a scalar, compute the 'p'th power of each element of the matrix mat,
otherwise raise each element of the matrix mat to the power given by the
corresponding element of the matrix p.
"""
if not target:
target = mat
if isinstance(p, CUDAMatrix):
err_code = _cudamat.apply_pow_matrix(mat.p_mat, p.p_mat, target.p_mat)
elif isinstance(p, (int, float)):
err_code = _cudamat.apply_pow(mat.p_mat, ct.c_float(p), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
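# Quick sketch of pow()'s two documented behaviours (both element-wise and,
# with no target given, in place):
#
#     m = CUDAMatrix(np.array([[1., 2., 3.]], dtype=np.float32, order='F'))
#     pow(m, 2.)                  # scalar exponent: squares every element
#     e = CUDAMatrix(np.array([[0., 1., 2.]], dtype=np.float32, order='F'))
#     pow(m, e)                   # matrix exponent: m[r,c] ** e[r,c]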
def cuda_sync_threads():
_cudamat.cuda_sync_threads()
def reformat(array):
"""
Returns array as a float32 array in FORTRAN order.
"""
return np.array(array, dtype=np.float32, order='F')
def cuda_set_device(dev_id):
"""
Selects the CUDA device with the given ID.
"""
err_code = _cudamat.cuda_set_device(ct.c_int(dev_id))
if err_code:
raise generate_exception(err_code)
def cublas_init():
"""
Initialize Cublas.
"""
_cudamat.cublas_init()
CUDAMatrix.ones = CUDAMatrix(np.ones((MAX_ONES, 1), dtype=np.float32, order = 'F'))
init = cublas_init
def cublas_shutdown():
"""
Shut down Cublas.
"""
CUDAMatrix.ones = 0
_cudamat.cublas_shutdown()
shutdown = cublas_shutdown
|
{
"content_hash": "8a8c2c88f2eefe4f8c539024fdbed45c",
"timestamp": "",
"source": "github",
"line_count": 1111,
"max_line_length": 169,
"avg_line_length": 29.353735373537354,
"alnum_prop": 0.5927572672635839,
"repo_name": "nitishsrivastava/cudamat",
"id": "b6a96684c8f9438615385d2c771a5fcaf89f81f3",
"size": "32612",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cudamat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from boto.swf.exceptions import SWFResponseError
from freezegun import freeze_time
import sure # noqa
from moto import mock_swf_deprecated
from moto.swf import swf_backend
from ..utils import setup_workflow
# PollForDecisionTask endpoint
@mock_swf_deprecated
def test_poll_for_decision_task_when_one():
conn = setup_workflow()
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234")
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"])
resp = conn.poll_for_decision_task(
"test-domain", "queue", identity="srv01")
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(["WorkflowExecutionStarted",
"DecisionTaskScheduled", "DecisionTaskStarted"])
resp[
"events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal("srv01")
@mock_swf_deprecated
def test_poll_for_decision_task_when_none():
conn = setup_workflow()
conn.poll_for_decision_task("test-domain", "queue")
resp = conn.poll_for_decision_task("test-domain", "queue")
# this is the DecisionTask representation you get from the real SWF
# after waiting 60s when there's no decision to be taken
resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0})
@mock_swf_deprecated
def test_poll_for_decision_task_on_non_existent_queue():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "non-existent-queue")
resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0})
@mock_swf_deprecated
def test_poll_for_decision_task_with_reverse_order():
conn = setup_workflow()
resp = conn.poll_for_decision_task(
"test-domain", "queue", reverse_order=True)
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(
["DecisionTaskStarted", "DecisionTaskScheduled", "WorkflowExecutionStarted"])
# CountPendingDecisionTasks endpoint
@mock_swf_deprecated
def test_count_pending_decision_tasks():
conn = setup_workflow()
conn.poll_for_decision_task("test-domain", "queue")
resp = conn.count_pending_decision_tasks("test-domain", "queue")
resp.should.equal({"count": 1, "truncated": False})
@mock_swf_deprecated
def test_count_pending_decision_tasks_on_non_existent_task_list():
conn = setup_workflow()
resp = conn.count_pending_decision_tasks("test-domain", "non-existent")
resp.should.equal({"count": 0, "truncated": False})
@mock_swf_deprecated
def test_count_pending_decision_tasks_after_decision_completes():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
conn.respond_decision_task_completed(resp["taskToken"])
resp = conn.count_pending_decision_tasks("test-domain", "queue")
resp.should.equal({"count": 0, "truncated": False})
# RespondDecisionTaskCompleted endpoint
@mock_swf_deprecated
def test_respond_decision_task_completed_with_no_decision():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
resp = conn.respond_decision_task_completed(
task_token,
execution_context="free-form context",
)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234")
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal([
"WorkflowExecutionStarted",
"DecisionTaskScheduled",
"DecisionTaskStarted",
"DecisionTaskCompleted",
])
evt = resp["events"][-1]
evt["decisionTaskCompletedEventAttributes"].should.equal({
"executionContext": "free-form context",
"scheduledEventId": 2,
"startedEventId": 3,
})
resp = conn.describe_workflow_execution(
"test-domain", conn.run_id, "uid-abcd1234")
resp["latestExecutionContext"].should.equal("free-form context")
@mock_swf_deprecated
def test_respond_decision_task_completed_with_wrong_token():
conn = setup_workflow()
conn.poll_for_decision_task("test-domain", "queue")
conn.respond_decision_task_completed.when.called_with(
"not-a-correct-token"
).should.throw(SWFResponseError)
@mock_swf_deprecated
def test_respond_decision_task_completed_on_close_workflow_execution():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
    # bad: we're closing the workflow execution manually, because the
    # closing endpoints are not implemented yet
wfe = swf_backend.domains[0].workflow_executions[-1]
wfe.execution_status = "CLOSED"
# /bad
conn.respond_decision_task_completed.when.called_with(
task_token
).should.throw(SWFResponseError)
@mock_swf_deprecated
def test_respond_decision_task_completed_with_task_already_completed():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
conn.respond_decision_task_completed(task_token)
conn.respond_decision_task_completed.when.called_with(
task_token
).should.throw(SWFResponseError)
@mock_swf_deprecated
def test_respond_decision_task_completed_with_complete_workflow_execution():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [{
"decisionType": "CompleteWorkflowExecution",
"completeWorkflowExecutionDecisionAttributes": {"result": "foo bar"}
}]
resp = conn.respond_decision_task_completed(
task_token, decisions=decisions)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234")
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal([
"WorkflowExecutionStarted",
"DecisionTaskScheduled",
"DecisionTaskStarted",
"DecisionTaskCompleted",
"WorkflowExecutionCompleted",
])
resp["events"][-1]["workflowExecutionCompletedEventAttributes"][
"result"].should.equal("foo bar")
@mock_swf_deprecated
def test_respond_decision_task_completed_with_close_decision_not_last():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [
{"decisionType": "CompleteWorkflowExecution"},
{"decisionType": "WeDontCare"},
]
conn.respond_decision_task_completed.when.called_with(
task_token, decisions=decisions
).should.throw(SWFResponseError, r"Close must be last decision in list")
@mock_swf_deprecated
def test_respond_decision_task_completed_with_invalid_decision_type():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [
{"decisionType": "BadDecisionType"},
{"decisionType": "CompleteWorkflowExecution"},
]
conn.respond_decision_task_completed.when.called_with(
task_token, decisions=decisions).should.throw(
SWFResponseError,
r"Value 'BadDecisionType' at 'decisions.1.member.decisionType'"
)
@mock_swf_deprecated
def test_respond_decision_task_completed_with_missing_attributes():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [
{
"decisionType": "should trigger even with incorrect decision type",
"startTimerDecisionAttributes": {}
},
]
conn.respond_decision_task_completed.when.called_with(
task_token, decisions=decisions
).should.throw(
SWFResponseError,
r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' "
r"failed to satisfy constraint: Member must not be null"
)
@mock_swf_deprecated
def test_respond_decision_task_completed_with_missing_attributes_totally():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [
{"decisionType": "StartTimer"},
]
conn.respond_decision_task_completed.when.called_with(
task_token, decisions=decisions
).should.throw(
SWFResponseError,
r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' "
r"failed to satisfy constraint: Member must not be null"
)
@mock_swf_deprecated
def test_respond_decision_task_completed_with_fail_workflow_execution():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [{
"decisionType": "FailWorkflowExecution",
"failWorkflowExecutionDecisionAttributes": {"reason": "my rules", "details": "foo"}
}]
resp = conn.respond_decision_task_completed(
task_token, decisions=decisions)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234")
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal([
"WorkflowExecutionStarted",
"DecisionTaskScheduled",
"DecisionTaskStarted",
"DecisionTaskCompleted",
"WorkflowExecutionFailed",
])
attrs = resp["events"][-1]["workflowExecutionFailedEventAttributes"]
attrs["reason"].should.equal("my rules")
attrs["details"].should.equal("foo")
@mock_swf_deprecated
@freeze_time("2015-01-01 12:00:00")
def test_respond_decision_task_completed_with_schedule_activity_task():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [{
"decisionType": "ScheduleActivityTask",
"scheduleActivityTaskDecisionAttributes": {
"activityId": "my-activity-001",
"activityType": {
"name": "test-activity",
"version": "v1.1"
},
"heartbeatTimeout": "60",
"input": "123",
"taskList": {
"name": "my-task-list"
},
}
}]
resp = conn.respond_decision_task_completed(
task_token, decisions=decisions)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234")
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal([
"WorkflowExecutionStarted",
"DecisionTaskScheduled",
"DecisionTaskStarted",
"DecisionTaskCompleted",
"ActivityTaskScheduled",
])
resp["events"][-1]["activityTaskScheduledEventAttributes"].should.equal({
"decisionTaskCompletedEventId": 4,
"activityId": "my-activity-001",
"activityType": {
"name": "test-activity",
"version": "v1.1",
},
"heartbeatTimeout": "60",
"input": "123",
"taskList": {
"name": "my-task-list"
},
})
resp = conn.describe_workflow_execution(
"test-domain", conn.run_id, "uid-abcd1234")
resp["latestActivityTaskTimestamp"].should.equal(1420113600.0)
|
{
"content_hash": "5007894309a569fb538a85e83f3bd4be",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 93,
"avg_line_length": 33.16959064327485,
"alnum_prop": 0.6631699576868829,
"repo_name": "whummer/moto",
"id": "972b1053b77d49dae6794b898b2d3b6c0660fcbe",
"size": "11344",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_swf/responses/test_decision_tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1148"
},
{
"name": "Python",
"bytes": "6015085"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
}
|
def get_language(tokens, stopwords):
"""
    Extract the language from a list of tokens, based on stop words.
    It takes two args:
        tokens -> list of words whose language should be detected
        stopwords -> dict of sets: {lang: set(language, specific, stop, words)}
    It scores each language by the size of the intersection between the
    tokens and that language's stop words. The language with the highest
    score is considered the main language of the list and is returned.
    Possible improvement: if two languages have equal scores, look at the
    subject (currently too small for good detection) or define a default
    value.
"""
languages_ratios = dict()
words = [str(word).lower() for word in tokens]
words_set = set(words)
for language in stopwords.keys():
common_elements = words_set.intersection(stopwords[language])
languages_ratios[language] = len(common_elements) # language score
return max(languages_ratios, key=languages_ratios.get)
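# Minimal usage sketch; these stop-word sets are illustrative toys, not a
# real stop-word resource.
if __name__ == '__main__':
    demo_stopwords = {
        'english': {'the', 'is', 'and', 'of'},
        'french': {'le', 'la', 'et', 'de'},
    }
    demo_tokens = ['The', 'cat', 'is', 'on', 'the', 'mat']
    print(get_language(demo_tokens, demo_stopwords))  # -> 'english'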
|
{
"content_hash": "d1858a9c921f41aa10b1600eb7222d5c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 76,
"avg_line_length": 36.69230769230769,
"alnum_prop": 0.6907756813417191,
"repo_name": "Nedgang/adt_project",
"id": "21784fb3965db0a803add07907b8c27dd4e0df8c",
"size": "1003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language_detection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33966"
},
{
"name": "TeX",
"bytes": "2690"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['MovingAverage'] , ['BestCycle'] , ['MLP'] );
|
{
"content_hash": "b0256233f6443df46122af56525188ff",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 84,
"avg_line_length": 39.25,
"alnum_prop": 0.7133757961783439,
"repo_name": "antoinecarme/pyaf",
"id": "550a7f9d7048689f4f7ee388e31399bc2bc60b51",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_MovingAverage_BestCycle_MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""
Implement a hash table with arrays.
"""
class Dictionary:
"""
    A hash table is an array of arrays. The outer array's index is the "hashed" index
    generated from a given key. Two different keys can hash to the same index; the inner
    array (the bucket) is used to handle such collisions. As long as the hash function
    distributes keys evenly, O(1) retrieval is possible provided not too many keys are
    stored relative to the overall size of the hash table. The table size can be increased
    to improve retrieval performance: with n keys stored in a table of size m, lookups cost
    O(n/m) on average. For example, with 100 keys and a hash table of size only 10, that is
    O(100/10) = O(10), which still beats scanning all 100 keys.
"""
def __init__(self, size=100):
self.size = size
self.data = [[] for _ in range(self.size)]
def __setitem__(self, key, value):
key_index = self._hash(key)
bucket = self.data[key_index]
        # Insert or update, handling hash collisions within the bucket.
        for i, kv in enumerate(bucket):
            if kv[0] == key:
                # Key already present: replace its value.
                bucket[i] = (key, value)
                break
        else:
            # Key not in this bucket yet: append a new (key, value) pair.
            bucket.append((key, value))
def __getitem__(self, key):
key_index = self._hash(key)
bucket = self.data[key_index]
value = None
for kv in bucket:
if kv[0] == key:
value = kv[1]
break
return value
def __delitem__(self, key):
key_index = self._hash(key)
bucket = self.data[key_index]
for i, kv in enumerate(bucket):
if kv[0] == key:
bucket.remove(kv)
break
def _hash(self, key):
return hash(key) % self.size
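# A minimal usage sketch of the hash table above (assumes the fixed
# __setitem__ semantics: existing keys are updated, new keys appended).
if __name__ == '__main__':
    d = Dictionary(size=10)
    d['apple'] = 1
    d['apple'] = 2               # same key: the value is replaced
    d['banana'] = 3
    assert d['apple'] == 2
    assert d['missing'] is None  # absent keys come back as None
    del d['banana']
    assert d['banana'] is None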
|
{
"content_hash": "c911bd1f370159a8f1a26fd5bff4691c",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 118,
"avg_line_length": 37.54,
"alnum_prop": 0.5817794352690463,
"repo_name": "JDFagan/InterviewInPython",
"id": "12bf02e3632e0839e56d0a052f90916c05c6e41e",
"size": "1877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/dictionary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "38742"
},
{
"name": "Python",
"bytes": "113273"
}
],
"symlink_target": ""
}
|
from openstack.load_balancer import load_balancer_service as lb_service
from openstack import resource2 as resource
class LoadBalancer(resource.Resource):
resource_key = 'loadbalancer'
resources_key = 'loadbalancers'
base_path = '/v2.0/lbaas/loadbalancers'
service = lb_service.LoadBalancerService()
# capabilities
allow_create = True
allow_get = True
allow_update = True
allow_delete = True
allow_list = True
_query_mapping = resource.QueryParameters(
'description', 'flavor', 'name', 'project_id', 'provider',
'vip_address', 'vip_network_id', 'vip_port_id', 'vip_subnet_id',
'provisioning_status', 'operating_status',
is_admin_state_up='admin_state_up'
)
#: Properties
#: The administrative state of the load balancer *Type: bool*
is_admin_state_up = resource.Body('admin_state_up', type=bool)
#: Timestamp when the load balancer was created
created_at = resource.Body('created_at')
#: The load balancer description
description = resource.Body('description')
#: The load balancer flavor
flavor = resource.Body('flavor')
#: List of listeners associated with this load balancer
listeners = resource.Body('listeners', type=list)
#: The load balancer name
name = resource.Body('name')
#: Operating status of the load balancer
operating_status = resource.Body('operating_status')
#: List of pools associated with this load balancer
pools = resource.Body('pools', type=list)
#: The ID of the project this load balancer is associated with.
project_id = resource.Body('project_id')
#: Provider name for the load balancer.
provider = resource.Body('provider')
#: The provisioning status of this load balancer
provisioning_status = resource.Body('provisioning_status')
#: Timestamp when the load balancer was last updated
updated_at = resource.Body('updated_at')
#: VIP address of load balancer
vip_address = resource.Body('vip_address')
    #: VIP network ID
vip_network_id = resource.Body('vip_network_id')
#: VIP port ID
vip_port_id = resource.Body('vip_port_id')
#: VIP subnet ID
vip_subnet_id = resource.Body('vip_subnet_id')
|
{
"content_hash": "61680966e3b2427f51b34805e82581b3",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 72,
"avg_line_length": 39.228070175438596,
"alnum_prop": 0.6829159212880143,
"repo_name": "briancurtin/python-openstacksdk",
"id": "1e2afa733cea96f33d8226dbb0d1849ee64a8f4b",
"size": "2782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack/load_balancer/v2/load_balancer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1956816"
},
{
"name": "Shell",
"bytes": "1865"
}
],
"symlink_target": ""
}
|
"""
Django settings for project_card project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^o(grfryj_#r7@++7bsb#e75d65rxhjj2v09_o&mbttx!)##wg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'company',
'user',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'project_card.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_card.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "15c68d22b5b8bda9e9569136fa0a77ed",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 71,
"avg_line_length": 25.798076923076923,
"alnum_prop": 0.6891539321654864,
"repo_name": "PugliaSOS/card-project",
"id": "db1781b1da6615455393c8d223b5f814135da8f8",
"size": "2683",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project_card/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4458"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name='fileconversions',
version='0.1.0.0',
packages=['fileconversions', 'fileconversions.conversions'],
url='',
license='MIT',
author='Wilberto Morales',
author_email='[email protected]',
description='Various file format conversions.'
)
|
{
"content_hash": "ce2289a66b8773e686dadd2a32d08c4d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 64,
"avg_line_length": 26.75,
"alnum_prop": 0.6915887850467289,
"repo_name": "wilbertom/fileconversions",
"id": "2ac7a81d46d7b357279a33d01297710a694f6b11",
"size": "321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13236"
}
],
"symlink_target": ""
}
|
import json
import os
import urllib2
import httplib2
import cv2
import matplotlib.pyplot as plt
from color_histogram.io_util.image import loadRGB, saveRGB
from color_histogram.datasets.datasets import dataDir, dataFiles
# # Simple image loaders via Google image API.
class GoogleImageLoader:
# # Constructor
# @param keyword keyword for image search.
# @param num_images target number of images for the search.
# @param update Update existing images if the value is True.
def __init__(self, keyword="banana", num_images=10, update=False):
self._keyword = keyword
self._num_images = num_images
self._data_dir = dataDir(keyword)
self._update = update
self.searchImageURLs()
self.downloadImages()
self.postResize()
def searchImageURLs(self):
keyword = self._keyword
num_images = self._num_images
image_urls = []
google_api = "http://ajax.googleapis.com/ajax/services/search/images?q={0}&v=1.0&rsz=large&start={1}&imgc=color"
for i in range((num_images / 8) + 1):
res = urllib2.urlopen(google_api.format(keyword, i * 8))
page_data = json.load(res)
page_urls = [result["url"] for result in page_data["responseData"]["results"]]
image_urls.extend(page_urls)
if len(image_urls) >= num_images:
image_urls = image_urls[:num_images]
self._image_urls = image_urls
return image_urls
def downloadImages(self):
print " Download"
data_name = self._keyword
image_urls = self._image_urls
data_dir = self._data_dir
        if not os.path.exists(data_dir):
os.makedirs(data_dir)
http = httplib2.Http(".cache")
for i in range(len(set(image_urls))):
try:
url_name, ext = os.path.splitext(image_urls[i])
data_filename = "%s_%s%s" % (data_name, i, ext)
data_filepath = os.path.join(data_dir, data_filename)
if not self._update:
if os.path.exists(data_filepath):
print " - Skip: %s" % data_filename
continue
response, content = http.request(image_urls[i])
with open(data_filepath, 'wb') as data_file:
data_file.write(content)
print " - Done: %s" % data_filename
except:
continue
def postResize(self):
print " Post resize"
data_name = self._keyword
data_files = dataFiles(data_name)
for data_file in data_files:
data_filename = os.path.basename(data_file)
C_8U = loadRGB(data_file)
if C_8U is None:
os.remove(data_file)
print " - Delete: %s" % data_filename
continue
h, w = C_8U.shape[0:2]
opt_scale = 800.0 / float(h)
opt_scale = max(opt_scale, 800.0 / float(w))
opt_scale = min(opt_scale, 1.0)
h_opt = int(opt_scale * h)
w_opt = int(opt_scale * w)
C_8U_small = cv2.resize(C_8U, (w_opt, h_opt))
saveRGB(data_file, C_8U_small)
print " - Resized: %s" % data_filename
# # Create dataset for the given data_name.
def createDataset(data_name="banana", num_images=10, update=False):
GoogleImageLoader(data_name, num_images, update)
# # Create datasets for the given data_names.
def createDatasets(data_names=["apple", "banana", "sky", "tulip", "flower"],
num_images=10,
update=False):
for data_name in data_names:
print "Create datasets: %s" % data_name
createDataset(data_name, num_images, update)
if __name__ == '__main__':
createDatasets()
|
{
"content_hash": "3de0f1e8d403bfa0ed1082c38766657d",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 120,
"avg_line_length": 32.266666666666666,
"alnum_prop": 0.5630165289256198,
"repo_name": "tody411/ColorHistogram",
"id": "262ea9aa151847ec3f2ac9bc0a96f1bf104a8643",
"size": "4218",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "color_histogram/datasets/google_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42084"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import sys
from contextlib import contextmanager
from robot.errors import (INFO_PRINTED, DATA_ERROR, STOPPED_BY_USER,
FRAMEWORK_ERROR, Information, DataError)
from .argumentparser import ArgumentParser
from .encoding import encode_output
from .error import get_error_details
class Application(object):
def __init__(self, usage, name=None, version=None, arg_limits=None,
logger=None, **auto_options):
self._ap = ArgumentParser(usage, name, version, arg_limits,
self.validate, **auto_options)
self._logger = logger or DefaultLogger()
def main(self, arguments, **options):
raise NotImplementedError
def validate(self, options, arguments):
return options, arguments
def execute_cli(self, cli_arguments):
with self._logging():
options, arguments = self._parse_arguments(cli_arguments)
rc = self._execute(arguments, options)
self._exit(rc)
def console(self, msg):
print encode_output(msg)
@contextmanager
def _logging(self):
self._logger.register_file_logger()
self._logger.info('%s %s' % (self._ap.name, self._ap.version))
try:
yield
finally:
self._logger.close()
def _parse_arguments(self, cli_args):
try:
options, arguments = self._ap.parse_args(cli_args)
except Information, msg:
self._report_info(unicode(msg))
except DataError, err:
self._report_error(unicode(err), help=True, exit=True)
else:
self._logger.info('Arguments: %s' % ','.join(arguments))
return options, arguments
def execute(self, *arguments, **options):
with self._logging():
return self._execute(arguments, options)
def _execute(self, arguments, options):
try:
rc = self.main(arguments, **options)
except DataError, err:
return self._report_error(unicode(err), help=True)
except (KeyboardInterrupt, SystemExit):
return self._report_error('Execution stopped by user.',
rc=STOPPED_BY_USER)
except:
error, details = get_error_details()
return self._report_error('Unexpected error: %s' % error,
details, rc=FRAMEWORK_ERROR)
else:
return rc or 0
def _report_info(self, err):
self.console(unicode(err))
self._exit(INFO_PRINTED)
def _report_error(self, message, details=None, help=False, rc=DATA_ERROR,
exit=False):
if help:
message += '\n\nTry --help for usage information.'
if details:
message += '\n' + details
self._logger.error(message)
if exit:
self._exit(rc)
return rc
def _exit(self, rc):
sys.exit(rc)
class DefaultLogger(object):
def register_file_logger(self):
pass
def info(self, message):
pass
def error(self, message):
print encode_output(message)
def close(self):
pass
|
{
"content_hash": "e5882000980a9740e7e121959a28c1d3",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 77,
"avg_line_length": 31.261682242990656,
"alnum_prop": 0.5584454409566517,
"repo_name": "Senseg/robotframework",
"id": "f45fa170ee438057d44428e15a19be0d56da554d",
"size": "3963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/utils/application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "716"
},
{
"name": "Java",
"bytes": "48873"
},
{
"name": "JavaScript",
"bytes": "149654"
},
{
"name": "Python",
"bytes": "1637427"
},
{
"name": "Shell",
"bytes": "1323"
}
],
"symlink_target": ""
}
|
import sys
from multiprocessing import Process
from joblib import Parallel, delayed
import logging
logging.basicConfig(format='%(levelname)s : %(asctime)s : %(message)s', level=logging.INFO)
x_seperator = '\nXXXXXXX\n' # define document separator (7 Xs). This separator is used when all the docs are in one file (a corpus file)
##################################################################
#Example Usage:
#python aligning-docs-by-interlinks-demo.py ~/wikipedia/processed/arwiki-20150311-pages-articles.txt ~/wikipedia/processed/arzwiki-20150329-pages-articles.txt ar arz ../docs_aligned_by_links/
def usage():
print 'Usage: ', sys.argv[0], '<source corpus file> <target corpus file> <source language> <target language> <output path>'
##################################################################
if len(sys.argv) < 6: usage(); sys.exit(2)
'''
This software is a demo for aligning Wikipedia comparable documents using interlanguage links. The method is described in
https://sites.google.com/site/motazsite/Home/publications/saad_phd.pdf
Motaz Saad. Mining Documents and Sentiments in Cross-lingual Context. PhD thesis, Université de Lorraine, January 2015.
'''
import imp
tp = imp.load_source('textpro', 'textpro.py')
def main(argv):
source_corpus_file = sys.argv[1]
target_corpus_file = sys.argv[2]
source_language = sys.argv[3]
target_language = sys.argv[4]
output_path = sys.argv[5]
if not output_path.endswith('/'): output_path = output_path + '/'
tp.check_dir(output_path) # if directory does not exist, then create
    logging.info( 'aligning %s and %s wikipedia documents using interlanguage links', source_language, target_language)
source_docs = tp.split_wikipedia_docs_into_array(source_corpus_file)
logging.info( 'source corpus is loaded')
target_docs = tp.split_wikipedia_docs_into_array(target_corpus_file)
logging.info( 'target corpus is loaded ... start aligning ...')
aligned_corpus = Parallel(n_jobs=3,verbose=100)(delayed(tp.aligning_doc_by_interlanguage_links)(d, target_docs, source_language, target_language, output_path) for d in source_docs)
source_out = open(output_path + source_language + '.wiki.txt', 'w')
target_out = open(output_path + target_language + '.wiki.txt', 'w')
for doc_pair in aligned_corpus:
if doc_pair[0]: # if not None
text_out = doc_pair[0]
print>>source_out, text_out.encode('utf-8')
text_out = doc_pair[1]
print>>target_out, text_out.encode('utf-8')
##################################################################
if __name__ == "__main__":
main(sys.argv)
|
{
"content_hash": "9765eb2bd2b1365a5f2addf010e438b1",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 191,
"avg_line_length": 35.84722222222222,
"alnum_prop": 0.6671832623014335,
"repo_name": "motazsaad/comparable-text-miner",
"id": "56e18fec227c5af371445f82e0c615e60eab06e2",
"size": "2622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aligning-docs-by-interlinks-demo2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "139785"
}
],
"symlink_target": ""
}
|
from jumpgate.common.openstack import setup_responder
def setup_routes(app, disp):
return setup_responder(app, disp, 'identity')
|
{
"content_hash": "411fddf5de42c58dcc8be92f44f303eb",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 53,
"avg_line_length": 27,
"alnum_prop": 0.762962962962963,
"repo_name": "BillArnold/barnoldjg",
"id": "ef559bdd4f750b682382d33aee02c0eeb91167f2",
"size": "135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jumpgate/identity/drivers/openstack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "162307"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
}
|
import os, datetime
class ProcessLockException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ProcessLock():
    def __init__(self, lockKey):
        self.lockKey = lockKey + '.lock'
    def aquire(self):
        status = os.system('lockfile ' + self.lockKey)
        if status != 0:
            raise ProcessLockException('Could not aquire ProcessLock ' + self.lockKey)
    def release(self):
        status = os.system('rm -f ' + self.lockKey)
        if status != 0:
            raise ProcessLockException('Could not release ProcessLock ' + self.lockKey)
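# Minimal usage sketch (assumes the external `lockfile` utility, which
# aquire() shells out to, is installed on the system).
if __name__ == '__main__':
    lock = ProcessLock('/tmp/demo')
    lock.aquire()
    try:
        pass  # critical section: only one process at a time runs this
    finally:
        lock.release()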
|
{
"content_hash": "4d96fc99fb477b20a22c7a09da4bae74",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 86,
"avg_line_length": 29.291666666666668,
"alnum_prop": 0.5604551920341394,
"repo_name": "softwarespartan/AGT",
"id": "2b79cc0cb9ef91810d7c06d7bb53a360b4494f66",
"size": "704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "S3/Synchronization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "350585"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Typed,
String,
Integer,
Bool,
NoneSet,
Set,
Sequence,
)
from openpyxl.descriptors.excel import ExtensionList, Relation
from openpyxl.descriptors.sequence import NestedSequence
from openpyxl.descriptors.nested import NestedString
from openpyxl.xml.constants import SHEET_MAIN_NS
from openpyxl.workbook.defined_name import DefinedName, DefinedNameList
from openpyxl.workbook.external_reference import ExternalReference
from openpyxl.workbook.function_group import FunctionGroupList
from openpyxl.workbook.properties import WorkbookProperties, CalcProperties, FileVersion
from openpyxl.workbook.protection import WorkbookProtection, FileSharing
from openpyxl.workbook.smart_tags import SmartTagList, SmartTagProperties
from openpyxl.workbook.views import CustomWorkbookView, BookView
from openpyxl.workbook.web import WebPublishing, WebPublishObjectList
class FileRecoveryProperties(Serialisable):
tagname = "fileRecoveryPr"
autoRecover = Bool(allow_none=True)
crashSave = Bool(allow_none=True)
dataExtractLoad = Bool(allow_none=True)
repairLoad = Bool(allow_none=True)
def __init__(self,
autoRecover=None,
crashSave=None,
dataExtractLoad=None,
repairLoad=None,
):
self.autoRecover = autoRecover
self.crashSave = crashSave
self.dataExtractLoad = dataExtractLoad
self.repairLoad = repairLoad
class ChildSheet(Serialisable):
"""
Represents a reference to a worksheet or chartsheet in workbook.xml
It contains the title, order and state but only an indirect reference to
the objects themselves.
"""
tagname = "sheet"
name = String()
sheetId = Integer()
state = NoneSet(values=(['visible', 'hidden', 'veryHidden']))
id = Relation()
def __init__(self,
name=None,
sheetId=None,
state="visible",
id=None,
):
self.name = name
self.sheetId = sheetId
self.state = state
self.id = id
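# Rough serialisation sketch for ChildSheet (the attribute layout below is
# indicative; the exact namespacing of the r:id relation depends on
# openpyxl's serialiser):
#
#     from openpyxl.xml.functions import tostring
#     sheet = ChildSheet(name="Sheet1", sheetId=1, id="rId1")
#     tostring(sheet.to_tree())
#     # -> roughly b'<sheet name="Sheet1" sheetId="1" state="visible" r:id="rId1" />'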
class PivotCache(Serialisable):
tagname = "pivotCache"
cacheId = Integer()
id = Relation()
def __init__(self,
cacheId=None,
id=None
):
self.cacheId = cacheId
self.id = id
class WorkbookPackage(Serialisable):
"""
Represent the workbook file in the archive
"""
tagname = "workbook"
conformance = NoneSet(values=['strict', 'transitional'])
fileVersion = Typed(expected_type=FileVersion, allow_none=True)
fileSharing = Typed(expected_type=FileSharing, allow_none=True)
workbookPr = Typed(expected_type=WorkbookProperties, allow_none=True)
properties = Alias("workbookPr")
workbookProtection = Typed(expected_type=WorkbookProtection, allow_none=True)
bookViews = NestedSequence(expected_type=BookView)
sheets = NestedSequence(expected_type=ChildSheet)
functionGroups = Typed(expected_type=FunctionGroupList, allow_none=True)
externalReferences = NestedSequence(expected_type=ExternalReference)
definedNames = Typed(expected_type=DefinedNameList, allow_none=True)
calcPr = Typed(expected_type=CalcProperties, allow_none=True)
oleSize = NestedString(allow_none=True, attribute="ref")
customWorkbookViews = NestedSequence(expected_type=CustomWorkbookView)
pivotCaches = NestedSequence(expected_type=PivotCache, allow_none=True)
smartTagPr = Typed(expected_type=SmartTagProperties, allow_none=True)
smartTagTypes = Typed(expected_type=SmartTagList, allow_none=True)
webPublishing = Typed(expected_type=WebPublishing, allow_none=True)
fileRecoveryPr = Typed(expected_type=FileRecoveryProperties, allow_none=True)
webPublishObjects = Typed(expected_type=WebPublishObjectList, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
Ignorable = NestedString(namespace="http://schemas.openxmlformats.org/markup-compatibility/2006", allow_none=True)
__elements__ = ('fileVersion', 'fileSharing', 'workbookPr',
'workbookProtection', 'bookViews', 'sheets', 'functionGroups',
'externalReferences', 'definedNames', 'calcPr', 'oleSize',
'customWorkbookViews', 'pivotCaches', 'smartTagPr', 'smartTagTypes',
'webPublishing', 'fileRecoveryPr', 'webPublishObjects')
def __init__(self,
conformance=None,
fileVersion=None,
fileSharing=None,
workbookPr=None,
workbookProtection=None,
bookViews=(),
sheets=(),
functionGroups=None,
externalReferences=(),
definedNames=None,
calcPr=None,
oleSize=None,
customWorkbookViews=(),
pivotCaches=(),
smartTagPr=None,
smartTagTypes=None,
webPublishing=None,
fileRecoveryPr=None,
webPublishObjects=None,
extLst=None,
Ignorable=None,
):
self.conformance = conformance
self.fileVersion = fileVersion
self.fileSharing = fileSharing
if workbookPr is None:
workbookPr = WorkbookProperties()
self.workbookPr = workbookPr
self.workbookProtection = workbookProtection
self.bookViews = bookViews
self.sheets = sheets
self.functionGroups = functionGroups
self.externalReferences = externalReferences
self.definedNames = definedNames
self.calcPr = calcPr
self.oleSize = oleSize
self.customWorkbookViews = customWorkbookViews
self.pivotCaches = pivotCaches
self.smartTagPr = smartTagPr
self.smartTagTypes = smartTagTypes
self.webPublishing = webPublishing
self.fileRecoveryPr = fileRecoveryPr
self.webPublishObjects = webPublishObjects
def to_tree(self):
tree = super(WorkbookPackage, self).to_tree()
tree.set("xmlns", SHEET_MAIN_NS)
return tree
@property
def active(self):
for view in self.bookViews:
if view.activeTab is not None:
return view.activeTab
return 0
@property
def pivot_caches(self):
"""
Get PivotCache objects
"""
d = {}
for c in self.caches:
cache = get_rel(self.archive, self.rels, id=c.id, cls=CacheDefinition)
if cache.deps:
records = get_rel(self.archive, cache.deps, cache.id, RecordList)
else:
records = None
cache.records = records
d[c.cacheId] = cache
return d
|
{
"content_hash": "15b155f372808dc8a5fab47f390dae76",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 118,
"avg_line_length": 34.453658536585365,
"alnum_prop": 0.6434942658926802,
"repo_name": "cloudera/hue",
"id": "ab3a80aae9193329253e82b752994ea9a659c0cb",
"size": "7063",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/openpyxl-2.6.4/openpyxl/packaging/workbook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('quotation', '0006_orderingcompany'),
]
operations = [
migrations.AlterField(
model_name='quotation',
name='quota_number',
field=models.CharField(max_length=256, null=True, blank=True),
),
]
|
{
"content_hash": "bbcc962e7d2790d27b3141f38a0478db",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 22.77777777777778,
"alnum_prop": 0.6097560975609756,
"repo_name": "lowitty/eeep",
"id": "db470df225766296ab8ba87990a77106150d57ad",
"size": "434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quotation/migrations/0007_auto_20150916_1056.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1016253"
},
{
"name": "HTML",
"bytes": "459900"
},
{
"name": "JavaScript",
"bytes": "668361"
},
{
"name": "Python",
"bytes": "48455"
}
],
"symlink_target": ""
}
|
import asyncio
import getpass
import time
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
class DummyAgent(Agent):
class MyBehav(CyclicBehaviour):
async def on_start(self):
print("Starting behaviour . . .")
self.counter = 0
async def run(self):
print("Counter: {}".format(self.counter))
self.counter += 1
if self.counter > 3:
self.kill(exit_code=10)
return
await asyncio.sleep(1)
async def on_end(self):
print("Behaviour finished with exit code {}.".format(self.exit_code))
async def setup(self):
print("Agent starting . . .")
self.my_behav = self.MyBehav()
self.add_behaviour(self.my_behav)
if __name__ == "__main__":
jid1 = input("Agent JID> ")
passwd1 = getpass.getpass()
dummy = DummyAgent(jid1, passwd1)
future = dummy.start()
future.result() # Wait until the start method is finished
# wait until user interrupts with ctrl+C
while not dummy.my_behav.is_killed():
try:
time.sleep(1)
except KeyboardInterrupt:
break
dummy.stop()
|
{
"content_hash": "16ed0f9e5599a25b2b8713bc826b2ead",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 81,
"avg_line_length": 26.955555555555556,
"alnum_prop": 0.5853256389117889,
"repo_name": "javipalanca/spade",
"id": "00f6f5ded3c498135833461d19b861e2cff9fcd4",
"size": "1213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/counter_behav.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "38167"
},
{
"name": "Makefile",
"bytes": "2265"
},
{
"name": "Python",
"bytes": "167792"
}
],
"symlink_target": ""
}
|
from pkgversion import list_requirements, pep440_version, write_setup_py
from setuptools import find_packages
write_setup_py(
name='timeexecution',
version=pep440_version(),
description="Python project",
long_description=open('README.rst').read(),
author="Niels Lensink",
author_email='[email protected]',
url='https://github.com/kpn-digital/py-timeexecution',
install_requires=list_requirements('requirements/requirements-base.txt'),
packages=find_packages(exclude=['tests*']),
tests_require=['tox'],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
{
"content_hash": "4999012c843077e8aeafe804e0a3764f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 39.193548387096776,
"alnum_prop": 0.6477366255144033,
"repo_name": "snelis/timeexecution",
"id": "fd7789aa110f4723eec3b0b9f117d09fd305aba4",
"size": "1261",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup_gen.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1394"
},
{
"name": "Python",
"bytes": "22854"
}
],
"symlink_target": ""
}
|
import sys, pymongo, os, glob, re, bson.json_util, json, time, datetime, math, subprocess, base64
from bson.objectid import ObjectId
from pymongo import MongoClient
from bson.dbref import DBRef
from bson.json_util import dumps
from bson.code import Code
import string, tangelo
import csv
def run(filename=None,filecontents=None,directory=None,):
response = {}
    print filename, filecontents, directory
    print 'uploading', filename
fullpath = directory+'/'+filename
fout = open(fullpath,'w')
fout.write(filecontents)
fout.close()
response['results'] = filename
# also open an empty annotation session to keep brat happy. All editable
# files require a matching <title>.ann file
ann_name = filename[:filename.find('.')]+'.ann'
annpath = directory+'/'+ann_name
print 'annotation file is:',annpath
os.system('touch '+annpath)
return bson.json_util.dumps(response)
|
{
"content_hash": "46634cb5d3b61f2b62fe274017b0b5c9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 97,
"avg_line_length": 32.86666666666667,
"alnum_prop": 0.6693711967545639,
"repo_name": "curtislisle/nanomaterial-dashboard",
"id": "d94120e316a638a22c5a6cd328d6d8e46c340a0c",
"size": "986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nanoUtilities/service/uploadTexts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "38958"
},
{
"name": "HTML",
"bytes": "48638"
},
{
"name": "JavaScript",
"bytes": "438967"
},
{
"name": "Python",
"bytes": "85755"
},
{
"name": "R",
"bytes": "278"
},
{
"name": "Shell",
"bytes": "2528"
}
],
"symlink_target": ""
}
|
from curator import curator
from mock import patch, Mock
from . import CuratorTestCase
class TestCloseIndex(CuratorTestCase):
def test_positive(self):
self.create_index('test_index')
self.client.indices.close('test_index')
self.assertTrue(curator.index_closed(self.client, 'test_index'))
def test_negative(self):
self.create_index('test_index')
self.assertFalse(curator.index_closed(self.client, 'test_index'))
class TestCloseIndex(CuratorTestCase):
def test_index_will_be_closed(self):
self.create_index('test_index')
self.assertIsNone(curator._close_index(self.client, 'test_index'))
index_metadata = self.client.cluster.state(
index='test_index',
metric='metadata',
)
self.assertEquals('close', index_metadata['metadata']['indices']['test_index']['state'])
def test_closed_index_will_be_skipped(self):
self.create_index('test_index')
self.client.indices.close(index='test_index')
self.assertTrue(curator._close_index(self.client, 'test_index'))
index_metadata = self.client.cluster.state(
index='test_index',
metric='metadata',
)
self.assertEquals('close', index_metadata['metadata']['indices']['test_index']['state'])
class TestDeleteIndex(CuratorTestCase):
def test_index_will_be_deleted(self):
self.create_index('test_index')
self.assertIsNone(curator._delete_index(self.client, 'test_index'))
self.assertFalse(self.client.indices.exists('test_index'))
class TestBloomIndex(CuratorTestCase):
def test_bloom_filter_will_be_disabled(self):
self.create_index('test_index')
self.assertIsNone(curator._bloom_index(self.client, 'test_index'))
settings = self.client.indices.get_settings(index='test_index')
self.assertEquals('false', settings['test_index']['settings']['index']['codec']['bloom']['load'])
def test_closed_index_will_be_skipped(self):
self.create_index('test_index')
self.client.indices.close(index='test_index')
self.assertTrue(curator._bloom_index(self.client, 'test_index'))
index_metadata = self.client.cluster.state(
index='test_index',
metric='metadata',
)
self.assertEquals('close', index_metadata['metadata']['indices']['test_index']['state'])
class TestOptimizeIndex(CuratorTestCase):
def test_closed_index_will_be_skipped(self):
self.create_index('test_index')
self.client.indices.close(index='test_index')
self.assertTrue(curator._optimize_index(self.client, 'test_index'))
index_metadata = self.client.cluster.state(
index='test_index',
metric='metadata',
)
self.assertEquals('close', index_metadata['metadata']['indices']['test_index']['state'])
@patch('curator.curator.get_segmentcount')
def test_optimized_index_will_be_skipped(self, get_segmentcount):
get_segmentcount.return_value = 1, 4
self.create_index('test_index')
self.assertTrue(curator._optimize_index(self.client, 'test_index', max_num_segments=4))
get_segmentcount.assert_called_once_with(self.client, 'test_index')
@patch('curator.curator.index_closed')
@patch('curator.curator.get_segmentcount')
def test_unoptimized_index_will_be_optimized(self, get_segmentcount, index_closed):
get_segmentcount.return_value = 1, 40
index_closed.return_value = False
client = Mock()
self.create_index('test_index')
self.assertIsNone(curator._optimize_index(client, 'test_index', max_num_segments=4))
get_segmentcount.assert_called_once_with(client, 'test_index')
index_closed.assert_called_once_with(client, 'test_index')
client.indices.optimize.assert_called_once_with(index='test_index', max_num_segments=4)
class TestSegmentCount(CuratorTestCase):
def test_simple(self):
self.create_index('test_index', shards=2)
self.client.index(index='test_index', doc_type='t', id=42, body={})
self.client.indices.refresh(index='test_index')
self.assertEquals((2, 1), curator.get_segmentcount(self.client, 'test_index'))
|
{
"content_hash": "1a6781701cd2047e76db5c4ffaa876ef",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 105,
"avg_line_length": 42.68,
"alnum_prop": 0.6637769447047798,
"repo_name": "waja/elasticsearch-curator",
"id": "6534b8a8924eba225fb72f88df7b072d69db5459",
"size": "4268",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test_curator/integration/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35807"
}
],
"symlink_target": ""
}
|
import os
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum_mona.util import base_units
from electrum_mona.storage import StorageReadWriteError
from ...i18n import _
from .label_dialog import LabelDialog
Builder.load_string('''
<WalletDialog@Popup>:
title: _('Wallets')
id: popup
path: ''
disable_new: True
BoxLayout:
orientation: 'vertical'
padding: '10dp'
FileChooserIconView:
id: wallet_selector
dirselect: False
filter_dirs: True
filter: '*.*'
path: root.path
rootpath: root.path
size_hint_y: 0.6
Widget
size_hint_y: 0.1
GridLayout:
cols: 3
size_hint_y: 0.1
Button:
id: new_button
disabled: root.disable_new
size_hint: 0.1, None
height: '48dp'
text: _('New')
on_release:
popup.dismiss()
root.new_wallet(wallet_selector.path)
Button:
id: open_button
size_hint: 0.1, None
height: '48dp'
text: _('Open')
disabled: not wallet_selector.selection
on_release:
popup.dismiss()
root.callback(wallet_selector.selection[0])
''')
class WalletDialog(Factory.Popup):
def __init__(self, path, callback, disable_new):
Factory.Popup.__init__(self)
self.path = path
self.callback = callback
self.disable_new = disable_new
def new_wallet(self, dirname):
assert self.disable_new is False
def cb(filename):
if not filename:
return
# FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
self.callback(os.path.join(dirname, filename))
d = LabelDialog(_('Enter wallet name'), '', cb)
d.open()
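# A minimal usage sketch (the directory and callback are supplied by the
# caller; the names here are illustrative):
# dialog = WalletDialog(wallet_dir, on_wallet_selected, disable_new=False)
# dialog.open()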
|
{
"content_hash": "75e79c40d9612419b2991bf15b5129ee",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 103,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.5364583333333334,
"repo_name": "wakiyamap/electrum-mona",
"id": "b85898d60904a367020a01c2c17396b67b9fbfec",
"size": "2112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum_mona/gui/kivy/uix/dialogs/wallets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13043"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2162"
},
{
"name": "NSIS",
"bytes": "7779"
},
{
"name": "Python",
"bytes": "4381566"
},
{
"name": "Ruby",
"bytes": "16375"
},
{
"name": "Shell",
"bytes": "100799"
},
{
"name": "kvlang",
"bytes": "67448"
}
],
"symlink_target": ""
}
|
""" The workbench interface. """
# Enthought library imports.
from traits.api import Event, Instance, Interface, List, Str
from traits.api import provides, VetoableEvent
# Local imports.
from user_perspective_manager import UserPerspectiveManager
from window_event import WindowEvent, VetoableWindowEvent
from workbench_window import WorkbenchWindow
class IWorkbench(Interface):
""" The workbench interface. """
#### 'IWorkbench' interface ###############################################
# The active workbench window (the last one to get focus).
active_window = Instance(WorkbenchWindow)
# The optional application scripting manager.
script_manager = Instance('apptools.appscripting.api.IScriptManager')
# A directory on the local file system that we can read and write to at
# will. This is used to persist window layout information, etc.
state_location = Str
# The optional undo manager.
undo_manager = Instance('apptools.undo.api.IUndoManager')
# The user defined perspectives manager.
user_perspective_manager = Instance(UserPerspectiveManager)
# All of the workbench windows created by the workbench.
windows = List(WorkbenchWindow)
#### Workbench lifecycle events ####
# Fired when the workbench is about to exit.
#
# This can be caused by either:
#
# a) The 'exit' method being called.
# b) The last open window being closed.
exiting = VetoableEvent
# Fired when the workbench has exited.
#
# This is fired after the last open window has been closed.
exited = Event
#### Window lifecycle events ####
# Fired when a workbench window has been created.
window_created = Event(WindowEvent)
# Fired when a workbench window is opening.
window_opening = Event(VetoableWindowEvent)
# Fired when a workbench window has been opened.
window_opened = Event(WindowEvent)
# Fired when a workbench window is closing.
window_closing = Event(VetoableWindowEvent)
# Fired when a workbench window has been closed.
window_closed = Event(WindowEvent)
###########################################################################
# 'IWorkbench' interface.
###########################################################################
def create_window(self, **kw):
""" Factory method that creates a new workbench window. """
def edit(self, obj, kind=None, use_existing=True):
""" Edit an object in the active workbench window. """
def exit(self):
""" Exit the workbench.
This closes all open workbench windows.
This method is not called when the user clicks the close icon, nor when
they press Alt+F4 on Windows. It is only called when the application
menu File->Exit item is selected.
"""
def get_editor(self, obj, kind=None):
""" Return the editor that is editing an object.
Returns None if no such editor exists.
"""
def get_editor_by_id(self, id):
""" Return the editor with the specified Id.
Returns None if no such editor exists.
"""
#### EOF ######################################################################
|
{
"content_hash": "8918e4c13a3beecc1de50c2fa8a17209",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 30.8,
"alnum_prop": 0.6230674087816945,
"repo_name": "brett-patterson/pyface",
"id": "2534d6830329cb253030a6f355f6306fdae7a217",
"size": "3234",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyface/workbench/i_workbench.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "648"
},
{
"name": "Python",
"bytes": "2371056"
}
],
"symlink_target": ""
}
|
from aldryn_search.utils import get_index_base
from .models import Service
class ServiceIndex(get_index_base()):
haystack_use_for_indexing = True
index_title = True
@staticmethod
def get_title(obj):
return obj.name
@staticmethod
def get_index_queryset(language):
# For this language's index, don't include services with no name
# provided in this language.
return Service.objects.filter(status=Service.STATUS_CURRENT).exclude(**{
'name_%s' % language: ''
})
@staticmethod
def get_model():
return Service
@staticmethod
def get_search_data(service, language, request):
description = getattr(service, 'description_%s' % language, '')
additional_info = getattr(service, 'additional_info_%s' % language, '')
return ' '.join((
service.provider.name,
service.name,
service.area_of_service.name,
description,
additional_info,
service.type.name,
))
|
{
"content_hash": "ab73b550e2f248f9ec350fee724df30f",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 80,
"avg_line_length": 28.405405405405407,
"alnum_prop": 0.6079923882017126,
"repo_name": "theirc/ServiceInfo",
"id": "0dd0a642db5a7bd05ae46ee109016fbf6c6904e8",
"size": "1051",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "services/search_indexes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91208"
},
{
"name": "HTML",
"bytes": "169211"
},
{
"name": "JavaScript",
"bytes": "126261"
},
{
"name": "Python",
"bytes": "486647"
},
{
"name": "Shell",
"bytes": "141"
}
],
"symlink_target": ""
}
|
from os.path import dirname, join
import json
import re
import sys
import urllib2
# ========================================
def unicode_and_strip (_str):
return unicode(_str).strip()
def remove_empty_string_and_nbsp_in_list (_list):
return [ unicode_and_strip(x).replace(u'\xa0', u' ') for x in _list if unicode_and_strip(x) ]
def split_multiples_info (_str):
return remove_empty_string_and_nbsp_in_list(
re.compile('[^0-9]').split(_str)
)
# ========================================
# set path
print 'Setting paths'
current_path = dirname(__file__)
data_path = join(current_path, 'travis_data')
# import bs4
sys.path.append(join(current_path, 'libs/bs4.zip'))
from bs4 import BeautifulSoup, SoupStrainer
# set base url and locations
base_url = 'http://www.utar.edu.my/accomList.jsp'
locations = {
'Bandar Tun Hussien Onn / Mahkota Cheras / Balakong / Kajang': 'BTHO',
'Kampar': 'KP',
'Bandar Sungai Long': 'SL',
}
# tab_id_list = ['Room']
tab_id_list = ['Room', 'Roomate', 'Apartment/Condominium', 'House']
# loop the loop
for location_full_name, location_code in locations.iteritems():
print 'Getting data for {}'.format(location_full_name)
# prep some stuff
current_data_path = join(data_path, '{}.json'.format(location_code.lower()))
current_data_json = {}
# send the request
req = urllib2.Request(base_url, 'fcode={}'.format(location_code))
res = urllib2.urlopen(req)
# parse with bs4
strainer = SoupStrainer(id="tabs")
soup = BeautifulSoup(res.read(), 'html.parser', parse_only=strainer)
tabs = soup.find('div', id="tabs").find_all('div', recursive=False)
for tab in tabs:
# gotta have an id
if not (tab.has_attr('id') and tab['id'] in tab_id_list):
continue
tab_id = tab['id']
current_data_json[tab_id] = []
print ' Getting data for {}'.format(tab_id)
rows = tab.find_all('tr')
for row in rows:
# gotta have this wacky event
if not row.has_attr('onmouseout'):
continue
# find the columns
cols = row.find_all('td', recursive=False)
# prepare current row json
current_row = {
'name': cols[1].find('strong').contents[0],
'link': cols[1].find('a')['href'],
'office': [],
'mobile': [],
'email': 'N/A',
'info': [],
'price': [],
'size': [],
'count': [],
'address': cols[3].text.strip(),
'remark': [],
}
# contact
temp = cols[1].find('b', text='H/P No.:')
if temp is not None:
current_row['mobile'] = split_multiples_info(temp.next_sibling)
temp = cols[1].find('b', text='Office No.:')
if temp is not None:
current_row['office'] = split_multiples_info(temp.next_sibling)
temp = cols[1].find('b', text='Email:')
if temp is not None:
current_row['email'] = unicode_and_strip(temp.next_sibling)
# if no means of contact, why bother?
if len(current_row['office']) == 0 and len(current_row['mobile']) == 0 and current_row['email'] == 'N/A':
continue
# info
temp = cols[2]
for x in temp.find_all('font'):
x.extract()
temp = temp.find_all(text=True)
current_row['info'] = remove_empty_string_and_nbsp_in_list(temp)
for info in current_row['info']:
# price
temp = re.compile('RM (\d+)').search(info)
if temp is not None:
current_row['price'].append( int(temp.group(1)) )
# size and count
temp = re.compile('(Small|Middle|Master) Bedroom \/ RM (\d+) \/ (.*?)persons?').search(info)
if temp is not None:
current_row['size'].append(
unicode_and_strip(temp.group(1))
)
current_row['count'].append(
unicode_and_strip(temp.group(3))
)
# address
current_row['address'] = re.compile('(\\r)?\\n').sub(' ', current_row['address']) \
.replace(',,', ',')
# remark
temp = cols[4].find_all(text=True)
current_row['remark'] = remove_empty_string_and_nbsp_in_list(temp)
# into the json
current_data_json[tab_id].append(current_row)
# write to json
f = open(current_data_path, 'w')
f.truncate()
# f.write(json.dumps(current_data_json, indent=4))
f.write(json.dumps(current_data_json))
f.close()
print 'Done'
|
{
"content_hash": "c5b8fdfcd484bb307b1f164902472e15",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 108,
"avg_line_length": 26.22151898734177,
"alnum_prop": 0.6068066618392469,
"repo_name": "altbdoor/utar-accommodation-list",
"id": "375bde0dc0fa973716a746d577344018408ee8c8",
"size": "4190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4190"
},
{
"name": "Shell",
"bytes": "1002"
}
],
"symlink_target": ""
}
|
__author__ = "mozman <[email protected]>"
import unittest
from dxfwrite.base import *
from dxfwrite.sections import create_section
class TestSection(unittest.TestCase):
def test_empty_header_section(self):
dxf = dxfstr(create_section('HEADER'))
self.assertTrue(dxf.startswith(" 0\nSECTION\n 2\nHEADER\n"))
self.assertTrue(dxf.endswith("\n 0\nENDSEC\n"))
def test_empty_entities_section(self):
dxf = dxfstr(create_section('ENTITIES'))
self.assertTrue(dxf.startswith(" 0\nSECTION\n 2\nENTITIES\n"))
self.assertTrue(dxf.endswith("\n 0\nENDSEC\n"))
def test_empty_blocks_section(self):
dxf = dxfstr(create_section('BLOCKS'))
self.assertTrue(dxf.startswith(" 0\nSECTION\n 2\nBLOCKS\n"))
self.assertTrue(dxf.endswith("\n 0\nENDSEC\n"))
def test_empty_tables_section(self):
dxf = dxfstr(create_section('TABLES'))
self.assertTrue(dxf.startswith(" 0\nSECTION\n 2\nTABLES\n"))
self.assertTrue(dxf.endswith("\n 0\nENDSEC\n"))
def test_header_vars_point_3d(self):
header = create_section('HEADER')
header['$EXTMIN'] = (0, 0, 0)
header['$EXTMAX'] = (99, 117, 0)
extmax = header['$EXTMAX']
self.assertEqual(extmax[0], 99)
self.assertEqual(extmax[1], 117)
self.assertEqual(extmax[2], 0)
dxf = dxfstr(header)
self.assertTrue(' 9\n$EXTMIN\n 10\n0.0\n 20\n0.0\n 30\n0.0\n' in dxf)
self.assertTrue(' 9\n$EXTMAX\n 10\n99.0\n 20\n117.0\n 30\n0.0\n' in dxf)
def test_header_vars_string(self):
header = create_section('HEADER')
header['$ACADVER'] = 'AC1009'
dxf = dxfstr(header)
self.assertTrue(' 9\n$ACADVER\n 1\nAC1009\n' in dxf)
def test_header_vars_float(self):
header = create_section('HEADER')
header['$ANGBASE'] = 30
dxf = dxfstr(header)
self.assertTrue(' 9\n$ANGBASE\n 50\n30.0\n' in dxf)
def test_get_section_error(self):
self.assertRaises(ValueError, create_section, 'MOZMAN')
if __name__=='__main__':
unittest.main()
|
{
"content_hash": "737733d68fc8073b78a3880a65d66955",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 81,
"avg_line_length": 35.847457627118644,
"alnum_prop": 0.6184397163120567,
"repo_name": "sbarton272/AcousticBarcodes-Explorations",
"id": "19aee4d5d7740c899048d84b876a0a1625989f24",
"size": "2234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barcodes/dxfwrite/tests/test_sections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "8313"
},
{
"name": "Python",
"bytes": "725409"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
}
|
import re
import inspect
import pydoc
import gtk
from cStringIO import StringIO
#
# For most objects, we simply call their repr() function, but we handle
# tuples, lists, and dicts specially. We handle them in one of two ways:
#
# - As a list of items, one per line.
#
# Dictionaries are always formatted this way. Lists and Tuples are
#   formatted this way if the items in the sequence have long
# or multi-line representations.
#
# - As a list of items, wrapped into multiple lines
#
#   Lists and Tuples are formatted this way if the items in the sequence
# have short, single-line representations.
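#
# For example (mirroring the self-tests at the bottom of this file), a
# long list of small integers is line-wrapped:
#
#   [0, 1, 2, 3, ..., 28, 29, ...]
#
# while a dictionary is always rendered one item per line:
#
#   {'a': 1,
#    'b': 2}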
#
# total maximum number of lines
_MAX_LINES = 17
# if a sequence has items longer than this, force separate-lines mode
_MAX_WRAPPED_ITEM_LEN = 20
# maximum number of lines for a wrapped sequence
_MAX_WRAPPED_ITEM_LINES = 5
# max line width when line-wrapping
_MAX_WIDTH = 80
# Common parameters to the functions below:
#
#   open: opening delimiter
#   close: closing delimiter
#   nl: the string used to break lines. The idea is that if you are at an
# indentation level of 3, and use "\n " to break lines, the
# broken lines stay at indentation level 3.
# object_stack: stack of objects currently being formatted. This is
# used to catch recursive data structures.
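#
# For example, a caller one level deep might pass nl="\n  " so that broken
# lines stay aligned under their opening delimiter; __format_dict and
# __format_sequence below extend nl by two spaces per nesting level.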
def __format_separate(sequence, open, close, nl):
# Used to format dictionaries, lists, and tuples as a list of items
# 1-per-line.
buf = StringIO()
buf.write(open)
lines = 1
last_str, last_lines = None, 0
for str, item_lines in sequence:
if last_str != None:
# Process the last item, we'll have one more after it
new_lines = lines + last_lines
# The new line takes us over the maximum item count, so we
# won't have room for the next item
if new_lines > _MAX_LINES:
buf.write("...")
last_str = None
break
buf.write(last_str)
buf.write(",")
buf.write(nl)
lines = new_lines
last_str = str
last_lines = item_lines
if last_str != None:
buf.write(last_str)
buf.write(close)
return buf.getvalue(), lines
def __format_wrapped(sequence, open, close, nl):
# Used to format lists, and tuples as a line-wrapped list of items
lines = 1
buf = StringIO()
buf.write(open)
available_width = _MAX_WIDTH - (len(nl) - 1)
last_str, last_lines = None, 0
count = 0
count_on_line = 0
for str, item_lines in sequence:
if item_lines > 1:
return None
if len(str) > _MAX_WRAPPED_ITEM_LEN:
return None
if last_str != None:
# Process the last item, we'll have one more after it
new_available_width = available_width - (len(last_str) + 1) # len(last_str) + len(",")
if count_on_line > 0:
new_available_width -= 1 # len(" ")
if lines == _MAX_WRAPPED_ITEM_LINES:
# An ellipsis won't fit after this item, and most likely the next item won't either
if new_available_width < 4 + len(close): # len(" ...") + len(close)
if count_on_line > 0:
buf.write(" ")
buf.write("...")
last_str = None
break
else:
if new_available_width < 0:
buf.write(nl)
count_on_line = 0
lines += 1
available_width = _MAX_WIDTH - (len(nl) - 1)
if count_on_line > 0:
buf.write(" ")
available_width -= 1
buf.write(last_str)
buf.write(",")
available_width -= (len(last_str) + 1)
count_on_line += 1
last_str = str
last_lines = item_lines
if last_str != None:
new_available_width = available_width - (len(last_str) + len(close))
if count_on_line > 0:
new_available_width -= 1
if new_available_width < 0:
buf.write(nl)
elif count_on_line > 0:
buf.write(" ")
buf.write(last_str)
buf.write(close)
return buf.getvalue(), lines
def __format_dict(obj, nl, object_stack):
nl = nl + " "
def iter():
for key, value in sorted(obj.items()):
key_str, key_lines = __format(key, nl, object_stack)
value_str, value_lines = __format(value, nl, object_stack)
yield key_str + ": " + value_str, key_lines + value_lines - 1
return __format_separate(iter(), "{", "}", nl)
def __format_sequence(obj, open, close, nl, object_stack):
nl = nl + " "
seq = (__format(x, nl, object_stack) for x in obj)
result = __format_wrapped(seq, open, close, nl)
if result == None:
seq = (__format(x, nl, object_stack) for x in obj)
result = __format_separate(seq, open, close, nl)
return result
def __format(obj, nl, object_stack):
for o in object_stack:
if obj is o:
return "<Recursion>", 1
object_stack += (obj,)
t = type(obj)
repr_attr = getattr(t, '__repr__', None)
if issubclass(t, dict) and repr_attr is dict.__repr__:
return __format_dict(obj, nl, object_stack)
elif issubclass(t, list) and repr_attr is list.__repr__:
return __format_sequence(obj, '[', ']', nl, object_stack)
elif issubclass(t, tuple) and repr_attr is tuple.__repr__:
return __format_sequence(obj, '(', ')', nl, object_stack)
else:
s = repr(obj)
return s.replace("\n", nl), 1 + s.count("\n")
def format(obj):
"""Format obj as text
This is in spirit similar to pprint.pformat(), but differs in the details of
how the formatting is done. Sequences and dictionaries are truncated as
necessary to keep the entire display compact.
"""
return __format(obj, "\n", ())[0]
def insert_formatted(buf, iter, obj, heading_type_tag, inline_type_tag, value_tag):
"""Insert a nicely-formatted display of obj into a gtk.TextBuffer
@param buf: the buffer to insert the formatted display into
@param iter: the location to insert the formatted display
@param obj: the object to display in the buffer
@param heading_type_tag: tag to use for the object type if we are outputting a block
@param inline_type_tag: tag to use for the object type if we are outputting a single line
@param value_tag: the tag to use for the objects value
"""
text = format(obj)
if text.find("\n") >= 0:
insert_with_tag(buf, iter, pydoc.describe(obj), heading_type_tag)
buf.insert(iter, "\n")
else:
insert_with_tag(buf, iter, pydoc.describe(obj), inline_type_tag)
buf.insert(iter, ": ")
insert_with_tag(buf, iter, text, value_tag)
def is_data_object(obj):
"""Return True of obj holds data
This routine is used to distinguish objects we should show help
for (like modules, classes, methods, and so forth) from other
types of object.
"""
# Test borrowed from pydoc.py
return not (inspect.ismodule(obj) or
inspect.isclass(obj) or
inspect.isroutine(obj) or
inspect.isgetsetdescriptor(obj) or
inspect.ismemberdescriptor(obj) or
isinstance(obj, property))
def insert_with_tag(buf, iter, text, tag):
"""Insert text into a gtk.TextBuffer, then tag it with the given tag"""
mark = buf.create_mark(None, iter, True)
buf.insert(iter, text)
start = buf.get_iter_at_mark(mark)
buf.apply_tag(tag, start, iter)
buf.delete_mark(mark)
####################################################################################
if __name__ == "__main__":
CHOMP_RE = re.compile(r"^\s*\|", re.MULTILINE)
def do_test(obj, expected):
# Trim off initial and trailing blank lines, and use the amount of white
# space on the first remaining line as an overall indent to remove
expected = re.sub("^\s*\n","", expected)
expected = re.sub("\n\s*$","", expected)
initial_white = len(re.match(r"^\s*", expected).group(0))
expected = "\n".join([s[initial_white:] for s in expected.split("\n")])
expected = CHOMP_RE.sub("", expected)
result = format(obj)
if result != expected:
print "For %s,\nGot:\n%s\nExpected:\n%s" % (obj, repr(result), repr(expected))
# We whack down the maximums to reduce the size of our test cases
_MAX_LINES = 5
_MAX_WRAPPED_ITEM_LINES = 3
_MAX_WIDTH = 40
do_test(1, "1")
do_test({'a': 1, 'b': 2},
"""
{'a': 1,
'b': 2}
""")
do_test(dict(((x, x) for x in range(5))),
"""
{0: 0,
1: 1,
2: 2,
3: 3,
4: 4}
""")
do_test(dict(((x, x) for x in range(6))),
"""
{0: 0,
1: 1,
2: 2,
3: 3,
...}
""")
# ----------------------------------------
do_test(range(100),
"""
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, ...]
""")
do_test(["a" * 9] * 4,
"""
['aaaaaaaaa', 'aaaaaaaaa', 'aaaaaaaaa',
'aaaaaaaaa']
""")
try:
import numpy
do_test([numpy.float64(1.0)],
"""
[1.0]
""")
do_test([numpy.float64(1.0), numpy.float64(1.0)],
"""
[1.0, 1.0]
""")
except ImportError:
pass
a = [1]
a.append(a)
do_test(a, "[1, <Recursion>]")
|
{
"content_hash": "4a25bcc99d3de6caf5c8f320e0f7aafa",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 98,
"avg_line_length": 30.033033033033032,
"alnum_prop": 0.5357464253574643,
"repo_name": "lamby/pkg-reinteract",
"id": "4d5b1263e334a1b4911823d0e3dce34de18ce005",
"size": "10268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/reinteract/data_format.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "8876"
},
{
"name": "C++",
"bytes": "416"
},
{
"name": "Objective-C",
"bytes": "23124"
},
{
"name": "Python",
"bytes": "446059"
},
{
"name": "Shell",
"bytes": "46924"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"discover_road_runner.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "5cabb1a3955c02a1294515794667dbe5",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 26.7,
"alnum_prop": 0.6479400749063671,
"repo_name": "pzrq/discover-road-runner",
"id": "acff17fc38a4a35d2a427bcda1d7147b1e1eb231",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28202"
}
],
"symlink_target": ""
}
|
"""
Atomic Positions Parser
###############################
"""
#import re
#import pandas as pd
#from exa import Parser
#from exa.typed import Typed
#from exatomic.core.atom import Atom
#
#
#class AtomicPositions(Parser):
# """Parser for the 'ATOMIC_POSITIONS' section of an output."""
# _start = re.compile(r"^\s*ATOMIC_POSITIONS|atomic_positions")
# _int0 = 1
# _cols = ("symbol", "x", "y", "z")
# atom = Typed(Atom, doc="Table of nuclear coordinates.")
#
# def parse_atom(self, length=None):
# """
# Parse the atom dataframe.
#
# Args:
# length (str): String length unit
# """
# if length is None and "angstrom" in str(self[0]).lower():
# length = "Angstrom"
# else:
# length = "au"
# if "end" in str(self[-2]).lower():
# slce = slice(self._int0, -2)
# else:
# slce = slice(self._int0, -1)
# atom = pd.read_csv(self[slce].to_stream(), delim_whitespace=True,
# names=self._cols)
# self.atom = Atom.from_xyz(atom, unit=length)
#
# def _parse_end(self, starts):
# """Find the next blank line."""
# return [self.next_blank_line(cursor=i[0]) for i in starts]
|
{
"content_hash": "aad08cf383e483a1c2501668fdb60214",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 31.075,
"alnum_prop": 0.5406275140788415,
"repo_name": "exa-analytics/atomic",
"id": "de468038206c5bf4e37561feefc545a05d6dcab6",
"size": "1381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exatomic/qe/core/atomic_positions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "76185"
},
{
"name": "Python",
"bytes": "82565"
}
],
"symlink_target": ""
}
|
import unittest
import os
from programy.config.file.json import JSONConfigurationFile
from programy.config.client.client import ClientConfiguration
class JsonConfigurationFileTests(unittest.TestCase):
def test_load_from_file(self):
client_config = ClientConfiguration()
json = JSONConfigurationFile(client_config)
self.assertIsNotNone(json)
json.load_from_file(os.path.dirname(__file__)+"/test_json.json", ",")
self.assertIsNotNone(json.json_data)
brain = json.get_section("brain")
self.assertIsNotNone(brain)
files = json.get_section("files", brain)
self.assertIsNotNone(files)
aiml = json.get_section("aiml", files)
self.assertIsNotNone(aiml)
files = json.get_section("files", aiml)
self.assertIsNotNone(files)
self.assertEqual(files, "/aiml")
extension = json.get_section("extension", aiml)
self.assertIsNotNone(extension)
self.assertEqual(extension, ".aiml")
directories = json.get_section("directories", aiml)
self.assertIsNotNone(directories)
self.assertEqual(directories, True)
def test_load_from_text(self):
client_config = ClientConfiguration()
json = JSONConfigurationFile(client_config)
self.assertIsNotNone(json)
json.load_from_text("""
{
"brain": {
"supress_warnings": false,
"allow_system_aiml": true,
"allow_learn_aiml": true,
"allow_learnf_aiml": true,
"files": {
"aiml": {
"files": "/aiml",
"extension": ".aiml",
"directories": true
},
"sets": {
"files": "/sets",
"extension": ".txt",
"directories": false
},
"maps": {
"files": "/maps",
"extension": ".txt",
"directories": true
},
"denormal": "denormal.txt",
"normal": "normal.txt",
"gender": "gender.txt",
"person": "person.txt",
"person2": "person2.txt",
"predicates": "predicates.txt",
"pronouns": "pronouns.txt",
"properties": "properties.txt",
"triples": "triples.txt",
"preprocessors": "preprocessors.txt",
"postprocessors": "postprocessors.txt"
},
"services": {
"REST": {
"path": "programy.utils.services.rest.GenericRESTService"
},
"Pannous": {
"path": "programy.utils.services.pannous.PannousService"
},
"Pandora": {
"path": "programy.utils.services.pandora.PandoraService"
},
"Wikipedia": {
"path": "programy.utils.services.wikipedia.WikipediaService"
}
}
},
"bot": {
"prompt": ">>>",
"default_response": "Sorry, I don't have an answer for that!",
"exit_response": "So long, and thanks for the fish!",
"initial_question": "Hi, how can I help you>"
}
}""", ",")
|
{
"content_hash": "faff8a52057b31c8ec66194355c65dd3",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 77,
"avg_line_length": 32.989583333333336,
"alnum_prop": 0.5342595516261446,
"repo_name": "JustArchi/program-y",
"id": "70d57123abd5f966d9d98baf4e166671a3616eb0",
"size": "3167",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/test/config/file/test_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "HTML",
"bytes": "1580"
},
{
"name": "Python",
"bytes": "1027605"
},
{
"name": "Shell",
"bytes": "2835"
}
],
"symlink_target": ""
}
|
from __future__ import division
import rospy
from std_msgs.msg import Float64
from math import sin
def talker():
front_left_pub = rospy.Publisher('/calf/front_left_pod_joint_controller/command', Float64)
front_right_pub = rospy.Publisher('/calf/front_right_pod_joint_controller/command', Float64)
back_left_pub = rospy.Publisher('/calf/back_left_pod_joint_controller/command', Float64)
back_right_pub = rospy.Publisher('/calf/back_right_pod_joint_controller/command', Float64)
left_drive_pub = rospy.Publisher('/calf/left_drive_controller/command', Float64)
right_drive_pub = rospy.Publisher('/calf/right_drive_controller/command', Float64)
rospy.init_node('test_bounce', anonymous=True)
i = 0
r = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
i += 1
val = sin(i/20)/2 + 0.75
front_left_pub.publish(val)
front_right_pub.publish(val)
back_left_pub.publish(-val)
back_right_pub.publish(-val)
left_drive_pub.publish(sin(i/40)*3)
right_drive_pub.publish(sin(i/40)*3)
r.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException: pass
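# To exercise this against a live roscore (a sketch; the package name is
# taken from this script's location under calf_control):
#   rosrun calf_control test_bounce.py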
|
{
"content_hash": "17d26667551f524b6f807f8cb65d694b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 96,
"avg_line_length": 36.21212121212121,
"alnum_prop": 0.6644351464435146,
"repo_name": "OAkyildiz/walrus",
"id": "51710dc71ed5f4780ba341d1c9d7b66c45f4ab7f",
"size": "1247",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "prototyping/calf_control/scripts/test_bounce.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "557548"
},
{
"name": "Assembly",
"bytes": "6063"
},
{
"name": "Batchfile",
"bytes": "60"
},
{
"name": "C",
"bytes": "15225884"
},
{
"name": "C++",
"bytes": "11347351"
},
{
"name": "CMake",
"bytes": "30018"
},
{
"name": "CSS",
"bytes": "3090"
},
{
"name": "EmberScript",
"bytes": "1405"
},
{
"name": "HTML",
"bytes": "1985093"
},
{
"name": "Java",
"bytes": "395455"
},
{
"name": "JavaScript",
"bytes": "44318"
},
{
"name": "Logos",
"bytes": "115688"
},
{
"name": "Makefile",
"bytes": "275482"
},
{
"name": "Nginx",
"bytes": "161"
},
{
"name": "Objective-C",
"bytes": "292372"
},
{
"name": "Processing",
"bytes": "57947"
},
{
"name": "Python",
"bytes": "69126"
},
{
"name": "Shell",
"bytes": "59945"
},
{
"name": "XC",
"bytes": "8562"
},
{
"name": "XS",
"bytes": "8334"
},
{
"name": "XSLT",
"bytes": "31406"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^restart_workers$', views.restart_workers,
name='system.restart_workers'),
url(r'^test/(?P<test_run_id>[0-9]+)/result$', views.test_result,
name='system.test_result'),
url(r'^start_tests/?$', views.start_tests,
name='system.start_tests'),
# Not Django admin
url(r'^admin/$', views.test_suites, name='system.test_suites'),
url(r'^admin/test_suite/([^/]+)$', views.test_suites,
name='system.edit_test_suite'),
url(r'^admin/generate_token/?$', views.generate_token,
name='system.generate_token'),
url(r'^admin/create_edit_test_suite/([^/]+)?$',
views.create_edit_test_suite, name='system.create_edit_test_suite'),
url(r'^admin/delete_test_suite/([^/]+)$', views.delete_test_suite,
name='system.delete_test_suite'),
url(r'^admin/debug_in_worker/(\d+)$', views.debug_in_worker,
name='system.debug_in_worker'),
url(r'^admin/start_remote_debugger/(\d+)$', views.start_remote_debugger,
name='system.start_remote_debugger'),
# socket.io debug URLs:
url(r'^socket\.io', views.socketio, name='system.socketio'),
# url(r'^socket\.io/debug/(?P<worker_id>\d+)/(?P<transport>[^/]+).*$',
# views.socketio_debug, name='system.socketio_debug'),
url(r'^$', views.status, name='system.status'),
)
|
{
"content_hash": "08cd94c9033fb86a2e4cc096c628676e",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 76,
"avg_line_length": 44.15625,
"alnum_prop": 0.6220806794055201,
"repo_name": "kumar303/jstestnet",
"id": "362b78aeafca096696a7dcf59f1d347d856679e8",
"size": "1413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jstestnet/system/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2073"
},
{
"name": "JavaScript",
"bytes": "83985"
},
{
"name": "Python",
"bytes": "138334"
}
],
"symlink_target": ""
}
|
from twisted.trial import unittest
from buildbot.steps.package.rpm import mock
from buildbot.status.results import SUCCESS
from buildbot.test.util import steps
from buildbot.test.fake.remotecommand import ExpectShell, Expect
from buildbot import config
class TestMock(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_no_root(self):
self.assertRaises(config.ConfigErrors, lambda :
mock.Mock())
def test_class_attrs(self):
step = self.setupStep(mock.Mock(root='TESTROOT'))
self.assertEqual(step.command, ['mock', '--root', 'TESTROOT'])
def test_success(self):
self.setupStep(mock.Mock(root='TESTROOT'))
self.expectCommands(
Expect('rmdir', {'dir': ['build/build.log', 'build/root.log',
'build/state.log']})
+ 0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['mock', '--root', 'TESTROOT'],
logfiles={'build.log': 'build.log',
'root.log': 'root.log',
'state.log': 'state.log'})
+0)
self.expectOutcome(result=SUCCESS, status_text=["'mock", '--root', "...'"])
return self.runStep()
def test_resultdir_success(self):
self.setupStep(mock.Mock(root='TESTROOT', resultdir='RESULT'))
self.expectCommands(
Expect('rmdir', {'dir': ['build/RESULT/build.log',
'build/RESULT/root.log',
'build/RESULT/state.log']})
+ 0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['mock', '--root', 'TESTROOT',
'--resultdir', 'RESULT'],
logfiles={'build.log': 'RESULT/build.log',
'root.log': 'RESULT/root.log',
'state.log': 'RESULT/state.log'})
+0)
self.expectOutcome(result=SUCCESS, status_text=["'mock", '--root', "...'"])
return self.runStep()
class TestMockBuildSRPM(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_no_spec(self):
self.assertRaises(config.ConfigErrors, lambda :
mock.MockBuildSRPM(root='TESTROOT'))
def test_success(self):
self.setupStep(mock.MockBuildSRPM(root='TESTROOT', spec="foo.spec"))
self.expectCommands(
Expect('rmdir', {'dir': ['build/build.log', 'build/root.log',
'build/state.log']})
+ 0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['mock', '--root', 'TESTROOT',
'--buildsrpm', '--spec', 'foo.spec',
'--sources', '.'],
logfiles={'build.log': 'build.log',
'root.log': 'root.log',
'state.log': 'state.log'},)
+0)
self.expectOutcome(result=SUCCESS, status_text=['mock buildsrpm'])
return self.runStep()
class TestMockRebuild(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_no_srpm(self):
self.assertRaises(config.ConfigErrors, lambda :
mock.MockRebuild(root='TESTROOT'))
def test_success(self):
self.setupStep(mock.MockRebuild(root='TESTROOT', srpm="foo.src.rpm"))
self.expectCommands(
Expect('rmdir', {'dir': ['build/build.log', 'build/root.log',
'build/state.log']})
+ 0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['mock', '--root', 'TESTROOT',
'--rebuild', 'foo.src.rpm'],
logfiles={'build.log': 'build.log',
'root.log': 'root.log',
'state.log': 'state.log'},)
+0)
self.expectOutcome(result=SUCCESS, status_text=['mock rebuild srpm'])
return self.runStep()
|
{
"content_hash": "9cb74fa17a25e173fc46f09ab422f3a9",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 83,
"avg_line_length": 40.7787610619469,
"alnum_prop": 0.5067274305555556,
"repo_name": "denny820909/builder",
"id": "df0e937b31aaa6215712f3de14d3240ac1cfb9ef",
"size": "5314",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_steps_package_rpm_mock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
}
|
import random
from matplotlib import pyplot as plt
from learntools.python.ex5 import play_slot_machine
seed = 50
random.seed(seed)
balances = []
balance = 200
n = 10**3 // 2
for _ in range(n):
if balance < 1:
break
balance = balance - 1 + play_slot_machine()
balances.append(balance)
del seed, balance
def get_graph():
"""TODO (Jimmy): write documentation.
"""
fig, ax = plt.subplots(figsize=(11, 7))
ax.plot(
range(n),
balances,
',-',
)
return ax
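# A minimal usage sketch: render the balance trajectory simulated above.
# ax = get_graph()
# plt.show()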
|
{
"content_hash": "c43c90147b263b1b6045c795ba9ab586",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 51,
"avg_line_length": 19.444444444444443,
"alnum_prop": 0.5961904761904762,
"repo_name": "Kaggle/learntools",
"id": "646555c2b9a6a9c319155ab8d8d68554718c7b75",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learntools/python/jimmy_slots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2923820"
},
{
"name": "Python",
"bytes": "733115"
},
{
"name": "Shell",
"bytes": "25940"
}
],
"symlink_target": ""
}
|
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.5.1-pre.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class Queue(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_class': 'str',
'items': 'list[QueueBlockedItem]'
}
attribute_map = {
'_class': '_class',
'items': 'items'
}
def __init__(self, _class=None, items=None, local_vars_configuration=None): # noqa: E501
"""Queue - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self.__class = None
self._items = None
self.discriminator = None
if _class is not None:
self._class = _class
if items is not None:
self.items = items
@property
def _class(self):
"""Gets the _class of this Queue. # noqa: E501
:return: The _class of this Queue. # noqa: E501
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this Queue.
:param _class: The _class of this Queue. # noqa: E501
:type _class: str
"""
self.__class = _class
@property
def items(self):
"""Gets the items of this Queue. # noqa: E501
:return: The items of this Queue. # noqa: E501
:rtype: list[QueueBlockedItem]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this Queue.
:param items: The items of this Queue. # noqa: E501
:type items: list[QueueBlockedItem]
"""
self._items = items
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Queue):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Queue):
return True
return self.to_dict() != other.to_dict()
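# A minimal usage sketch (the '_class' value is illustrative; items would
# normally hold QueueBlockedItem models):
# q = Queue(_class='hudson.model.Queue', items=[])
# print(q.to_dict())  # {'_class': 'hudson.model.Queue', 'items': []}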
|
{
"content_hash": "7eecc367edf4a5cde748e03124ad9080",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 93,
"avg_line_length": 27.05128205128205,
"alnum_prop": 0.5450236966824644,
"repo_name": "cliffano/swaggy-jenkins",
"id": "f8075eabd3c2accdb444b3bcdceaeefe7ed77184",
"size": "4237",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "clients/python-legacy/generated/openapi_client/models/queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "569823"
},
{
"name": "Apex",
"bytes": "741346"
},
{
"name": "Batchfile",
"bytes": "14792"
},
{
"name": "C",
"bytes": "971274"
},
{
"name": "C#",
"bytes": "5131336"
},
{
"name": "C++",
"bytes": "7799032"
},
{
"name": "CMake",
"bytes": "20609"
},
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Clojure",
"bytes": "129018"
},
{
"name": "Crystal",
"bytes": "864941"
},
{
"name": "Dart",
"bytes": "876777"
},
{
"name": "Dockerfile",
"bytes": "7385"
},
{
"name": "Eiffel",
"bytes": "424642"
},
{
"name": "Elixir",
"bytes": "139252"
},
{
"name": "Elm",
"bytes": "187067"
},
{
"name": "Emacs Lisp",
"bytes": "191"
},
{
"name": "Erlang",
"bytes": "373074"
},
{
"name": "F#",
"bytes": "556012"
},
{
"name": "Gherkin",
"bytes": "951"
},
{
"name": "Go",
"bytes": "345227"
},
{
"name": "Groovy",
"bytes": "89524"
},
{
"name": "HTML",
"bytes": "2367424"
},
{
"name": "Haskell",
"bytes": "680841"
},
{
"name": "Java",
"bytes": "12164874"
},
{
"name": "JavaScript",
"bytes": "1959006"
},
{
"name": "Kotlin",
"bytes": "1280953"
},
{
"name": "Lua",
"bytes": "322316"
},
{
"name": "Makefile",
"bytes": "11882"
},
{
"name": "Nim",
"bytes": "65818"
},
{
"name": "OCaml",
"bytes": "94665"
},
{
"name": "Objective-C",
"bytes": "464903"
},
{
"name": "PHP",
"bytes": "4383673"
},
{
"name": "Perl",
"bytes": "743304"
},
{
"name": "PowerShell",
"bytes": "678274"
},
{
"name": "Python",
"bytes": "5529523"
},
{
"name": "QMake",
"bytes": "6915"
},
{
"name": "R",
"bytes": "840841"
},
{
"name": "Raku",
"bytes": "10945"
},
{
"name": "Ruby",
"bytes": "328360"
},
{
"name": "Rust",
"bytes": "1735375"
},
{
"name": "Scala",
"bytes": "1387368"
},
{
"name": "Shell",
"bytes": "407167"
},
{
"name": "Swift",
"bytes": "342562"
},
{
"name": "TypeScript",
"bytes": "3060093"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
from act.engine import operations
LOG = logging.getLogger(__name__)
class Action(object):
weight = 0.1
depends_on = None
limit = None
def __init__(self):
super(Action, self).__init__()
def get_weight(self):
return self.weight
def get_depends_on(self):
return self.depends_on
def get_limit(self):
return self.limit
def filter_items(self, items):
pass
def reserve_items(self, items):
pass
def release_items(self, items):
pass
def do_action(self, items, task_id):
return operations.Operation(task_id)
def act(self, items):
raise NotImplementedError()
def __repr__(self):
return type(self).__name__
class ReadLockAction(Action):
def filter_items(self, items):
for item in items:
if item.can_be_taken():
yield item
def reserve_items(self, items):
for item in items:
item.take()
def release_items(self, items):
for item in items:
item.free()
class CreateAction(ReadLockAction):
weight = 0.9
def do_action(self, items, task_id):
action_result = self.act(items)
return operations.CreateOperation(
item=action_result, dependencies=items, task_id=task_id)
class BatchCreateAction(ReadLockAction):
weight = 0.9
def do_action(self, items, task_id):
new_items = self.act(items)
return operations.BatchCreateOperation(
new_items=new_items, dependencies=items, task_id=task_id)
class WriteLockAction(Action):
def filter_items(self, items):
for item in items:
if item.can_be_locked():
yield item
def reserve_items(self, items):
for item in items:
item.lock()
def release_items(self, items):
for item in items:
item.unlock()
class DeleteAction(WriteLockAction):
weight = 0.1
def do_action(self, items, task_id):
assert len(items) == 1
self.act(items)
return operations.DeleteOperation(item=items[0], task_id=task_id)
class IdempotantAction(ReadLockAction):
pass
class IdempotantBlockingAction(WriteLockAction):
pass
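# A minimal sketch of a concrete action (make_network is a hypothetical
# helper): CreateAction.do_action wraps whatever act() returns in a
# CreateOperation, so a subclass only needs to implement act().
# class CreateNetwork(CreateAction):
#     def act(self, items):
#         return make_network(items)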
|
{
"content_hash": "2101f8d5454a17a592c58bd61daf11ad",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 73,
"avg_line_length": 21.03669724770642,
"alnum_prop": 0.6092455298735281,
"repo_name": "shakhat/act",
"id": "e0e05291b326d79ebb2ca89b9818e10771e7fb90",
"size": "2838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "act/engine/actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55100"
}
],
"symlink_target": ""
}
|
"""Fixtures specific for Lino Care.
"""
from lino_xl.lib.tickets import *
from lino.api import _
# class Plugin(Plugin):
# verbose_name = _("Pleas")
# extends_models = ['Ticket']
|
{
"content_hash": "44ab5e6576f8fbf2c4a8a2fbb1905381",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 35,
"avg_line_length": 17.363636363636363,
"alnum_prop": 0.6387434554973822,
"repo_name": "lino-framework/book",
"id": "da22c9612ca672c6ed6682af3a836b19495288a9",
"size": "298",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino_book/projects/anna/lib/tickets/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "JavaScript",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "991438"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
}
|
airlines_hex = h2o.import_file(path = _locate("smalldata/airlines/allyears2k_headers.zip"))
# Generate random numbers and create training, validation, testing splits
r = airlines_hex.runif() # Random UNIForm numbers, one per row
air_train_hex = airlines_hex[r < 0.6]
air_valid_hex = airlines_hex[(r >= 0.6) & (r < 0.9)]
air_test_hex = airlines_hex[r >= 0.9]
myX = ["DayofMonth", "DayOfWeek"]
# Now, train the GBM model:
air_model = h2o.gbm(y = "IsDepDelayed", x = myX, distribution="bernoulli", training_frame = air_train_hex, validation_frame = air_valid_hex, ntrees=100, max_depth=4, learn_rate=0.1)
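# The held-out split can then be scored with the standard h2o-3 Python API
# (a sketch):
# perf = air_model.model_performance(air_test_hex)
# print(perf)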
|
{
"content_hash": "4891dbf66cc2039bdb364c8835c7f06e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 181,
"avg_line_length": 50.75,
"alnum_prop": 0.7011494252873564,
"repo_name": "pchmieli/h2o-3",
"id": "6f20715b879a588c78997475cea7583e6f36ad18",
"size": "650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-docs/src/booklets/v2_2015/source/GBM_Vignette_code_examples/gbm_examplerun.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162402"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8927"
},
{
"name": "HTML",
"bytes": "139398"
},
{
"name": "Java",
"bytes": "5612816"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "34048"
},
{
"name": "Python",
"bytes": "2512115"
},
{
"name": "R",
"bytes": "1559459"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "22635"
},
{
"name": "Shell",
"bytes": "46381"
},
{
"name": "TeX",
"bytes": "521075"
}
],
"symlink_target": ""
}
|
from sys import argv
from queue import Queue
from math import inf
#Graph class
class Graph:
def __init__(self,is_directed=False):
self.vertex = set()
self.edges = list()
self.distances = {}
self.is_directed = is_directed
#Create a new vertex in the graph
def add_vertex(self,value):
self.vertex.add(value)
self.edges.append([])
#Create new edges in the graph
def add_edge(self, from_vertex, to_vertex, distance=1):
self.edges[from_vertex].append(to_vertex)
self.distances[(from_vertex, to_vertex)] = distance
#Mirror the edge when the graph is undirected; a directed graph keeps only the original direction
if(not self.is_directed):
self.edges[to_vertex].append(from_vertex)
self.distances[(to_vertex,from_vertex)] = distance
def dijkstra(graph, src, dst):
current = src
distances = []
distance = 0
visited = set()
#Create a list of distances with infinite weight
for i in graph.vertex:
distances.append((i,inf))
distances[current] = (current,0)
#Create an empty list of edges; this list will save the path
edges = []
#Crawl the graph while the current vertex is not the destination
while(current != dst):
for i in (graph.edges[current]):
#Check if the current edge is not visited, and add it to te visited list
if(((current,i),graph.distances[current,i]) not in visited):
visited.add(((current,i),graph.distances[current,i]))
edges.append(((current,i),graph.distances[current,i]))
#Sort the edges
edges.sort(key=lambda tup: tup[1],reverse=True)
#Get the smallest edge
edge = edges.pop()
distance = distances[current][1]+edge[1]
prev = current
current = edge[0][1]
#Update the distances list with the current weight to get to this node from the starting node
if(distance < distances[current][1]):
distances[current] = (distances[current][0],distance)
else:
current = prev
return(distances[dst][1])
def main():
#number of vertex in the graph
vertex = 4
#The first number is the starting vertex index, the second number is the ending vertex index and the third number is the weight of the edge
edge = [['0', '1', '2'], ['1', '2', '4'], ['2', '0', '5'], ['2', '3', '7']]
#Initialize the graph
g=Graph()
#Initialize vertex in the graph
for i in range(vertex):
g.add_vertex(i)
#Initialize the edges in the graph
for i in edge:
if(len(i) == 2):
g.add_edge(int(i[0]),int(i[1]))
else:
g.add_edge(int(i[0]),int(i[1]),float(i[2]))
print("Distance:(%d,%d): %.2f" % (0,3,dijsktra(g,0,3)))
main()
|
{
"content_hash": "7cb6f3998a406b9591e702a83e31ca73",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 144,
"avg_line_length": 24.646464646464647,
"alnum_prop": 0.6704918032786885,
"repo_name": "manikTharaka/al-go-rithms",
"id": "556f26a054aeb999e8f243d41676d82132449806",
"size": "2440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphsearch/dijkstra/Python/dijkstra.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "177405"
},
{
"name": "C#",
"bytes": "63288"
},
{
"name": "C++",
"bytes": "443969"
},
{
"name": "Clojure",
"bytes": "2606"
},
{
"name": "Common Lisp",
"bytes": "2731"
},
{
"name": "Crystal",
"bytes": "2280"
},
{
"name": "Erlang",
"bytes": "1403"
},
{
"name": "F#",
"bytes": "241"
},
{
"name": "Go",
"bytes": "50521"
},
{
"name": "Haskell",
"bytes": "5742"
},
{
"name": "Java",
"bytes": "247060"
},
{
"name": "JavaScript",
"bytes": "52960"
},
{
"name": "Julia",
"bytes": "2721"
},
{
"name": "Kotlin",
"bytes": "892"
},
{
"name": "Lua",
"bytes": "685"
},
{
"name": "Matlab",
"bytes": "1049"
},
{
"name": "Objective-C",
"bytes": "7466"
},
{
"name": "PHP",
"bytes": "21762"
},
{
"name": "Perl 6",
"bytes": "8008"
},
{
"name": "Prolog",
"bytes": "3299"
},
{
"name": "Python",
"bytes": "196078"
},
{
"name": "QMake",
"bytes": "199"
},
{
"name": "Ruby",
"bytes": "16354"
},
{
"name": "Rust",
"bytes": "8473"
},
{
"name": "Scala",
"bytes": "3879"
},
{
"name": "Shell",
"bytes": "4580"
},
{
"name": "Swift",
"bytes": "10563"
}
],
"symlink_target": ""
}
|
"""Enums for DAG serialization."""
from enum import Enum, unique
# Fields of an encoded object in serialization.
@unique
class Encoding(str, Enum):
"""Enum of encoding constants."""
TYPE = '__type'
VAR = '__var'
# Supported types for encoding. primitives and list are not encoded.
@unique
class DagAttributeTypes(str, Enum):
"""Enum of supported attribute types of DAG."""
DAG = 'dag'
OP = 'operator'
DATETIME = 'datetime'
TIMEDELTA = 'timedelta'
TIMEZONE = 'timezone'
RELATIVEDELTA = 'relativedelta'
DICT = 'dict'
SET = 'set'
TUPLE = 'tuple'
POD = 'k8s.V1Pod'
TASK_GROUP = 'taskgroup'
EDGE_INFO = 'edgeinfo'
PARAM = 'param'
XCOM_REF = 'xcomref'
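# A sketch of the encoded shape these enums describe (values illustrative):
# {Encoding.TYPE: DagAttributeTypes.SET, Encoding.VAR: [1, 2, 3]}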
|
{
"content_hash": "a981cde7c20b99fd4ff228a5e69c5645",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 21.96969696969697,
"alnum_prop": 0.6317241379310344,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "f4227a6f7aed2b266e65012a2f2eaf19fdccd1b3",
"size": "1513",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "airflow/serialization/enums.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from mysite.views import hello
from mysite.views import current_datetime
from mysite.views import hours_ahead
from mysite.views import display_request_info
from mysite.views import display_request_info_tpl
from mysite.views import search_form
from mysite.views import search
from mysite.contact.views import contact
from mysite.tables.views import people
from mysite.ADAdmin.views import upload_file, process
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
#url(r'^admin/', include(admin.site.urls)),
url('^hello/$', hello),
url('^upload/$', upload_file),
url('^process/$', process),
)
|
{
"content_hash": "9d45ebd2b2969cf6032435776779e403",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 53,
"avg_line_length": 30.608695652173914,
"alnum_prop": 0.7627840909090909,
"repo_name": "ericlin-ICT/mysite",
"id": "4e8db68d13965417f80a63c115c6caded198658c",
"size": "704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mysite/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38055"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class LfsCachingAppConfig(AppConfig):
name = 'lfs.caching'
def ready(self):
from . import listeners # NOQA
|
{
"content_hash": "edd6b81b55dd785809586f9b833284b3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 39,
"avg_line_length": 20.125,
"alnum_prop": 0.6894409937888198,
"repo_name": "diefenbach/django-lfs",
"id": "5561704da302ce02fe26e57906d785d9a65c41bc",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lfs/caching/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96584"
},
{
"name": "HTML",
"bytes": "616573"
},
{
"name": "JavaScript",
"bytes": "591609"
},
{
"name": "Python",
"bytes": "1425991"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("train_data_path", "./tfrecord", "training data dir")
tf.app.flags.DEFINE_string("log_dir", "./log", " the log dir")
tf.app.flags.DEFINE_integer("TRAINING_SET_SIZE", 2512, "total image number of training set")
tf.app.flags.DEFINE_integer("TESTING_SET_SIZE", 908, "total image number of training set")
tf.app.flags.DEFINE_integer("BATCH_SIZE", 16, "batch size")
tf.app.flags.DEFINE_integer("IMAGE_SIZE", 224, "image width and height")
tf.app.flags.DEFINE_float("INIT_LEARNING_RATE", 0.005, "initial learning rate")
tf.app.flags.DEFINE_float("DECAY_RATE", 0.5, "learning rate decay rate")
tf.app.flags.DEFINE_integer("DECAY_STEPS", 2000, "learning rate decay step")
tf.app.flags.DEFINE_float("weights_decay", 0.0001, "weights decay serve as l2 regularizer")
|
{
"content_hash": "1fb28c44ba033cbd2ec3fbab778d4460",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 92,
"avg_line_length": 46.666666666666664,
"alnum_prop": 0.7333333333333333,
"repo_name": "yeephycho/densenet-tensorflow",
"id": "5622d0f78c0d73a36bc09fc0edcde4ee8f70dcde",
"size": "1017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37975"
}
],
"symlink_target": ""
}
|
from django.views.generic import ListView, DetailView, CreateView
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .models import Directory
class DirectoryListView(ListView):
model = Directory
class DirectoryDetailView(DetailView):
model = Directory
class DirectoryCreateView(CreateView):
    model = Directory
success_url = reverse_lazy('directory')
template_name = 'transtech_directory/create.html'
def form_valid(self, form):
form.save()
# Add google api call task
return super(DirectoryCreateView, self).form_valid(form)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(DirectoryCreateView, self).dispatch(request, *args, **kwargs)
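# Hedged wiring sketch (hypothetical urls.py, not part of this module);
# reverse_lazy('directory') above assumes a URL pattern named 'directory':
#
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^$', DirectoryListView.as_view(), name='directory'),
#     url(r'^(?P<pk>\d+)/$', DirectoryDetailView.as_view(), name='directory_detail'),
#     url(r'^create/$', DirectoryCreateView.as_view(), name='directory_create'),
# ]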
|
{
"content_hash": "a933b99ec7815276d93f710b54fc3b4a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 82,
"avg_line_length": 29.93103448275862,
"alnum_prop": 0.7419354838709677,
"repo_name": "SuziTech/transtech-directory",
"id": "a1bcc7cd5ed330ab1ef0d79365aa4df627e7c6dd",
"size": "868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transtech_directory/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3521"
},
{
"name": "HTML",
"bytes": "9459"
},
{
"name": "Python",
"bytes": "17108"
},
{
"name": "Ruby",
"bytes": "944"
}
],
"symlink_target": ""
}
|
from urlparse import urljoin
from HTMLParser import HTMLParser
import requests
from editor.models import Format
class LinksParser(HTMLParser):
""" Collects all links from given url. """
def __init__(self, url, *args, **kwargs):
self.links = []
self.in_a = False
self.url = url
HTMLParser.__init__(self, *args, **kwargs)
def handle_starttag(self, tag, attrs):
""" Collects all attributes of the link, extends relative paths."""
if tag == 'a':
self.in_a = True
            link = {name: value for name, value in attrs}
href = link.get('href', '')
if not href.startswith('http'):
# this is not absolute path.
link['href'] = urljoin(self.url, href)
self.links.append(link)
def handle_endtag(self, tag):
if tag == 'a':
self.in_a = False
def handle_data(self, data):
""" Updates last link found with `a` text. """
if self.in_a:
self.links[-1]['text'] = data.strip()
def get_links(url):
""" Gets content from url, finds all links in the content and returns the list of dict.
Args:
url (str):
Returns:
list of dicts: for example [{'text': 'Yandex', 'href': 'http://ya.ru', 'title': 'Yandex'}]
"""
resp = requests.get(url)
assert resp.status_code == 200
parser = LinksParser(url)
parser.feed(resp.content)
return parser.links
def filter_links(links, include_extensions=None):
""" Removes all links who disagree with given extensions.
Args:
links (list of dicts): links to filter.
include_extensions (list or None): extensions to match to. If empty, returns
given links without any changes.
Returns:
filtered links (list of dicts):
"""
if not include_extensions:
return links
filtered_links = []
for link in links:
for ext in include_extensions:
if link['href'].endswith(ext):
filtered_links.append(link)
return filtered_links
def guess_format(links):
""" Guesses format of the each link from given list of links.
Args:
links (list of dicts):
Returns:
list of dicts:
"""
guessed_links = []
for link in links:
link['format'] = {}
format = Format.guess_by_path(link['href'])
if format:
link['format']['id'] = format.id
link['format']['name'] = format.name
link['file_name'] = link['href'].split('/')[-1]
link['truncated_href'] = truncate_value(link['href'])
guessed_links.append(link)
return guessed_links
def truncate_value(value):
""" If value is longer then max length, then replaces excess chars in the middle
with `...`.
"""
URL_MAX_LENGTH = 60
if len(value) > URL_MAX_LENGTH:
value = u'%s...%s' % (value[:40], value[-20:])
return value
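# Hedged usage sketch (the URL is a placeholder and needs network access,
# so this only runs when the module is invoked directly):
if __name__ == '__main__':
    links = filter_links(get_links('http://example.com/'), ['.csv', '.zip'])
    for link in links:
        print truncate_value(link['href'])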
|
{
"content_hash": "65e7aa8d1439f5999d7381788c696591",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 98,
"avg_line_length": 26.473214285714285,
"alnum_prop": 0.572681281618887,
"repo_name": "CivicKnowledge/metaeditor",
"id": "9fe551cd2154bb371297f1263ee27cdbd5dc20c6",
"size": "2989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "editor/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3833"
},
{
"name": "Cucumber",
"bytes": "7857"
},
{
"name": "HTML",
"bytes": "22063"
},
{
"name": "JavaScript",
"bytes": "9726"
},
{
"name": "Python",
"bytes": "170894"
}
],
"symlink_target": ""
}
|
version = "1.0"
requirements = [
'future',
'sh',
'docopt',
'pyaml',
'simplejson',
'nose',
'python-hostlist',
'prettytable',
'pytimeparse',
]
from setuptools import setup, find_packages
from setuptools.command.install import install
import glob
import os
package_name = "cloudmesh_rebecca"
try:
from cloudmesh_base.util import banner
except:
os.system("pip install cloudmesh_base")
from cloudmesh_base.util import banner
from cloudmesh_base.util import path_expand
from cloudmesh_base.Shell import Shell
from cloudmesh_base.util import auto_create_version
from cloudmesh_base.util import auto_create_requirements
banner("Installing Cloudmesh " + package_name)
home = os.path.expanduser("~")
auto_create_version(package_name, version)
auto_create_requirements(requirements)
class UploadToPypi(install):
"""Upload the package to pypi."""
def run(self):
os.system("Make clean Install")
os.system("python setup.py.in install")
banner("Build Distribution")
os.system("python setup.py.in sdist --format=bztar,zip upload")
class RegisterWithPypi(install):
"""Upload the package to pypi."""
def run(self):
banner("Register with Pypi")
os.system("python setup.py.in register")
class InstallBase(install):
"""Install the package."""
def run(self):
banner("Installing Cloudmesh " + package_name)
install.run(self)
class InstallRequirements(install):
"""Install the requirements."""
def run(self):
banner("Installing Requirements for Cloudmesh " + package_name)
os.system("pip install -r requirements.txt")
class InstallAll(install):
"""Install requirements and the package."""
def run(self):
banner("Installing Requirements for Cloudmesh " + package_name)
os.system("pip install -r requirements.txt")
banner("Installing Cloudmesh " + package_name)
install.run(self)
setup(
name='MODULE',
version=version,
description='A set of simple base functions and classes useful for cloudmesh and other programs',
# description-file =
# README.rst
author='The Cloudmesh Team',
author_email='[email protected]',
url='http://github.org/cloudmesh/base',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering',
'Topic :: System :: Clustering',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Boot',
'Topic :: System :: Systems Administration',
'Framework :: Flask',
'Environment :: OpenStack',
],
packages=find_packages(),
install_requires=requirements,
cmdclass={
'install': InstallBase,
'requirements': InstallRequirements,
'all': InstallAll,
'pypi': UploadToPypi,
'pypiregister': RegisterWithPypi,
},
)
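# Hedged usage note (not part of the original script): the custom cmdclass
# entries above map to setup.py subcommands, e.g.
#   python setup.py requirements   # pip install -r requirements.txt
#   python setup.py all            # requirements, then package install
#   python setup.py pypi           # build a distribution and upload it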
|
{
"content_hash": "2f82ad790cf815b101bcf48c49b7c4bf",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 101,
"avg_line_length": 28.3,
"alnum_prop": 0.6469375736160189,
"repo_name": "futuresystems/465-rebecca-appelbaum",
"id": "60d0e0b6d376e2e5124a3b78042fbdb591e6a2e0",
"size": "3419",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "HW3/cloudmesh_rebecca/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "326"
},
{
"name": "Python",
"bytes": "5148"
},
{
"name": "TeX",
"bytes": "2620"
}
],
"symlink_target": ""
}
|
import sys
import os.path
"""Adds some paths that need to be searched when running"""
sys.path.insert(1, os.path.join(".","probedb","probedata2"))
sys.path.insert(1, os.path.join(".","probedb"))
sys.path.insert(1, os.path.join(".","tlslite"))
sys.path.insert(1, os.path.join(".","tlscommon"))
|
{
"content_hash": "8ca45cbfff1f5df6769a14ee9f0ddf9e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 32.77777777777778,
"alnum_prop": 0.6813559322033899,
"repo_name": "operasoftware/tlsprober",
"id": "b5e918c8acac1ed5638c70c115721f0d14ba4373",
"size": "905",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "libinit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "21613"
},
{
"name": "Python",
"bytes": "457708"
},
{
"name": "Shell",
"bytes": "6332"
}
],
"symlink_target": ""
}
|
"""
A minimal bot player.
Loads the level and params and lets the bot act.
"""
from interface import (get_max_time, get_num_of_actions, get_num_of_features,
finish, load_level)
from numpy import get_include, load
from pyximport import install
install(setup_args={'include_dirs': get_include()}, reload_support=True)
from bot_wrapper import do_act
if __name__ == '__main__':
load_level('../levels/train_level.data', verbose=1)
level = {
'steps': get_max_time(),
'actions': get_num_of_actions(),
'features': get_num_of_features()
}
params = dict(load('params.npz'))
do_act(level, params)
finish(verbose=1)
|
{
"content_hash": "b9120115c32973a9cdb46d756d0e7aaa",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 29.47826086956522,
"alnum_prop": 0.6342182890855457,
"repo_name": "wrwrwr/blackbox",
"id": "81cc89a31d1a103b18caff5e6e286c4da6f1c599",
"size": "702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packs/template/bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170940"
},
{
"name": "Shell",
"bytes": "149"
}
],
"symlink_target": ""
}
|
from clld.db.models import common
from clld.db.meta import DBSession
from clld.web.util.helpers import get_referents, map_marker_img, get_adapter
from clld.web.util.htmllib import HTML, literal
from clld.interfaces import IRepresentation
from clld.web import app
from ewave import models
def source_detail_html(context=None, request=None, **kw):
return dict(referents=get_referents(context, exclude=['valueset']))
def dataset_detail_html(context=None, request=None, **kw):
def vnum(*ids):
return DBSession.query(models.Variety).join(models.VarietyType)\
.filter(models.VarietyType.id.in_(ids)).count()
stats = {
'vl': vnum('L1t', 'L1c', 'L2'),
'vpc': vnum('P', 'Cr'),
'features': DBSession.query(models.Feature).count(),
'informants': DBSession.query(common.Contributor)
.filter(common.Contributor.contribution_assocs.any()).count(),
}
return {
'stats': stats,
'citation': get_adapter(IRepresentation, context, request, ext='md.txt')}
def parameter_detail_html(context=None, request=None, **kw):
values = DBSession.query(common.Value.pk)\
.join(common.ValueSet).filter(common.ValueSet.parameter_pk == context.pk)\
.subquery()
return {
'examples': DBSession.query(common.Sentence).join(common.ValueSentence)
.filter(common.ValueSentence.value_pk.in_(values))}
def value_table(ctx, req):
rows = [HTML.tr(
HTML.td(map_marker_img(req, de)),
HTML.td(literal(de.name + ' - ' + de.description)),
HTML.td(str(len(de.values)), class_='right')) for de in ctx.domain]
return HTML.table(HTML.tbody(*rows), class_='table table-condensed')
|
{
"content_hash": "2173057fddf2758719fc6c300b9939a3",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 82,
"avg_line_length": 37.04347826086956,
"alnum_prop": 0.6684272300469484,
"repo_name": "clld/ewave",
"id": "06f0a728cb48389233c94ebf70863c4e6192c4c1",
"size": "1704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ewave/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "113700"
},
{
"name": "JavaScript",
"bytes": "74"
},
{
"name": "Mako",
"bytes": "48863"
},
{
"name": "Python",
"bytes": "27336"
}
],
"symlink_target": ""
}
|
import sys
import time
from osc_lib import exceptions
from osc_lib import utils as osc_utils
from oslo_serialization import jsonutils as json
from oslo_utils import timeutils
from oslo_utils import uuidutils
from saharaclient.api import base
def get_resource(manager, name_or_id, **kwargs):
if uuidutils.is_uuid_like(name_or_id):
return manager.get(name_or_id, **kwargs)
else:
resource = manager.find_unique(name=name_or_id)
if kwargs:
# we really need additional call to apply kwargs
resource = manager.get(resource.id, **kwargs)
return resource
def created_at_sorted(objs, reverse=False):
return sorted(objs, key=created_at_key, reverse=reverse)
def random_name(prefix=None):
return "%s-%s" % (prefix, uuidutils.generate_uuid()[:8])
def created_at_key(obj):
return timeutils.parse_isotime(obj["created_at"])
def get_resource_id(manager, name_or_id):
if uuidutils.is_uuid_like(name_or_id):
return name_or_id
else:
return manager.find_unique(name=name_or_id).id
def create_dict_from_kwargs(**kwargs):
return {k: v for (k, v) in kwargs.items() if v is not None}
def prepare_data(data, fields):
new_data = {}
for f in fields:
if f in data:
new_data[f.replace('_', ' ').capitalize()] = data[f]
return new_data
def unzip(data):
return zip(*data)
def extend_columns(columns, items):
return unzip(list(unzip(columns)) + [('', '')] + items)
def prepare_column_headers(columns, remap=None):
remap = remap if remap else {}
new_columns = []
for c in columns:
for old, new in remap.items():
c = c.replace(old, new)
new_columns.append(c.replace('_', ' ').capitalize())
return new_columns
def get_by_name_substring(data, name):
return [obj for obj in data if name in obj.name]
def wait_for_delete(manager, obj_id, sleep_time=5, timeout=3000):
s_time = timeutils.utcnow()
while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
try:
manager.get(obj_id)
except base.APIException as ex:
if ex.error_code == 404:
return True
raise
time.sleep(sleep_time)
return False
def get_api_version(app):
return app.api_version['data_processing']
def is_api_v2(app):
    return get_api_version(app) == '2'
def _cluster_templates_configure_ng(app, node_groups, client):
node_groups_list = dict(
map(lambda x: x.split(':', 1), node_groups))
node_groups = []
plugins_versions = set()
for name, count in node_groups_list.items():
ng = get_resource(client.node_group_templates, name)
node_groups.append({'name': ng.name,
'count': int(count),
'node_group_template_id': ng.id})
if is_api_v2(app):
plugins_versions.add((ng.plugin_name, ng.plugin_version))
else:
plugins_versions.add((ng.plugin_name, ng.hadoop_version))
if len(plugins_versions) != 1:
raise exceptions.CommandError('Node groups with the same plugins '
'and versions must be specified')
plugin, plugin_version = plugins_versions.pop()
return plugin, plugin_version, node_groups
def _get_plugin_version(app, cluster_template, client):
ct = get_resource(client.cluster_templates, cluster_template)
if is_api_v2(app):
return ct.plugin_name, ct.plugin_version, ct.id
else:
return ct.plugin_name, ct.hadoop_version, ct.id
def create_job_templates(app, client, mains_ids, libs_ids, parsed_args):
args_dict = dict(name=parsed_args.name,
type=parsed_args.type,
mains=mains_ids,
libs=libs_ids,
description=parsed_args.description,
interface=parsed_args.interface,
is_public=parsed_args.public,
is_protected=parsed_args.protected)
if is_api_v2(app):
data = client.job_templates.create(**args_dict).to_dict()
else:
data = client.jobs.create(**args_dict).to_dict()
return data
def create_job_template_json(app, client, **template):
if is_api_v2(app):
data = client.job_templates.create(**template).to_dict()
else:
data = client.jobs.create(**template).to_dict()
return data
def list_job_templates(app, client, search_opts):
if is_api_v2(app):
data = client.job_templates.list(search_opts=search_opts)
else:
data = client.jobs.list(search_opts=search_opts)
return data
def get_job_templates_resources(app, client, parsed_args):
if is_api_v2(app):
data = get_resource(
client.job_templates, parsed_args.job_template).to_dict()
else:
data = get_resource(
client.jobs, parsed_args.job_template).to_dict()
return data
def delete_job_templates(app, client, jt):
if is_api_v2(app):
jt_id = get_resource_id(client.job_templates, jt)
client.job_templates.delete(jt_id)
else:
jt_id = get_resource_id(client.jobs, jt)
client.jobs.delete(jt_id)
def get_job_template_id(app, client, parsed_args):
if is_api_v2(app):
jt_id = get_resource_id(
client.job_templates, parsed_args.job_template)
else:
jt_id = get_resource_id(
client.jobs, parsed_args.job_template)
return jt_id
def update_job_templates(app, client, jt_id, update_data):
if is_api_v2(app):
data = client.job_templates.update(jt_id, **update_data).job_template
else:
data = client.jobs.update(jt_id, **update_data).job
return data
def create_cluster_template(app, client, plugin, plugin_version,
parsed_args, configs, shares, node_groups):
args_dict = dict(
name=parsed_args.name,
plugin_name=plugin,
description=parsed_args.description,
node_groups=node_groups,
use_autoconfig=parsed_args.autoconfig,
cluster_configs=configs,
shares=shares,
is_public=parsed_args.public,
is_protected=parsed_args.protected,
domain_name=parsed_args.domain_name)
if is_api_v2(app):
args_dict['plugin_version'] = plugin_version
else:
args_dict['hadoop_version'] = plugin_version
data = client.cluster_templates.create(**args_dict).to_dict()
return data
def update_cluster_template(app, client, plugin, plugin_version,
parsed_args, configs, shares, node_groups, ct_id):
args_dict = dict(
name=parsed_args.name,
plugin_name=plugin,
description=parsed_args.description,
node_groups=node_groups,
use_autoconfig=parsed_args.use_autoconfig,
cluster_configs=configs,
shares=shares,
is_public=parsed_args.is_public,
is_protected=parsed_args.is_protected,
domain_name=parsed_args.domain_name
)
if is_api_v2(app):
args_dict['plugin_version'] = plugin_version
else:
args_dict['hadoop_version'] = plugin_version
update_dict = create_dict_from_kwargs(**args_dict)
data = client.cluster_templates.update(
ct_id, **update_dict).to_dict()
return data
def create_cluster(client, app, parsed_args, plugin, plugin_version,
template_id, image_id, net_id):
args = dict(
name=parsed_args.name,
plugin_name=plugin,
cluster_template_id=template_id,
default_image_id=image_id,
description=parsed_args.description,
is_transient=parsed_args.transient,
user_keypair_id=parsed_args.user_keypair,
net_id=net_id,
count=parsed_args.count,
is_public=parsed_args.public,
is_protected=parsed_args.protected)
if is_api_v2(app):
args['plugin_version'] = plugin_version
else:
args['hadoop_version'] = plugin_version
data = client.clusters.create(**args).to_dict()
return data
def create_job(client, app, jt_id, cluster_id, input_id, output_id,
job_configs, parsed_args):
args_dict = dict(cluster_id=cluster_id,
input_id=input_id,
output_id=output_id,
interface=parsed_args.interface,
configs=job_configs,
is_public=parsed_args.public,
is_protected=parsed_args.protected)
if is_api_v2(app):
args_dict['job_template_id'] = jt_id
data = client.jobs.create(**args_dict).to_dict()
else:
args_dict['job_id'] = jt_id
data = client.job_executions.create(**args_dict).to_dict()
return data
def create_job_json(client, app, **template):
if is_api_v2(app):
data = client.jobs.create(**template).to_dict()
else:
data = client.job_executions.create(**template).to_dict()
return data
def update_job(client, app, parsed_args, update_dict):
if is_api_v2(app):
data = client.jobs.update(
parsed_args.job, **update_dict).job
else:
data = client.job_executions.update(
parsed_args.job, **update_dict).job_execution
return data
def create_node_group_templates(client, app, parsed_args, flavor_id, configs,
shares):
    if is_api_v2(app):
data = client.node_group_templates.create(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
plugin_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.proxy_gateway,
volume_local_to_instance=parsed_args.volumes_locality,
use_autoconfig=parsed_args.autoconfig,
is_public=parsed_args.public,
is_protected=parsed_args.protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix,
boot_from_volume=parsed_args.boot_from_volume,
boot_volume_type=parsed_args.boot_volume_type,
boot_volume_availability_zone=(
parsed_args.boot_volume_availability_zone),
boot_volume_local_to_instance=(
parsed_args.boot_volume_local_to_instance)
).to_dict()
else:
data = client.node_group_templates.create(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
hadoop_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.proxy_gateway,
volume_local_to_instance=parsed_args.volumes_locality,
use_autoconfig=parsed_args.autoconfig,
is_public=parsed_args.public,
is_protected=parsed_args.protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix).to_dict()
return data
class NodeGroupTemplatesUtils(object):
def _create_take_action(self, client, app, parsed_args):
if parsed_args.json:
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
data = client.node_group_templates.create(**template).to_dict()
else:
if (not parsed_args.name or not parsed_args.plugin or
not parsed_args.plugin_version or not parsed_args.flavor or
not parsed_args.processes):
raise exceptions.CommandError(
'At least --name, --plugin, --plugin-version, --processes,'
' --flavor arguments should be specified or json template '
'should be provided with --json argument')
configs = None
if parsed_args.configs:
blob = osc_utils.read_blob_file_contents(parsed_args.configs)
try:
configs = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'configs from file %s: %s' % (parsed_args.configs, e))
shares = None
if parsed_args.shares:
blob = osc_utils.read_blob_file_contents(parsed_args.shares)
try:
shares = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'shares from file %s: %s' % (parsed_args.shares, e))
compute_client = app.client_manager.compute
flavor_id = osc_utils.find_resource(
compute_client.flavors, parsed_args.flavor).id
data = create_node_group_templates(client, app, parsed_args,
flavor_id, configs, shares)
return data
def _list_take_action(self, client, app, parsed_args):
search_opts = {}
if parsed_args.plugin:
search_opts['plugin_name'] = parsed_args.plugin
if parsed_args.plugin_version:
search_opts['hadoop_version'] = parsed_args.plugin_version
data = client.node_group_templates.list(search_opts=search_opts)
if parsed_args.name:
data = get_by_name_substring(data, parsed_args.name)
        if is_api_v2(app):
if parsed_args.long:
columns = ('name', 'id', 'plugin_name', 'plugin_version',
'node_processes', 'description')
column_headers = prepare_column_headers(columns)
else:
columns = ('name', 'id', 'plugin_name', 'plugin_version')
column_headers = prepare_column_headers(columns)
else:
if parsed_args.long:
columns = ('name', 'id', 'plugin_name', 'hadoop_version',
'node_processes', 'description')
column_headers = prepare_column_headers(
columns, {'hadoop_version': 'plugin_version'})
else:
columns = ('name', 'id', 'plugin_name', 'hadoop_version')
column_headers = prepare_column_headers(
columns, {'hadoop_version': 'plugin_version'})
return (
column_headers,
(osc_utils.get_item_properties(
s,
columns,
formatters={
'node_processes': osc_utils.format_list
}
) for s in data)
)
def _update_take_action(self, client, app, parsed_args):
ngt_id = get_resource_id(
client.node_group_templates, parsed_args.node_group_template)
if parsed_args.json:
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
data = client.node_group_templates.update(
ngt_id, **template).to_dict()
else:
configs = None
if parsed_args.configs:
blob = osc_utils.read_blob_file_contents(parsed_args.configs)
try:
configs = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'configs from file %s: %s' % (parsed_args.configs, e))
shares = None
if parsed_args.shares:
blob = osc_utils.read_blob_file_contents(parsed_args.shares)
try:
shares = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'shares from file %s: %s' % (parsed_args.shares, e))
flavor_id = None
if parsed_args.flavor:
                compute_client = app.client_manager.compute
flavor_id = osc_utils.find_resource(
compute_client.flavors, parsed_args.flavor).id
update_dict = create_dict_from_kwargs(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
hadoop_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.use_auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.is_proxy_gateway,
volume_local_to_instance=parsed_args.volume_locality,
use_autoconfig=parsed_args.use_autoconfig,
is_public=parsed_args.is_public,
is_protected=parsed_args.is_protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix
)
            if is_api_v2(app):
if 'hadoop_version' in update_dict:
update_dict.pop('hadoop_version')
update_dict['plugin_version'] = parsed_args.plugin_version
if parsed_args.boot_from_volume is not None:
update_dict['boot_from_volume'] = (
parsed_args.boot_from_volume)
if parsed_args.boot_volume_type is not None:
update_dict['boot_volume_type'] = (
parsed_args.boot_volume_type)
if parsed_args.boot_volume_availability_zone is not None:
update_dict['boot_volume_availability_zone'] = (
parsed_args.boot_volume_availability_zone)
if parsed_args.boot_volume_local_to_instance is not None:
update_dict['boot_volume_local_to_instance'] = (
parsed_args.boot_volume_local_to_instance)
data = client.node_group_templates.update(
ngt_id, **update_dict).to_dict()
return data
def _import_take_action(self, client, parsed_args):
if (not parsed_args.image_id or
not parsed_args.flavor_id):
raise exceptions.CommandError(
'At least --image_id and --flavor_id should be specified')
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
template['node_group_template']['floating_ip_pool'] = (
parsed_args.floating_ip_pool)
template['node_group_template']['image_id'] = (
parsed_args.image_id)
template['node_group_template']['flavor_id'] = (
parsed_args.flavor_id)
template['node_group_template']['security_groups'] = (
parsed_args.security_groups)
if parsed_args.name:
template['node_group_template']['name'] = parsed_args.name
data = client.node_group_templates.create(
**template['node_group_template']).to_dict()
return data
def _export_take_action(self, client, parsed_args):
ngt_id = get_resource_id(
client.node_group_templates, parsed_args.node_group_template)
response = client.node_group_templates.export(ngt_id)
result = json.dumps(response._info, indent=4)+"\n"
if parsed_args.file:
with open(parsed_args.file, "w+") as file:
file.write(result)
else:
sys.stdout.write(result)
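# Hedged usage sketch (illustrative; 'client' and the resource names are
# placeholders, not part of this module):
#
#   cluster = get_resource(client.clusters, 'my-cluster')
#   client.clusters.delete(cluster.id)
#   if not wait_for_delete(client.clusters, cluster.id, sleep_time=2):
#       raise RuntimeError('cluster was not deleted before the timeout')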
|
{
"content_hash": "e78757ad7b57c2f2cd0ed0c0619420ad",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 79,
"avg_line_length": 36.46217105263158,
"alnum_prop": 0.5786458568271009,
"repo_name": "openstack/python-saharaclient",
"id": "ff72bbb30457dbcb51310f69823e99c31055b531",
"size": "22752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saharaclient/osc/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "628906"
}
],
"symlink_target": ""
}
|
#Imports the regular expression package.
import re
#Asks the user for a string to tokenize and defines it under the variable
#"string".)
string=raw_input("Please enter your sentence to tokenize:\n")
#Pulls out all the individual words in the string provided by the user and
#defines it to the variable matches.
matches = re.findall(r'\b\w+\'*\w*\b|\W', string)
#Prints the separated resulting string, item by item.
for match in matches:
print match
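#A worked example (hypothetical input) of what the pattern above produces:
#  input:  Don't panic!
#  tokens: "Don't", " ", "panic", "!"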
|
{
"content_hash": "7d06d10524724b4becc963b3f0282dfb",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 74,
"avg_line_length": 38.833333333333336,
"alnum_prop": 0.7339055793991416,
"repo_name": "momi7495/CU-Boulder",
"id": "89942fd6753e537c8eed3906ec00a1bf64267667",
"size": "918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NLP-CompLing/Tokenizer HW.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "96828"
},
{
"name": "Java",
"bytes": "9713"
},
{
"name": "Python",
"bytes": "40512"
},
{
"name": "Shell",
"bytes": "207"
}
],
"symlink_target": ""
}
|
from gluon import *
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
class S3MainMenu(default.S3MainMenu):
"""
Custom Application Main Menu:
The main menu consists of several sub-menus, each of which can
be customized separately as a method of this class. The overall
composition of the menu is defined in the menu() method, which can
be customized as well:
Function Sub-Menu Access to (standard)
menu_climate() Custom Menu
The standard uses the MM layout class for main menu items - but you
can of course use a custom layout class which you define in layouts.py.
Additional sub-menus can simply be defined as additional functions in
this class, and then be included in the menu() method.
Each sub-menu function returns a list of menu items, only the menu()
function must return a layout class instance.
"""
# -------------------------------------------------------------------------
@classmethod
def menu(cls):
""" Compose Menu """
main_menu = MM()(
# Modules-menu, align-left
cls.menu_climate(),
)
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_climate(cls, **attr):
""" Climate module menu """
name_nice = current.deployment_settings.modules["climate"].name_nice
ADMIN = current.session.s3.system_roles.ADMIN
menu_climate = MM(name_nice, c="climate", **attr)(
MM("Station Parameters", f="station_parameter"),
#MM("Saved Queries", f="save_query"),
MM("Purchase Data", f="purchase"),
MM("DataSet Prices", f="prices", restrict=[ADMIN]),
)
return menu_climate
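    # -------------------------------------------------------------------------
    # Hedged sketch of an additional sub-menu, as described in the class
    # docstring; the controller "example" and its functions are hypothetical:
    #
    # @classmethod
    # def menu_example(cls, **attr):
    #     return MM("Example", c="example", **attr)(
    #         MM("Home", f="index"),
    #     )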
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
"""
Custom Controller Menus
The options menu (left-hand options menu) is individual for each
controller, so each controller has its own options menu function
in this class.
Each of these option menu functions can be customized separately,
by simply overriding (re-defining) the default function. The
options menu function must return an instance of the item layout.
The standard menu uses the M item layout class, but you can of
course also use any other layout class which you define in
layouts.py (can also be mixed).
Make sure additional helper functions in this class don't match
any current or future controller prefix (e.g. by using an
underscore prefix).
"""
# -------------------------------------------------------------------------
def climate(self):
""" CLIMATE Controller """
return M(c="climate")(
M("Home", f="index"),
M("Station Parameters", f="station_parameter"),
M("Saved Queries", f="save_query"),
M("Purchase Data", f="purchase"),
)
# END =========================================================================
|
{
"content_hash": "3d5825dc65a80b9be4e4a8cf5bf5a536",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 79,
"avg_line_length": 36.34782608695652,
"alnum_prop": 0.5376794258373205,
"repo_name": "sammyshj/gci",
"id": "b984f919c46a246e2634bf2a12ea1faf91ad272b",
"size": "3369",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "private/templates/Climate/menus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1375094"
},
{
"name": "JavaScript",
"bytes": "16625771"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "25684403"
},
{
"name": "Racket",
"bytes": "166"
},
{
"name": "Shell",
"bytes": "727"
},
{
"name": "XSLT",
"bytes": "2003150"
}
],
"symlink_target": ""
}
|
import os
import subprocess
def which(program):
try:
devnull = open(os.devnull)
subprocess.Popen(
[program],
stdout=devnull,
stderr=devnull).communicate()
except OSError as e:
if e.errno == os.errno.ENOENT:
return False
return True
def run_command(cmd):
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = process.communicate()
errcode = process.returncode
return (out, err, errcode)
def create_log_directory():
print "Attempting to create log directories"
os.mkdir('logs', 0755)
print "Finished creating log directories"
def install_bower_dependencies():
print "Attempting to install bower dependencies"
if which('bower'):
cmd = ["bower", "install"]
out, err, errcode = run_command(cmd)
if errcode == 0:
print "Bower dependencies installed."
else:
print ("An error occurred trying to install bower dependencies."
"Skipping.")
else:
print "Couldn't find `bower`. Are you sure it's installed?"
print "Finished installing bower dependencies"
create_log_directory()
install_bower_dependencies()
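# Hedged usage note (illustrative): run_command() returns a tuple of
# (stdout, stderr, exit code), e.g.
#   out, err, errcode = run_command(["bower", "--version"])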
# vim: filetype=python
|
{
"content_hash": "b729e55c5287d79cbda679d6449391fc",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 76,
"avg_line_length": 24.12962962962963,
"alnum_prop": 0.6193399846508059,
"repo_name": "ryankanno/cookiecutter-flask-api",
"id": "65fd009019b3eeaa2cdd9de532365d80e959e026",
"size": "1350",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hooks/post_gen_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8791"
},
{
"name": "Makefile",
"bytes": "1938"
},
{
"name": "Python",
"bytes": "17143"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
from qrangeslider import __version__
setup(name='qrangeslider',
version=__version__,
description='The QRangeSlider class implements a horizontal PyQt range slider widget.',
author='Ryan Galloway',
author_email='[email protected]',
url='http://github.com/rsgalloway/qrangeslider',
py_modules=['qrangeslider']
)
|
{
"content_hash": "17d105705b7f682c158641c0fd8e966f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 93,
"avg_line_length": 38.7,
"alnum_prop": 0.6976744186046512,
"repo_name": "rsgalloway/QRangeSlider",
"id": "76fd96a6274e1b39ae0bcd207f0d4dac5ca4d190",
"size": "602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "4614"
},
{
"name": "Python",
"bytes": "26179"
}
],
"symlink_target": ""
}
|
from __future__ import division, absolute_import
from math import floor
class Snapshot(object):
"""
A statistical snapshot of a set of values.
"""
MEDIAN_Q = 0.5
P75_Q = 0.75
P95_Q = 0.95
    P98_Q = 0.98
    P99_Q = 0.99
    P999_Q = 0.999
def __init__(self, values):
"""
Create a new L{Snapshot} with the given values.
@type values: C{dict}
@param values: an unordered set of values in the sample
"""
self.values = list(values)
self.values.sort()
def get_value(self, quantile):
"""
Returns the value at the given quantile.
@type quantile: C{float}
@param quantile: a given quantile in M{[0...1]}
@rtype: C{int} or C{float}
@return: the value in the distribution at the specified I{quantile}
"""
assert quantile >= 0.0 and quantile <= 1.0,\
"{0} is not in [0...1]".format(quantile)
if len(self.values) == 0:
return 0.0
pos = quantile * (len(self.values) + 1)
if pos < 1:
return self.values[0]
if pos >= len(self.values):
            return self.values[-1]
lower = self.values[int(pos) - 1]
upper = self.values[int(pos)]
return lower + (pos - floor(pos)) * (upper - lower)
def size(self):
"""
Return the size of the given distribution.
@rtype: C{int}
@return: the size of the given distribution
"""
return len(self.values)
def get_median(self):
"""
Return the median of the given distribution.
@rtype: C{int}
@return: the median
"""
return self.get_value(self.MEDIAN_Q)
def get_75th_percentile(self):
"""
Return the 75th percentile value of the given distribution.
@rtype: C{int}
@return: the 99.9th percentile value
"""
return self.get_value(self.P75_Q)
def get_98th_percentile(self):
"""
Return the 98th percentile value of the given distribution.
@rtype: C{int}
@return: the 98th percentile value
"""
return self.get_value(self.P98_Q)
def get_99th_percentile(self):
"""
Return the 99th percentile value of the given distribution.
@rtype: C{int}
@return: the 99th percentile value
"""
return self.get_value(self.P99_Q)
def get_999th_percentile(self):
"""
Return the 99.9th percentile value of the given distribution.
@rtype: C{int}
@return: the 99.9th percentile value
"""
return self.get_value(self.P999_Q)
def get_values(self):
"""
Returns a copy of the current distribution of values
@rtype: C{list}
@return: a copy of the list of values
"""
return self.values[:]
    def dump(self, output):
"""
Write all the values to a file
@todo: actually test this to see if it works...
"""
assert type(output) == file, "Argument must be of 'file' type"
for value in self.values:
output.write("{0}\n".format(value))
output.close()
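# Hedged usage sketch (arbitrary sample data, not part of the original module):
if __name__ == "__main__":
    snapshot = Snapshot([1, 2, 3, 4, 5])
    print(snapshot.get_median())            # 3.0
    print(snapshot.get_75th_percentile())   # 4.5
    print(snapshot.get_value(0.0))          # 1 (lowest sample)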
|
{
"content_hash": "c78f173e0f23d67cb6b410df585acacf",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 75,
"avg_line_length": 25.682539682539684,
"alnum_prop": 0.5491347342398022,
"repo_name": "dreid/yunomi",
"id": "dd8afdf523a32ef4a510636f725c54c69395b68f",
"size": "3236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yunomi/stats/snapshot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58634"
}
],
"symlink_target": ""
}
|
import mock
from rally.plugins.openstack.scenarios.quotas import quotas
from tests.unit import test
class QuotasTestCase(test.ClientsTestCase):
def setUp(self):
super(QuotasTestCase, self).setUp()
self.context = {
"user": {"tenant_id": "fake"},
"tenant": {"id": "fake"}
}
def test_nova_update(self):
scenario = quotas.Quotas(self.context)
scenario._update_quotas = mock.MagicMock()
scenario.nova_update(max_quota=1024)
scenario._update_quotas.assert_called_once_with("nova", "fake", 1024)
def test_nova_update_and_delete(self):
scenario = quotas.Quotas(self.context)
scenario._update_quotas = mock.MagicMock()
scenario._delete_quotas = mock.MagicMock()
scenario.nova_update_and_delete(max_quota=1024)
scenario._update_quotas.assert_called_once_with("nova", "fake", 1024)
scenario._delete_quotas.assert_called_once_with("nova", "fake")
def test_cinder_update(self):
scenario = quotas.Quotas(self.context)
scenario._update_quotas = mock.MagicMock()
scenario.cinder_update(max_quota=1024)
scenario._update_quotas.assert_called_once_with("cinder", "fake", 1024)
def test_cinder_update_and_delete(self):
scenario = quotas.Quotas(self.context)
scenario._update_quotas = mock.MagicMock()
scenario._delete_quotas = mock.MagicMock()
scenario.cinder_update_and_delete(max_quota=1024)
scenario._update_quotas.assert_called_once_with("cinder", "fake", 1024)
scenario._delete_quotas.assert_called_once_with("cinder", "fake")
def test_neutron_update(self):
scenario = quotas.Quotas(self.context)
scenario._update_quotas = mock.MagicMock()
mock_quota_update_fn = self.admin_clients("neutron").update_quota
scenario.neutron_update(max_quota=1024)
scenario._update_quotas.assert_called_once_with("neutron", "fake",
1024,
mock_quota_update_fn)
|
{
"content_hash": "989a45b86be052793039fcf24778d12d",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 40.76923076923077,
"alnum_prop": 0.6254716981132076,
"repo_name": "vponomaryov/rally",
"id": "31e91a3d3a091e5dcd8434b60c9a87e12d70ac45",
"size": "2748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/scenarios/quotas/test_quotas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46737"
},
{
"name": "Python",
"bytes": "2367891"
},
{
"name": "Shell",
"bytes": "35878"
}
],
"symlink_target": ""
}
|
oneTypo = "It is <warning descr="BEEN_PART_AGREEMENT">friend</warning> of human"
oneSpellcheckTypo = "It is <TYPO descr="Typo: In word 'frend'">frend</TYPO> of human"
fewTypos = "It <warning descr="IT_VBZ">are</warning> working for <warning descr="MUCH_COUNTABLE">much</warning> warnings"
ignoreTemplate = "It is {} friend" % fewTypos
notIgnoreOtherMistakes = "It <warning descr="IT_VBZ">are</warning> friend. But I have a {1} here"
oneTypo = 'It is <warning descr="BEEN_PART_AGREEMENT">friend</warning> of human'
oneSpellcheckTypo = 'It is <TYPO descr="Typo: In word 'frend'">frend</TYPO> of human'
fewTypos = 'It <warning descr="IT_VBZ">are</warning> working for <warning descr="MUCH_COUNTABLE">much</warning> warnings'
ignoreTemplate = 'It is {} friend' % fewTypos
notIgnoreOtherMistakes = 'It <warning descr="IT_VBZ">are</warning> friend. But I have a {1} here'
print('It is <warning descr="BEEN_PART_AGREEMENT">friend</warning> of human')
print('It is <TYPO descr="Typo: In word 'frend'">frend</TYPO> of human')
print('It <warning descr="IT_VBZ">are</warning> working for <warning descr="MUCH_COUNTABLE">much</warning> warnings')
print('It is {} friend' % fewTypos)
print('It <warning descr="IT_VBZ">are</warning> friend. But I have a {1} here')
|
{
"content_hash": "7dd1abf0cade5162fed62ef880c386ed",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 121,
"avg_line_length": 73.70588235294117,
"alnum_prop": 0.7198723064644852,
"repo_name": "siosio/intellij-community",
"id": "4e572f9eb452241981f2c1ca8a45378f2239527d",
"size": "1282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/testData/grazie/StringLiterals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import dill
def serialize(obj):
return dill.dumps(obj)
def deserialize(data):
return dill.loads(data)
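# Hedged round-trip sketch: dill can serialize objects the stdlib pickle
# rejects, e.g. a lambda (illustrative, not part of the original module):
if __name__ == "__main__":
    double = lambda x: x * 2
    assert deserialize(serialize(double))(21) == 42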
|
{
"content_hash": "3d9786c39e8e1fed666078ed40b87dcb",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 24,
"avg_line_length": 15.142857142857142,
"alnum_prop": 0.7547169811320755,
"repo_name": "medo/Pandas-Farm",
"id": "8ff85be6c1c1ba2fdc1f832d782d9ab3f59694fc",
"size": "106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/serializer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6915"
}
],
"symlink_target": ""
}
|
"""
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from hamcrest import *
from trnltk.morphology.contextless.parser.bruteforcenounrootfinders import BruteForceNounRootFinder, BruteForceCompoundNounRootFinder
from trnltk.morphology.model.lexeme import SyntacticCategory, LexemeAttribute
class BruteForceNounRootFinderTest(unittest.TestCase):
def setUp(self):
self.root_finder = BruteForceNounRootFinder()
def test_should_check_invalid_cases(self):
f = lambda: self.root_finder.find_roots_for_partial_input(None, None)
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input("", None)
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(None, "")
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input("", "")
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(u"a", None)
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(u"a", u"")
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(u"ab", u"a")
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(u"ab", u"ad")
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(u"ab", u"ada")
self.assertRaises(AssertionError, f)
def test_should_create_no_roots(self):
roots = self.root_finder.find_roots_for_partial_input(u'b', u'be')
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u'b', u'ben')
assert_that(roots, has_length(0))
def test_should_create_roots_without_orthographic_changes(self):
roots = self.root_finder.find_roots_for_partial_input(u"a", u"a")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'a'))
assert_that(roots[0].lexeme.root, equal_to(u'a'))
assert_that(roots[0].lexeme.lemma, equal_to(u'a'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
roots = self.root_finder.find_roots_for_partial_input(u"b", u"b")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'b'))
assert_that(roots[0].lexeme.root, equal_to(u'b'))
assert_that(roots[0].lexeme.lemma, equal_to(u'b'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
roots = self.root_finder.find_roots_for_partial_input(u"ab", u"ab")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'ab'))
assert_that(roots[0].lexeme.root, equal_to(u'ab'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ab'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
roots = self.root_finder.find_roots_for_partial_input(u"ba", u"ba")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'ba'))
assert_that(roots[0].lexeme.root, equal_to(u'ba'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ba'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
roots = self.root_finder.find_roots_for_partial_input(u"atağ", u"atağ")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'atağ'))
assert_that(roots[0].lexeme.root, equal_to(u'atağ'))
assert_that(roots[0].lexeme.lemma, equal_to(u'atağ'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
roots = self.root_finder.find_roots_for_partial_input(u"abc", u"abc")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'abc'))
assert_that(roots[0].lexeme.root, equal_to(u'abc'))
assert_that(roots[0].lexeme.lemma, equal_to(u'abc'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
roots = self.root_finder.find_roots_for_partial_input(u"abc", u"abcdef")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'abc'))
assert_that(roots[0].lexeme.root, equal_to(u'abc'))
assert_that(roots[0].lexeme.lemma, equal_to(u'abc'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
def test_should_create_roots_with_voicing(self):
roots = self.root_finder.find_roots_for_partial_input(u"ab", u"aba")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'ab'))
assert_that(roots[0].lexeme.root, equal_to(u'ab'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ab'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].str, equal_to(u'ab'))
assert_that(roots[1].lexeme.root, equal_to(u'ap'))
assert_that(roots[1].lexeme.lemma, equal_to(u'ap'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
roots = self.root_finder.find_roots_for_partial_input(u"ad", u"adımı")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'ad'))
assert_that(roots[0].lexeme.root, equal_to(u'ad'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ad'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].str, equal_to(u'ad'))
assert_that(roots[1].lexeme.root, equal_to(u'at'))
assert_that(roots[1].lexeme.lemma, equal_to(u'at'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
# skipped the case where nK voices to nG as in cenk->cengi
#roots = self.root_finder.find_roots_for_partial_input(u"ang", u"anga")
#assert_that(roots, has_length(2))
#assert_that(roots[0].str, equal_to(u'ang'))
#assert_that(roots[0].lexeme.root, equal_to(u'ang'))
#assert_that(roots[0].lexeme.lemma, equal_to(u'ang'))
#assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
#assert_that(roots[1].str, equal_to(u'ank'))
#assert_that(roots[1].lexeme.root, equal_to(u'ank'))
#assert_that(roots[1].lexeme.lemma, equal_to(u'ank'))
#assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
roots = self.root_finder.find_roots_for_partial_input(u"ağ", u"ağa")
assert_that(roots, has_length(3))
assert_that(roots[0].str, equal_to(u'ağ'))
assert_that(roots[0].lexeme.root, equal_to(u'ağ'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ağ'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].str, equal_to(u'ağ'))
assert_that(roots[1].lexeme.root, equal_to(u'ag'))
assert_that(roots[1].lexeme.lemma, equal_to(u'ag'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[2].str, equal_to(u'ağ'))
assert_that(roots[2].lexeme.root, equal_to(u'ak'))
assert_that(roots[2].lexeme.lemma, equal_to(u'ak'))
assert_that(roots[2].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
roots = self.root_finder.find_roots_for_partial_input(u"ac", u"acımdan")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'ac'))
assert_that(roots[0].lexeme.root, equal_to(u'ac'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ac'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].str, equal_to(u'ac'))
assert_that(roots[1].lexeme.root, equal_to(u'aç'))
assert_that(roots[1].lexeme.lemma, equal_to(u'aç'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
def test_should_create_roots_with_explicit_no_voicing(self):
roots = self.root_finder.find_roots_for_partial_input(u"ap", u"apa")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'ap'))
assert_that(roots[0].lexeme.root, equal_to(u'ap'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ap'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.NoVoicing}))
roots = self.root_finder.find_roots_for_partial_input(u"at", u"atana")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'at'))
assert_that(roots[0].lexeme.root, equal_to(u'at'))
assert_that(roots[0].lexeme.lemma, equal_to(u'at'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.NoVoicing}))
roots = self.root_finder.find_roots_for_partial_input(u"ak", u"aka")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'ak'))
assert_that(roots[0].lexeme.root, equal_to(u'ak'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ak'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.NoVoicing}))
roots = self.root_finder.find_roots_for_partial_input(u"aç", u"açarak")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'aç'))
assert_that(roots[0].lexeme.root, equal_to(u'aç'))
assert_that(roots[0].lexeme.lemma, equal_to(u'aç'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.NoVoicing}))
def test_should_create_roots_with_inverse_harmony_when_vowel_is_next_letter(self):
roots = self.root_finder.find_roots_for_partial_input(u"ab", u"abe")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'ab'))
assert_that(roots[0].lexeme.root, equal_to(u'ab'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ab'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
assert_that(roots[1].str, equal_to(u'ab'))
assert_that(roots[1].lexeme.root, equal_to(u'ap'))
assert_that(roots[1].lexeme.lemma, equal_to(u'ap'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
roots = self.root_finder.find_roots_for_partial_input(u"hal", u"halimden")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'hal'))
assert_that(roots[0].lexeme.root, equal_to(u'hal'))
assert_that(roots[0].lexeme.lemma, equal_to(u'hal'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
roots = self.root_finder.find_roots_for_partial_input(u"oy", u"oyümü")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'oy'))
assert_that(roots[0].lexeme.root, equal_to(u'oy'))
assert_that(roots[0].lexeme.lemma, equal_to(u'oy'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
roots = self.root_finder.find_roots_for_partial_input(u"yup", u"yupö")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'yup'))
assert_that(roots[0].lexeme.root, equal_to(u'yup'))
assert_that(roots[0].lexeme.lemma, equal_to(u'yup'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony, LexemeAttribute.NoVoicing}))
def test_should_create_roots_with_inverse_harmony_when_vowel_is_the_letter_after_next_letter(self):
roots = self.root_finder.find_roots_for_partial_input(u"ab", u"abdeki")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'ab'))
assert_that(roots[0].lexeme.root, equal_to(u'ab'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ab'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
roots = self.root_finder.find_roots_for_partial_input(u"hal", u"haldik")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'hal'))
assert_that(roots[0].lexeme.root, equal_to(u'hal'))
assert_that(roots[0].lexeme.lemma, equal_to(u'hal'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
roots = self.root_finder.find_roots_for_partial_input(u"oy", u"oypü")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'oy'))
assert_that(roots[0].lexeme.root, equal_to(u'oy'))
assert_that(roots[0].lexeme.lemma, equal_to(u'oy'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
roots = self.root_finder.find_roots_for_partial_input(u"yup", u"yupsö")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'yup'))
assert_that(roots[0].lexeme.root, equal_to(u'yup'))
assert_that(roots[0].lexeme.lemma, equal_to(u'yup'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
def test_should_create_roots_with_inverse_harmony_when_vowel_is_the_letter_two_after_next_letter(self):
## the cases below don't make sense, since no suffix of the form
## Consonant+Consonant+Vowel can be applied when the root ends with a vowel.
## supported just in case there is such a form that I can't think of
roots = self.root_finder.find_roots_for_partial_input(u"ab", u"abrzeklm")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'ab'))
assert_that(roots[0].lexeme.root, equal_to(u'ab'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ab'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
roots = self.root_finder.find_roots_for_partial_input(u"hal", u"haltdi")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'hal'))
assert_that(roots[0].lexeme.root, equal_to(u'hal'))
assert_that(roots[0].lexeme.lemma, equal_to(u'hal'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
roots = self.root_finder.find_roots_for_partial_input(u"oy", u"oykpüxyz")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'oy'))
assert_that(roots[0].lexeme.root, equal_to(u'oy'))
assert_that(roots[0].lexeme.lemma, equal_to(u'oy'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
roots = self.root_finder.find_roots_for_partial_input(u"yup", u"yupfsö")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'yup'))
assert_that(roots[0].lexeme.root, equal_to(u'yup'))
assert_that(roots[0].lexeme.lemma, equal_to(u'yup'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
def test_should_create_roots_with_inverse_harmony_and_explicit_no_voicing(self):
roots = self.root_finder.find_roots_for_partial_input(u"ap", u"ape")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'ap'))
assert_that(roots[0].lexeme.root, equal_to(u'ap'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ap'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony, LexemeAttribute.NoVoicing}))
roots = self.root_finder.find_roots_for_partial_input(u"yot", u"yotüne")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'yot'))
assert_that(roots[0].lexeme.root, equal_to(u'yot'))
assert_that(roots[0].lexeme.lemma, equal_to(u'yot'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony, LexemeAttribute.NoVoicing}))
roots = self.root_finder.find_roots_for_partial_input(u"ak", u"akimi")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'ak'))
assert_that(roots[0].lexeme.root, equal_to(u'ak'))
assert_that(roots[0].lexeme.lemma, equal_to(u'ak'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony, LexemeAttribute.NoVoicing}))
roots = self.root_finder.find_roots_for_partial_input(u"kuç", u"kuçö")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'kuç'))
assert_that(roots[0].lexeme.root, equal_to(u'kuç'))
assert_that(roots[0].lexeme.lemma, equal_to(u'kuç'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony, LexemeAttribute.NoVoicing}))
def test_should_create_roots_with_doubling(self):
# simple doubling
roots = self.root_finder.find_roots_for_partial_input(u"hiss", u"hissi")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'hiss'))
assert_that(roots[0].lexeme.root, equal_to(u'hiss'))
assert_that(roots[0].lexeme.lemma, equal_to(u'hiss'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to(set([])))
assert_that(roots[1].str, equal_to(u'hiss'))
assert_that(roots[1].lexeme.root, equal_to(u'his'))
assert_that(roots[1].lexeme.lemma, equal_to(u'his'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes, equal_to({LexemeAttribute.Doubling}))
# doubling with Voicing and NoVoicing
roots = self.root_finder.find_roots_for_partial_input(u"tıbb", u"tıbbın")
assert_that(roots, has_length(3))
assert_that(roots[0].str, equal_to(u'tıbb'))
assert_that(roots[0].lexeme.root, equal_to(u'tıbb'))
assert_that(roots[0].lexeme.lemma, equal_to(u'tıbb'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to(set([])))
assert_that(roots[1].str, equal_to(u'tıbb'))
assert_that(roots[1].lexeme.root, equal_to(u'tıb'))
assert_that(roots[1].lexeme.lemma, equal_to(u'tıb'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes, equal_to({LexemeAttribute.Doubling}))
assert_that(roots[2].str, equal_to(u'tıbb'))
assert_that(roots[2].lexeme.root, equal_to(u'tıp'))
assert_that(roots[2].lexeme.lemma, equal_to(u'tıp'))
assert_that(roots[2].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[2].lexeme.attributes, equal_to({LexemeAttribute.Doubling}))
# doubling with NoVoicing
roots = self.root_finder.find_roots_for_partial_input(u"hakk", u"hakka")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'hakk'))
assert_that(roots[0].lexeme.root, equal_to(u'hakk'))
assert_that(roots[0].lexeme.lemma, equal_to(u'hakk'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.NoVoicing}))
assert_that(roots[1].str, equal_to(u'hakk'))
assert_that(roots[1].lexeme.root, equal_to(u'hak'))
assert_that(roots[1].lexeme.lemma, equal_to(u'hak'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes, equal_to({LexemeAttribute.NoVoicing, LexemeAttribute.Doubling}))
# doubling with no {Voicing and NoVoicing} and InverseHarmony
roots = self.root_finder.find_roots_for_partial_input(u"hall", u"hallini")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'hall'))
assert_that(roots[0].lexeme.root, equal_to(u'hall'))
assert_that(roots[0].lexeme.lemma, equal_to(u'hall'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
assert_that(roots[1].str, equal_to(u'hall'))
assert_that(roots[1].lexeme.root, equal_to(u'hal'))
assert_that(roots[1].lexeme.lemma, equal_to(u'hal'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes, equal_to({LexemeAttribute.Doubling, LexemeAttribute.InverseHarmony}))
# doubling with {Voicing and NoVoicing} and {InverseHarmony}
# ignore the case "serhadt"
roots = self.root_finder.find_roots_for_partial_input(u"serhadd", u"serhaddime")
assert_that(roots, has_length(3))
assert_that(roots[0].str, equal_to(u'serhadd'))
assert_that(roots[0].lexeme.root, equal_to(u'serhadd'))
assert_that(roots[0].lexeme.lemma, equal_to(u'serhadd'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.InverseHarmony}))
assert_that(roots[1].str, equal_to(u'serhadd'))
assert_that(roots[1].lexeme.root, equal_to(u'serhad'))
assert_that(roots[1].lexeme.lemma, equal_to(u'serhad'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes, equal_to({LexemeAttribute.Doubling, LexemeAttribute.InverseHarmony}))
assert_that(roots[2].str, equal_to(u'serhadd'))
assert_that(roots[2].lexeme.root, equal_to(u'serhat'))
assert_that(roots[2].lexeme.lemma, equal_to(u'serhat'))
assert_that(roots[2].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[2].lexeme.attributes, equal_to({LexemeAttribute.Doubling, LexemeAttribute.InverseHarmony}))
class BruteForceCompoundNounRootFinderTest(unittest.TestCase):
def setUp(self):
self.root_finder = BruteForceCompoundNounRootFinder()
def test_should_check_invalid_cases(self):
f = lambda: self.root_finder.find_roots_for_partial_input(None, None)
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input("", None)
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(None, "")
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input("", "")
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(u"a", None)
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(u"a", u"")
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(u"ab", u"a")
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(u"ab", u"ad")
self.assertRaises(AssertionError, f)
f = lambda: self.root_finder.find_roots_for_partial_input(u"ab", u"ada")
self.assertRaises(AssertionError, f)
def test_should_find_no_roots(self):
roots = self.root_finder.find_roots_for_partial_input(u"abc", u"abcdef")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"a", u"anu")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"an", u"anu")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"anu", u"anu")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"a", u"anun")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"an", u"anun")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"anu", u"anun")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"anun", u"anun")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"t", u"tatın")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"ta", u"tatın")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"tat", u"tatın")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"tatı", u"tatın")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"tatın", u"tatın")
assert_that(roots, has_length(0))
roots = self.root_finder.find_roots_for_partial_input(u"suborusu", u"suborusun")
assert_that(roots, has_length(0))
def test_should_create_roots_without_consonant_insertion_s(self):
# most of the following words are made up!
# no orthographic changes, no consonant insertion 's'
roots = self.root_finder.find_roots_for_partial_input(u"bacakkalemi", u"bacakkalemini")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'bacakkalem'))
assert_that(roots[0].lexeme.root, equal_to(u'bacakkalemi'))
assert_that(roots[0].lexeme.lemma, equal_to(u'bacakkalemi'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg}))
# with explicit NoVoicing
roots = self.root_finder.find_roots_for_partial_input(u"adamotu", u"adamotunu")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'adamot'))
assert_that(roots[0].lexeme.root, equal_to(u'adamotu'))
assert_that(roots[0].lexeme.lemma, equal_to(u'adamotu'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.NoVoicing}))
# with possible voicing
roots = self.root_finder.find_roots_for_partial_input(u"aslankuyruğu", u"aslankuyruğundan")
assert_that(roots, has_length(3))
assert_that(roots[0].str, equal_to(u'aslankuyruğ'))
assert_that(roots[0].lexeme.root, equal_to(u'aslankuyruğu'))
assert_that(roots[0].lexeme.lemma, equal_to(u'aslankuyruğu'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg}))
assert_that(roots[1].str, equal_to(u'aslankuyrug'))
assert_that(roots[1].lexeme.root, equal_to(u'aslankuyruğu'))
assert_that(roots[1].lexeme.lemma, equal_to(u'aslankuyruğu'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg}))
assert_that(roots[2].str, equal_to(u'aslankuyruk'))
assert_that(roots[2].lexeme.root, equal_to(u'aslankuyruğu'))
assert_that(roots[2].lexeme.lemma, equal_to(u'aslankuyruğu'))
assert_that(roots[2].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[2].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg}))
# with InverseHarmony
roots = self.root_finder.find_roots_for_partial_input(u"dünyahali", u"dünyahaline")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'dünyahal'))
assert_that(roots[0].lexeme.root, equal_to(u'dünyahali'))
assert_that(roots[0].lexeme.lemma, equal_to(u'dünyahali'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes,
equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony}))
# with InverseHarmony and possible voicing
roots = self.root_finder.find_roots_for_partial_input(u"abcvaadi", u"abcvaadini")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'abcvaad'))
assert_that(roots[0].lexeme.root, equal_to(u'abcvaadi'))
assert_that(roots[0].lexeme.lemma, equal_to(u'abcvaadi'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes,
equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony}))
assert_that(roots[1].str, equal_to(u'abcvaat'))
assert_that(roots[1].lexeme.root, equal_to(u'abcvaadi'))
assert_that(roots[1].lexeme.lemma, equal_to(u'abcvaadi'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes,
equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony}))
# with InverseHarmony and explicit NoVoicing
roots = self.root_finder.find_roots_for_partial_input(u"anaşefkati", u"anaşefkatini")
assert_that(roots, has_length(1))
assert_that(roots[0].str, equal_to(u'anaşefkat'))
assert_that(roots[0].lexeme.root, equal_to(u'anaşefkati'))
assert_that(roots[0].lexeme.lemma, equal_to(u'anaşefkati'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes,
equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony, LexemeAttribute.NoVoicing}))
# with doubling
roots = self.root_finder.find_roots_for_partial_input(u"gönülsırrı", u"gönülsırrına")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'gönülsırr'))
assert_that(roots[0].lexeme.root, equal_to(u'gönülsırrı'))
assert_that(roots[0].lexeme.lemma, equal_to(u'gönülsırrı'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg}))
assert_that(roots[1].str, equal_to(u'gönülsır'))
assert_that(roots[1].lexeme.root, equal_to(u'gönülsırrı'))
assert_that(roots[1].lexeme.lemma, equal_to(u'gönülsırrı'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.Doubling}))
# with doubling and explicit NoVoicing
roots = self.root_finder.find_roots_for_partial_input(u"müşterihakkı", u"müşterihakkına")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'müşterihakk'))
assert_that(roots[0].lexeme.root, equal_to(u'müşterihakkı'))
assert_that(roots[0].lexeme.lemma, equal_to(u'müşterihakkı'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.NoVoicing}))
assert_that(roots[1].str, equal_to(u'müşterihak'))
assert_that(roots[1].lexeme.root, equal_to(u'müşterihakkı'))
assert_that(roots[1].lexeme.lemma, equal_to(u'müşterihakkı'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes,
equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.NoVoicing, LexemeAttribute.Doubling}))
# with doubling and InverseHarmony
roots = self.root_finder.find_roots_for_partial_input(u"olaymahalli", u"olaymahalline")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'olaymahall'))
assert_that(roots[0].lexeme.root, equal_to(u'olaymahalli'))
assert_that(roots[0].lexeme.lemma, equal_to(u'olaymahalli'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes,
equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony}))
assert_that(roots[1].str, equal_to(u'olaymahal'))
assert_that(roots[1].lexeme.root, equal_to(u'olaymahalli'))
assert_that(roots[1].lexeme.lemma, equal_to(u'olaymahalli'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes,
equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony, LexemeAttribute.Doubling}))
# with doubling, possible voicing and inverse harmony
roots = self.root_finder.find_roots_for_partial_input(u"yaşhaddi", u"yaşhaddinden")
assert_that(roots, has_length(3))
assert_that(roots[0].str, equal_to(u'yaşhadd'))
assert_that(roots[0].lexeme.root, equal_to(u'yaşhaddi'))
assert_that(roots[0].lexeme.lemma, equal_to(u'yaşhaddi'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes,
equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony}))
assert_that(roots[1].str, equal_to(u'yaşhad'))
assert_that(roots[1].lexeme.root, equal_to(u'yaşhaddi'))
assert_that(roots[1].lexeme.lemma, equal_to(u'yaşhaddi'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes,
equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony, LexemeAttribute.Doubling}))
assert_that(roots[2].str, equal_to(u'yaşhat'))
assert_that(roots[2].lexeme.root, equal_to(u'yaşhaddi'))
assert_that(roots[2].lexeme.lemma, equal_to(u'yaşhaddi'))
assert_that(roots[2].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[2].lexeme.attributes,
equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony, LexemeAttribute.Doubling}))
def test_should_create_roots_with_consonant_insertion_s(self):
# most of the following words are made up!
roots = self.root_finder.find_roots_for_partial_input(u"suborusu", u"suborusuna")
assert_that(roots, has_length(2))
assert_that(roots[0].str, equal_to(u'suborus'))
assert_that(roots[0].lexeme.root, equal_to(u'suborusu'))
assert_that(roots[0].lexeme.lemma, equal_to(u'suborusu'))
assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg}))
assert_that(roots[1].str, equal_to(u'suboru'))
assert_that(roots[1].lexeme.root, equal_to(u'suborusu'))
assert_that(roots[1].lexeme.lemma, equal_to(u'suborusu'))
assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
assert_that(roots[1].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg}))
# InverseHarmony and consonant 's' don't work together.
# Compound gets the 's' if it ends with a vowel.
# However, a word ending with a vowel cannot have InverseHarmony.
# Thus, this is an invalid case!
#roots = self.root_finder.find_roots_for_partial_input(u"abcdesı", u"abcdesına")
#assert_that(roots, has_length(2))
#assert_that(roots[0].str, equal_to(u'abcdes'))
#assert_that(roots[0].lexeme.root, equal_to(u'abcdesı'))
#assert_that(roots[0].lexeme.lemma, equal_to(u'abcdesı'))
#assert_that(roots[0].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
#assert_that(roots[0].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony}))
#assert_that(roots[1].str, equal_to(u'abcde'))
#assert_that(roots[1].lexeme.root, equal_to(u'abcdesı'))
#assert_that(roots[1].lexeme.lemma, equal_to(u'abcdesı'))
#assert_that(roots[1].lexeme.syntactic_category, equal_to(SyntacticCategory.NOUN))
#assert_that(roots[1].lexeme.attributes, equal_to({LexemeAttribute.CompoundP3sg, LexemeAttribute.InverseHarmony}))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "ce98d6114a495839df797e870b8e6a59",
"timestamp": "",
"source": "github",
"line_count": 681,
"max_line_length": 133,
"avg_line_length": 57.65932452276065,
"alnum_prop": 0.6732542148423573,
"repo_name": "aliok/trnltk",
"id": "cd5a3085697f135c1e943a164a15cf1b64a32229",
"size": "39424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trnltk/morphology/contextless/parser/test/test_bruteforce_noun_rootfinders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "60232"
},
{
"name": "Python",
"bytes": "1320401"
},
{
"name": "Shell",
"bytes": "2191"
}
],
"symlink_target": ""
}
|
from .is_json_where import is_json_where
from .is_fixed import is_fixed
is_json = is_json_where(is_fixed(True, 'json', 'data is json'))
|
{
"content_hash": "86facc66ca883dea15bb9a84a261a3b5",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 63,
"avg_line_length": 27.6,
"alnum_prop": 0.7101449275362319,
"repo_name": "Daanvdk/is_valid",
"id": "625480879c235cead70c07ef33f38e7ed365c9ac",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "is_valid/is_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111083"
}
],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
class Publisher(models.Model):
name = models.CharField(max_length=30)
address = models.CharField(max_length=50)
city = models.CharField(max_length=60)
state_province = models.CharField(max_length=30)
country = models.CharField(max_length=50)
website = models.URLField()
def __unicode__(self):
return self.name
class Meta:
ordering = ["name"]
class Author(models.Model):
salutation = models.CharField(max_length=10)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=40)
email = models.EmailField(blank=True, verbose_name='e-mail')
def __unicode__(self):
return u'%s %s' % (self.first_name, self.last_name)
class Book(models.Model):
title = models.CharField(max_length=100)
authors = models.ManyToManyField(Author)
publisher = models.ForeignKey(Publisher)
publication_date = models.DateField(blank=True, null=True)
def __unicode__(self):
return self.title
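# A usage sketch with illustrative values (e.g. from a Django shell); none of
# these records exist in the source project:
#
#   p = Publisher.objects.create(name='Apress', address='...', city='Berkeley',
#                                state_province='CA', country='USA',
#                                website='http://www.apress.com/')
#   a = Author.objects.create(salutation='Mr.', first_name='John',
#                             last_name='Smith', email='[email protected]')
#   b = Book.objects.create(title='Sample Book', publisher=p)
#   b.authors.add(a)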
|
{
"content_hash": "eb2a4982975796decbe5f8e3964168e8",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 64,
"avg_line_length": 31,
"alnum_prop": 0.6645161290322581,
"repo_name": "hr4e/QuestionnaireQx",
"id": "63e214cf71e3edae380ef9f5efc43c268286d587",
"size": "1085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrsites/books/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import time
import atexit
import logging
import threading
import traceback
logger = logging.getLogger(__name__)
import synapse.common as s_common
import synapse.lib.task as s_task
import synapse.lib.threads as s_threads
from synapse.eventbus import EventBus
class Sched(EventBus):
def __init__(self, pool=None):
EventBus.__init__(self)
if pool is None:
pool = s_threads.Pool()
self.pool = pool
self.root = None
self.lock = threading.Lock()
self.wake = threading.Event()
self.thr = self._runSchedMain()
self.onfini(self._onSchedFini)
def _onSchedFini(self):
self.wake.set()
self.thr.join()
def at(self, ts, func, *args, **kwargs):
'''
Schedule a function to run at a specific time.
Example:
# call foo(bar,baz=10) at ts
sched.at(ts, foo, bar, baz=10)
'''
work = (func, args, kwargs)
mine = [ts, work, None]
with self.lock:
# if no root, we're it!
if self.root is None:
self.root = mine
self.wake.set()
return mine
# if we're sooner, push and wake!
if self.root[0] >= ts:
mine[2] = self.root
self.root = mine
self.wake.set()
return mine
# we know we're past this one
step = self.root
while True:
# if no next, we're it!
if step[2] is None:
step[2] = mine
return mine
# if we're sooner than next, insert!
if step[2][0] > ts:
mine[2] = step[2]
step[2] = mine
return mine
# move along to next
step = step[2]
def insec(self, delay, func, *args, **kwargs):
'''
Schedule a callback to occur in delay seconds.
Example:
def woot(x,y):
stuff()
sched = Sched()
e = sched.insec(10, woot, 10, 20)
# woot will be called in 10 seconds..
'''
return self.at(time.time() + delay, func, *args, **kwargs)
def persec(self, count, func, *args, **kwargs):
'''
Schedule a callback to occur count times per second.
Args:
count: Number of times per second for this to occur. Either an int or a float.
func: Function to execute.
*args: Args passed to the function.
**kwargs: Kwargs passed to the function.
Examples:
Schedule a function to be called 10 times per second::
def tenpersec(x,y=None):
blah()
sched = Sched()
sched.persec(10, tenpersec, 10, y='woot')
Notes:
This indefinitely calls the scheduled function until the function
returns False or the Task is fini'd. See the Sched.loop function
for more details.
Returns:
s_task.Task: A Task object representing the object's execution.
'''
secs = 1.0 / count
return self.loop(secs, func, *args, **kwargs)
def loop(self, secs, func, *args, **kwargs):
'''
Call the given function in a delay loop.
Args:
secs (int): Seconds between loop calls (can be float)
func (function): The function to call
args (list): The call arguments
kwargs (dict): The call keyword arguments
Examples:
Schedule a function to be called once every 10 seconds::
def tensec(x,y=None):
blah()
sched = Sched()
sched.loop(10, tensec, 10, y='woot')
Notes:
If the function returns False, the loop will explicitly break.
If the task object is isfini'd, the loop will explicitly break.
In either of those scenarios, the task will not be scheduled for further execution.
Returns:
s_task.Task: A Task object representing the object's execution.
'''
task = s_task.Task()
def run():
if task.isfini:
return
try:
if func(*args, **kwargs) is False:
task.fini()
return
except Exception as e:
logger.exception(e)
if not self.isfini and not task.isfini:
self.insec(secs, run)
run()
return task
def cancel(self, item):
'''
Cancel a previously scheduled call.
Example:
def woot(x,y):
stuff()
sched = Sched()
item = sched.insec(10, woot, 10, 20)
sched.cancel(item)
'''
item[1] = None
@s_common.firethread
def _runSchedMain(self):
for task in self.yieldTimeTasks():
try:
func, args, kwargs = task
self.pool.call(func, *args, **kwargs)
except Exception as e:
logger.exception(e)
def _getNextWait(self):
timeout = None
if self.root:
timeout = self.root[0] - time.time()
if timeout <= 0:
timeout = 0
return timeout
def yieldTimeTasks(self):
# a blocking yield generator for sched tasks
while not self.isfini:
with self.lock:
timeout = self._getNextWait()
self.wake.clear()
if timeout != 0:
self.wake.wait(timeout=timeout)
if self.isfini:
return
item = None
with self.lock:
now = time.time()
if self.root and self.root[0] <= now:
item = self.root[1]
self.root = self.root[2]
if item is not None:
yield item
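# A brief usage sketch (illustrative; poll_feed is a hypothetical callable):
#
#   sched = Sched()
#   item = sched.insec(5, print, 'five seconds later')  # one-shot call
#   task = sched.loop(2, poll_feed)                     # repeats every 2 seconds
#   sched.cancel(item)                                  # cancel the pending one-shot
#   task.fini()                                         # stop the loop via its Task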
|
{
"content_hash": "7f1d23ac2357fa321dce8753f16db508",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 95,
"avg_line_length": 25.573221757322177,
"alnum_prop": 0.493782722513089,
"repo_name": "vivisect/synapse",
"id": "b94d2c46186e5f601bb74c1277a09d72171720cc",
"size": "6112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synapse/lib/sched.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "716598"
}
],
"symlink_target": ""
}
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import pkg_resources
import sys
version = open('TileStache/VERSION', 'r').read().strip()
def is_installed(name):
try:
pkg_resources.get_distribution(name)
return True
except:
return False
requires = ['ModestMaps >=1.3.0','simplejson', 'Werkzeug', 'Pillow']
setup(name='TileStache',
version=version,
description='A stylish alternative for caching your map tiles.',
author='Michal Migurski',
author_email='[email protected]',
url='http://tilestache.org',
install_requires=requires,
packages=['TileStache',
'TileStache.Vector',
'TileStache.Goodies',
'TileStache.Goodies.Caches',
'TileStache.Goodies.Providers',
'TileStache.Goodies.VecTiles'],
scripts=['scripts/tilestache-compose.py', 'scripts/tilestache-seed.py', 'scripts/tilestache-clean.py', 'scripts/tilestache-server.py', 'scripts/tilestache-render.py', 'scripts/tilestache-list.py'],
data_files=[('share/tilestache', ['TileStache/Goodies/Providers/DejaVuSansMono-alphanumeric.ttf'])],
package_data={'TileStache': ['VERSION', '../doc/*.html']},
license='BSD')
|
{
"content_hash": "c76561bd5cd5809fbd5f94e32477bf2d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 203,
"avg_line_length": 32.325,
"alnum_prop": 0.6465583913379737,
"repo_name": "TileStache/TileStache",
"id": "385984fe0c4049a84a2755101cfd677f60aabc56",
"size": "1316",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "527"
},
{
"name": "HTML",
"bytes": "70312"
},
{
"name": "Makefile",
"bytes": "2776"
},
{
"name": "Python",
"bytes": "493285"
},
{
"name": "Shell",
"bytes": "2414"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import random
import string
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.LOW
def tamper(payload, **kwargs):
"""
Replaces space character (' ') with a dash comment ('--') followed by
a random string and a new line ('\n')
Requirement:
* MSSQL
* SQLite
Notes:
* Useful to bypass several web application firewalls
* Used during the ZeroNights SQL injection challenge,
https://proton.onsec.ru/contest/
>>> random.seed(0)
>>> tamper('1 AND 9227=9227')
'1--nVNaVoPYeva%0AAND--ngNvzqu%0A9227=9227'
"""
retVal = ""
if payload:
for i in xrange(len(payload)):
if payload[i].isspace():
randomStr = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in xrange(random.randint(6, 12)))
retVal += "--%s%%0A" % randomStr
elif payload[i] == '#' or payload[i:i + 3] == '-- ':
retVal += payload[i:]
break
else:
retVal += payload[i]
return retVal
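# Usage sketch: sqlmap applies this transform when run with the --tamper
# option (target URL is illustrative):
#
#   sqlmap -u "http://target.example/vuln.php?id=1" --tamper=space2dash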
|
{
"content_hash": "88c422575a13e317c7a703e949439cc4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 138,
"avg_line_length": 26.933333333333334,
"alnum_prop": 0.5734323432343235,
"repo_name": "V11/volcano",
"id": "cdd828d5693f512e12441cf9a99a11274b900af7",
"size": "1235",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "server/sqlmap/tamper/space2dash.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "118"
},
{
"name": "JavaScript",
"bytes": "41"
}
],
"symlink_target": ""
}
|
from mathbind.compilers.compiler import Compiler
from mathbind.compilers.gcc import GccCompiler
|
{
"content_hash": "9a11e5634fc775c8edac3d301a749e2f",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 48,
"avg_line_length": 47.5,
"alnum_prop": 0.8842105263157894,
"repo_name": "diogenes1oliveira/mathbind",
"id": "4689b1d518da3a2b1426dbcabf345cb115432a37",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mathbind/compilers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67784"
}
],
"symlink_target": ""
}
|
"""Operators that integrat with Google Cloud Build service."""
import re
from copy import deepcopy
from typing import Any, Dict, Iterable, Optional
from urllib.parse import unquote, urlparse
from airflow import AirflowException
from airflow.gcp.hooks.cloud_build import CloudBuildHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
REGEX_REPO_PATH = re.compile(r"^/p/(?P<project_id>[^/]+)/r/(?P<repo_name>[^/]+)")
class BuildProcessor:
"""
Processes build configurations to make them easier to use from operators.
The following improvements are made:
* The source is required, and only one source type can be given.
* The source may be provided as a URL string instead of a dict.
:param body: The request body.
See: https://cloud.google.com/cloud-build/docs/api/reference/rest/Shared.Types/Build
:type body: dict
"""
def __init__(self, body: Dict) -> None:
self.body = deepcopy(body)
def _verify_source(self):
is_storage = "storageSource" in self.body["source"]
is_repo = "repoSource" in self.body["source"]
sources_count = sum([is_storage, is_repo])
if sources_count != 1:
raise AirflowException(
"The source could not be determined. Please choose one data source from: "
"storageSource and repoSource."
)
def _reformat_source(self):
self._reformat_repo_source()
self._reformat_storage_source()
def _reformat_repo_source(self):
if "repoSource" not in self.body["source"]:
return
source = self.body["source"]["repoSource"]
if not isinstance(source, str):
return
self.body["source"]["repoSource"] = self._convert_repo_url_to_dict(source)
def _reformat_storage_source(self):
if "storageSource" not in self.body["source"]:
return
source = self.body["source"]["storageSource"]
if not isinstance(source, str):
return
self.body["source"]["storageSource"] = self._convert_storage_url_to_dict(source)
def process_body(self):
"""
Processes the body passed in the constructor
:return: the body.
:type: dict
"""
self._verify_source()
self._reformat_source()
return self.body
@staticmethod
def _convert_repo_url_to_dict(source):
"""
Convert url to repository in Google Cloud Source to a format supported by the API
Example valid input:
.. code-block:: none
https://source.developers.google.com/p/airflow-project/r/airflow-repo#branch-name
"""
url_parts = urlparse(source)
match = REGEX_REPO_PATH.search(url_parts.path)
if url_parts.scheme != "https" or url_parts.hostname != "source.developers.google.com" or not match:
raise AirflowException(
"Invalid URL. You must pass the URL in the format: "
"https://source.developers.google.com/p/airflow-project/r/airflow-repo#branch-name"
)
project_id = unquote(match.group("project_id"))
repo_name = unquote(match.group("repo_name"))
source_dict = {"projectId": project_id, "repoName": repo_name, "branchName": "master"}
if url_parts.fragment:
source_dict["branchName"] = url_parts.fragment
return source_dict
@staticmethod
def _convert_storage_url_to_dict(storage_url: str) -> Dict[str, Any]:
"""
Convert url to object in Google Cloud Storage to a format supported by the API
Example valid input:
.. code-block:: none
gs://bucket-name/object-name.tar.gz
"""
url_parts = urlparse(storage_url)
if url_parts.scheme != "gs" or not url_parts.hostname or not url_parts.path or url_parts.path == "/":
raise AirflowException(
"Invalid URL. You must pass the URL in the format: "
"gs://bucket-name/object-name.tar.gz#24565443"
)
source_dict = {"bucket": url_parts.hostname, "object": url_parts.path[1:]}
if url_parts.fragment:
source_dict["generation"] = url_parts.fragment
return source_dict
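# A minimal usage sketch (illustrative project/repo values): a repository URL
# passed as "repoSource" is expanded by process_body() into the dict form the
# Cloud Build API expects.
#
#   body = {"source": {"repoSource":
#               "https://source.developers.google.com/p/my-project/r/my-repo#dev"},
#           "steps": [{"name": "gcr.io/cloud-builders/docker", "args": ["build", "."]}]}
#   BuildProcessor(body=body).process_body()["source"]["repoSource"]
#   # -> {"projectId": "my-project", "repoName": "my-repo", "branchName": "dev"}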
class CloudBuildCreateBuildOperator(BaseOperator):
"""
Starts a build with the specified configuration.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildCreateBuildOperator`
:param body: The request body.
See: https://cloud.google.com/cloud-build/docs/api/reference/rest/Shared.Types/Build
:type body: dict
:param project_id: ID of the Google Cloud project if None then
default project_id is used.
:type project_id: str
:param gcp_conn_id: The connection ID to use to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param api_version: API version used (for example v1 or v1beta1).
:type api_version: str
"""
template_fields = ("body", "gcp_conn_id", "api_version") # type: Iterable[str]
@apply_defaults
def __init__(self,
body: dict,
project_id: Optional[str] = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.body = body
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self._validate_inputs()
def _validate_inputs(self):
if not self.body:
raise AirflowException("The required parameter 'body' is missing")
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, api_version=self.api_version)
body = BuildProcessor(body=self.body).process_body()
return hook.create_build(body=body, project_id=self.project_id)
|
{
"content_hash": "9040a67b295bba3c737dd9b19a073edb",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 109,
"avg_line_length": 33.25136612021858,
"alnum_prop": 0.6218570254724733,
"repo_name": "Fokko/incubator-airflow",
"id": "e1a9d0256c925ef3530f3a966c75ad5966a48ac7",
"size": "6896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/gcp/operators/cloud_build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "14170"
},
{
"name": "HTML",
"bytes": "145596"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "8787104"
},
{
"name": "Shell",
"bytes": "187296"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
import sys
import telepot
from telepot.delegate import per_inline_from_id, create_open, pave_event_space
"""
$ python3.5 inline.py <token>
It demonstrates answering inline queries and receiving chosen inline results.
"""
class InlineHandler(telepot.helper.InlineUserHandler, telepot.helper.AnswererMixin):
def __init__(self, *args, **kwargs):
super(InlineHandler, self).__init__(*args, **kwargs)
def on_inline_query(self, msg):
def compute_answer():
query_id, from_id, query_string = telepot.glance(msg, flavor='inline_query')
print(self.id, ':', 'Inline Query:', query_id, from_id, query_string)
articles = [{'type': 'article',
'id': 'abc', 'title': query_string, 'message_text': query_string}]
return articles
self.answerer.answer(msg, compute_answer)
def on_chosen_inline_result(self, msg):
from pprint import pprint
pprint(msg)
result_id, from_id, query_string = telepot.glance(msg, flavor='chosen_inline_result')
print(self.id, ':', 'Chosen Inline Result:', result_id, from_id, query_string)
TOKEN = sys.argv[1]
bot = telepot.DelegatorBot(TOKEN, [
pave_event_space()(
per_inline_from_id(), create_open, InlineHandler, timeout=10),
])
bot.message_loop(run_forever='Listening ...')
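# Note: inline queries only reach the bot if inline mode has been enabled for
# it via BotFather's /setinline command.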
|
{
"content_hash": "475ee0943d8210471eddee8395c20142",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 95,
"avg_line_length": 35.64102564102564,
"alnum_prop": 0.6510791366906474,
"repo_name": "TEJESH/gandhi",
"id": "e7011df23d4807d3f423c6b92ac073fe4b80c1bb",
"size": "1390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66693"
}
],
"symlink_target": ""
}
|
"""
Support for the Free Mobile SMS platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.free_mobile/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (
PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['freesms==0.1.0']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_ACCESS_TOKEN): cv.string,
})
def get_service(hass, config):
"""Get the Free Mobile SMS notification service."""
return FreeSMSNotificationService(config[CONF_USERNAME],
config[CONF_ACCESS_TOKEN])
# pylint: disable=too-few-public-methods
class FreeSMSNotificationService(BaseNotificationService):
"""Implement a notification service for the Free Mobile SMS service."""
def __init__(self, username, access_token):
"""Initialize the service."""
from freesms import FreeClient
self.free_client = FreeClient(username, access_token)
def send_message(self, message="", **kwargs):
"""Send a message to the Free Mobile user cell."""
resp = self.free_client.send_sms(message)
if resp.status_code == 400:
_LOGGER.error("At least one parameter is missing")
elif resp.status_code == 402:
_LOGGER.error("Too much SMS send in a few time")
elif resp.status_code == 403:
_LOGGER.error("Wrong Username/Password")
elif resp.status_code == 500:
_LOGGER.error("Server error, try later")
|
{
"content_hash": "a919545c22b7c4065fe71429109e4cf2",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 75,
"avg_line_length": 34.03846153846154,
"alnum_prop": 0.6830508474576271,
"repo_name": "hexxter/home-assistant",
"id": "e5209e06582067958138f6e01cf23be83ef37e5c",
"size": "1770",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/notify/free_mobile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1371597"
},
{
"name": "Python",
"bytes": "3699472"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class LegendwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="legendwidth", parent_name="scattergl", **kwargs):
super(LegendwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
{
"content_hash": "5e2dd9697ea755d28d59977d1ef3a077",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 37.5,
"alnum_prop": 0.6177777777777778,
"repo_name": "plotly/plotly.py",
"id": "013254c07f23aafde14c93bfd05e0ce6a2966f36",
"size": "450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergl/_legendwidth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""
Django settings for heroku_blog project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e9#d75ju-ow%xh22w*@lg-1s@o9kq1*un@sdb(g22t7zy2)(35'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'heroku_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'heroku_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
CELERY_BROKER_URL = 'redis://h:p3f9d67d2b4a54525ea18a7af8c6c01e3de88e02620e962b794cc3a605d50d48c@ec2-34-206-214-110.compute-1.amazonaws.com:33139'
CELERY_RESULT_BACKEND = 'redis://h:p3f9d67d2b4a54525ea18a7af8c6c01e3de88e02620e962b794cc3a605d50d48c@ec2-34-206-214-110.compute-1.amazonaws.com:33139'
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
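# Note: on Heroku these broker/result URLs would normally come from the
# environment rather than being hard-coded with credentials, e.g.
# (illustrative):
#
#   CELERY_BROKER_URL = os.environ.get('REDIS_URL', CELERY_BROKER_URL)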
|
{
"content_hash": "0d3875de32e13c7c8842e979e5ed1007",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 150,
"avg_line_length": 27.637037037037036,
"alnum_prop": 0.704100777271509,
"repo_name": "barbossa/django-heroku-blog",
"id": "b97d7df5b5aa5f40bb278d42ffaa641412f0d5cc",
"size": "3731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heroku_blog/settings/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2430"
},
{
"name": "HTML",
"bytes": "3998"
},
{
"name": "Python",
"bytes": "11683"
}
],
"symlink_target": ""
}
|
"""
__graph_MT_post__ComponentPrototype.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
________________________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_post__ComponentPrototype(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 153, 80
graphEntity.__init__(self, x, y)
self.ChangesAtRunTime = 0
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
self.imageDict = self.getImageDict()
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
h = drawing.create_oval(self.translate([160.0, 60.0, 160.0, 60.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
self.connectors.append( h )
h = drawing.create_rectangle(self.translate([21.0, 19.0, 164.0, 97.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'skyblue1')
self.gf8 = GraphicalForm(drawing, h, "gf8")
self.graphForms.append(self.gf8)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([93.0, 29.0, 93.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'MT_post__ComponentPrototype', width = '0', justify= 'left', stipple='' )
self.gf39 = GraphicalForm(drawing, h, 'gf39', fontObject=font)
self.graphForms.append(self.gf39)
helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
h = drawing.create_text(self.translate([-3, -3]), font=helv12,
tags = (self.tag, self.semanticObject.getClass()),
fill = "black",
text=self.semanticObject.MT_label__.toString())
self.attr_display["MT_label__"] = h
self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
self.graphForms.append(self.gf_label)
def postCondition( self, actionID, * params):
return None
def preCondition( self, actionID, * params):
return None
def getImageDict( self ):
imageDict = dict()
return imageDict
new_class = graph_MT_post__ComponentPrototype
|
{
"content_hash": "55061f42b84486aa47bc242a342adbe3",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 221,
"avg_line_length": 42.38095238095238,
"alnum_prop": 0.5805243445692884,
"repo_name": "levilucio/SyVOLT",
"id": "a466a67bde7f1e081fdcfa23ef491e6454938c33",
"size": "2670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GM2AUTOSAR_MM/graph_MT_post__ComponentPrototype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
"""Unit tests for Superset"""
from datetime import datetime
import imp
import json
from contextlib import contextmanager
from typing import Any, Dict, Union, List, Optional
from unittest.mock import Mock, patch, MagicMock
import pandas as pd
from flask import Response
from flask_appbuilder.security.sqla import models as ab_models
from flask_testing import TestCase
from sqlalchemy.engine.interfaces import Dialect
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm import Session
from sqlalchemy.sql import func
from sqlalchemy.dialects.mysql import dialect
from tests.integration_tests.test_app import app, login
from superset.sql_parse import CtasMethod
from superset import db, security_manager
from superset.connectors.base.models import BaseDatasource
from superset.connectors.sqla.models import SqlaTable
from superset.models import core as models
from superset.models.slice import Slice
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.utils.core import get_example_default_schema
from superset.utils.database import get_example_database
from superset.views.base_api import BaseSupersetModelRestApi
FAKE_DB_NAME = "fake_db_100"
test_client = app.test_client()
def get_resp(
client: Any,
url: str,
data: Any = None,
follow_redirects: bool = True,
raise_on_error: bool = True,
json_: Optional[str] = None,
):
"""Shortcut to get the parsed results while following redirects"""
if data:
resp = client.post(url, data=data, follow_redirects=follow_redirects)
elif json_:
resp = client.post(url, json=json_, follow_redirects=follow_redirects)
else:
resp = client.get(url, follow_redirects=follow_redirects)
if raise_on_error and resp.status_code > 400:
raise Exception("http request failed with code {}".format(resp.status_code))
return resp.data.decode("utf-8")
def post_assert_metric(
client: Any, uri: str, data: Dict[str, Any], func_name: str
) -> Response:
"""
Simple client post with an extra assertion for statsd metrics
:param client: test client for superset api requests
:param uri: The URI to use for the HTTP POST
:param data: The JSON data payload to be posted
:param func_name: The function name that the HTTP POST triggers
for the statsd metric assertion
:return: HTTP Response
"""
with patch.object(
BaseSupersetModelRestApi, "incr_stats", return_value=None
) as mock_method:
rv = client.post(uri, json=data)
if 200 <= rv.status_code < 400:
mock_method.assert_called_once_with("success", func_name)
else:
mock_method.assert_called_once_with("error", func_name)
return rv
class SupersetTestCase(TestCase):
default_schema_backend_map = {
"sqlite": "main",
"mysql": "superset",
"postgresql": "public",
"presto": "default",
"hive": "default",
}
maxDiff = -1
def create_app(self):
return app
@staticmethod
def get_nonexistent_numeric_id(model):
return (db.session.query(func.max(model.id)).scalar() or 0) + 1
@staticmethod
def get_birth_names_dataset() -> SqlaTable:
return SupersetTestCase.get_table(name="birth_names")
@staticmethod
def create_user_with_roles(
username: str, roles: List[str], should_create_roles: bool = False
):
user_to_create = security_manager.find_user(username)
if not user_to_create:
security_manager.add_user(
username,
username,
username,
f"{username}@superset.com",
security_manager.find_role("Gamma"), # it needs a role
password="general",
)
db.session.commit()
user_to_create = security_manager.find_user(username)
assert user_to_create
user_to_create.roles = []
for chosen_user_role in roles:
if should_create_roles:
# copy role from gamma but without data permissions
security_manager.copy_role("Gamma", chosen_user_role, merge=False)
user_to_create.roles.append(security_manager.find_role(chosen_user_role))
db.session.commit()
return user_to_create
@staticmethod
def create_user(
username: str,
password: str,
role_name: str,
first_name: str = "admin",
last_name: str = "user",
email: str = "[email protected]",
) -> Union[ab_models.User, bool]:
role_admin = security_manager.find_role(role_name)
return security_manager.add_user(
username, first_name, last_name, email, role_admin, password
)
@staticmethod
def get_user(username: str) -> ab_models.User:
user = (
db.session.query(security_manager.user_model)
.filter_by(username=username)
.one_or_none()
)
return user
@staticmethod
def get_role(name: str) -> Optional[ab_models.User]:
user = (
db.session.query(security_manager.role_model)
.filter_by(name=name)
.one_or_none()
)
return user
@staticmethod
def get_table_by_id(table_id: int) -> SqlaTable:
return db.session.query(SqlaTable).filter_by(id=table_id).one()
@staticmethod
def is_module_installed(module_name):
try:
imp.find_module(module_name)
return True
except ImportError:
return False
def get_or_create(self, cls, criteria, session, **kwargs):
obj = session.query(cls).filter_by(**criteria).first()
if not obj:
obj = cls(**criteria)
obj.__dict__.update(**kwargs)
session.add(obj)
session.commit()
return obj
def login(self, username="admin", password="general"):
return login(self.client, username, password)
def get_slice(
self, slice_name: str, session: Session, expunge_from_session: bool = True
) -> Slice:
slc = session.query(Slice).filter_by(slice_name=slice_name).one()
if expunge_from_session:
session.expunge_all()
return slc
@staticmethod
def get_table(
name: str, database_id: Optional[int] = None, schema: Optional[str] = None
) -> SqlaTable:
schema = schema or get_example_default_schema()
return (
db.session.query(SqlaTable)
.filter_by(
database_id=database_id
or SupersetTestCase.get_database_by_name("examples").id,
schema=schema,
table_name=name,
)
.one()
)
@staticmethod
def get_database_by_id(db_id: int) -> Database:
return db.session.query(Database).filter_by(id=db_id).one()
@staticmethod
def get_database_by_name(database_name: str = "main") -> Database:
if database_name == "examples":
return get_example_database()
else:
raise ValueError("Database doesn't exist")
@staticmethod
def get_datasource_mock() -> BaseDatasource:
datasource = MagicMock()
results = Mock()
results.query = Mock()
results.status = Mock()
results.error_message = None
results.df = pd.DataFrame()
datasource.type = "table"
datasource.query = Mock(return_value=results)
mock_dttm_col = Mock()
datasource.get_col = Mock(return_value=mock_dttm_col)
datasource.query = Mock(return_value=results)
datasource.database = Mock()
datasource.database.db_engine_spec = Mock()
datasource.database.db_engine_spec.mutate_expression_label = lambda x: x
datasource.owners = MagicMock()
return datasource
def get_resp(
self, url, data=None, follow_redirects=True, raise_on_error=True, json_=None
):
return get_resp(self.client, url, data, follow_redirects, raise_on_error, json_)
def get_json_resp(
self, url, data=None, follow_redirects=True, raise_on_error=True, json_=None
):
"""Shortcut to get the parsed results while following redirects"""
resp = self.get_resp(url, data, follow_redirects, raise_on_error, json_)
return json.loads(resp)
def get_access_requests(self, username, ds_type, ds_id):
DAR = DatasourceAccessRequest
return (
db.session.query(DAR)
.filter(
DAR.created_by == security_manager.find_user(username=username),
DAR.datasource_type == ds_type,
DAR.datasource_id == ds_id,
)
.first()
)
def logout(self):
self.client.get("/logout/", follow_redirects=True)
def grant_public_access_to_table(self, table):
role_name = "Public"
self.grant_role_access_to_table(table, role_name)
def grant_role_access_to_table(self, table, role_name):
role = security_manager.find_role(role_name)
perms = db.session.query(ab_models.PermissionView).all()
for perm in perms:
if (
perm.permission.name == "datasource_access"
and perm.view_menu
and table.perm in perm.view_menu.name
):
security_manager.add_permission_role(role, perm)
def revoke_public_access_to_table(self, table):
role_name = "Public"
self.revoke_role_access_to_table(role_name, table)
def revoke_role_access_to_table(self, role_name, table):
public_role = security_manager.find_role(role_name)
perms = db.session.query(ab_models.PermissionView).all()
for perm in perms:
if (
perm.permission.name == "datasource_access"
and perm.view_menu
and table.perm in perm.view_menu.name
):
security_manager.del_permission_role(public_role, perm)
def run_sql(
self,
sql,
client_id=None,
username=None,
raise_on_error=False,
query_limit=None,
database_name="examples",
sql_editor_id=None,
select_as_cta=False,
tmp_table_name=None,
schema=None,
ctas_method=CtasMethod.TABLE,
template_params="{}",
):
if username:
self.logout()
self.login(username=username)
dbid = SupersetTestCase.get_database_by_name(database_name).id
json_payload = {
"database_id": dbid,
"sql": sql,
"client_id": client_id,
"queryLimit": query_limit,
"sql_editor_id": sql_editor_id,
"ctas_method": ctas_method,
"templateParams": template_params,
}
if tmp_table_name:
json_payload["tmp_table_name"] = tmp_table_name
if select_as_cta:
json_payload["select_as_cta"] = select_as_cta
if schema:
json_payload["schema"] = schema
resp = self.get_json_resp(
"/superset/sql_json/", raise_on_error=False, json_=json_payload
)
if raise_on_error and "error" in resp:
raise Exception("run_sql failed")
return resp
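    # A minimal usage sketch (query text and username are illustrative,
    # not taken from this file):
    #
    #     resp = self.run_sql("SELECT 1", client_id="c1", username="admin")
    #     assert "error" not in resp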
def create_fake_db(self):
self.login(username="admin")
database_name = FAKE_DB_NAME
db_id = 100
extra = """{
"schemas_allowed_for_file_upload":
["this_schema_is_allowed", "this_schema_is_allowed_too"]
}"""
return self.get_or_create(
cls=models.Database,
criteria={"database_name": database_name},
session=db.session,
sqlalchemy_uri="sqlite:///:memory:",
id=db_id,
extra=extra,
)
def delete_fake_db(self):
database = (
db.session.query(Database)
.filter(Database.database_name == FAKE_DB_NAME)
.scalar()
)
if database:
db.session.delete(database)
def create_fake_db_for_macros(self):
self.login(username="admin")
database_name = "db_for_macros_testing"
db_id = 200
database = self.get_or_create(
cls=models.Database,
criteria={"database_name": database_name},
session=db.session,
sqlalchemy_uri="db_for_macros_testing://user@host:8080/hive",
id=db_id,
)
def mock_get_dialect() -> Dialect:
return dialect()
database.get_dialect = mock_get_dialect
return database
@staticmethod
def delete_fake_db_for_macros():
database = (
db.session.query(Database)
.filter(Database.database_name == "db_for_macros_testing")
.scalar()
)
if database:
db.session.delete(database)
db.session.commit()
def validate_sql(
self,
sql,
client_id=None,
username=None,
raise_on_error=False,
database_name="examples",
template_params=None,
):
if username:
self.logout()
self.login(username=username)
dbid = SupersetTestCase.get_database_by_name(database_name).id
resp = self.get_json_resp(
"/superset/validate_sql_json/",
raise_on_error=False,
data=dict(
database_id=dbid,
sql=sql,
client_id=client_id,
templateParams=template_params,
),
)
if raise_on_error and "error" in resp:
raise Exception("validate_sql failed")
return resp
def get_dash_by_slug(self, dash_slug):
sesh = db.session()
return sesh.query(Dashboard).filter_by(slug=dash_slug).first()
def get_assert_metric(self, uri: str, func_name: str) -> Response:
"""
Simple client get with an extra assertion for statsd metrics
:param uri: The URI to use for the HTTP GET
:param func_name: The function name that the HTTP GET triggers
for the statsd metric assertion
:return: HTTP Response
"""
with patch.object(
BaseSupersetModelRestApi, "incr_stats", return_value=None
) as mock_method:
rv = self.client.get(uri)
if 200 <= rv.status_code < 400:
mock_method.assert_called_once_with("success", func_name)
else:
mock_method.assert_called_once_with("error", func_name)
return rv
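    # A minimal usage sketch (the URI and handler name below are
    # hypothetical, not taken from this file):
    #
    #     self.login(username="admin")
    #     rv = self.get_assert_metric("/api/v1/chart/", "get_list")
    #     self.assertEqual(rv.status_code, 200)
    #
    # This checks the HTTP result and the statsd metric emission in one call.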
def delete_assert_metric(self, uri: str, func_name: str) -> Response:
"""
Simple client delete with an extra assertion for statsd metrics
:param uri: The URI to use for the HTTP DELETE
:param func_name: The function name that the HTTP DELETE triggers
for the statsd metric assertion
:return: HTTP Response
"""
with patch.object(
BaseSupersetModelRestApi, "incr_stats", return_value=None
) as mock_method:
rv = self.client.delete(uri)
if 200 <= rv.status_code < 400:
mock_method.assert_called_once_with("success", func_name)
else:
mock_method.assert_called_once_with("error", func_name)
return rv
def post_assert_metric(
self, uri: str, data: Dict[str, Any], func_name: str
) -> Response:
return post_assert_metric(self.client, uri, data, func_name)
def put_assert_metric(
self, uri: str, data: Dict[str, Any], func_name: str
) -> Response:
"""
Simple client put with an extra assertion for statsd metrics
:param uri: The URI to use for the HTTP PUT
:param data: The JSON data payload to be posted
:param func_name: The function name that the HTTP PUT triggers
for the statsd metric assertion
:return: HTTP Response
"""
with patch.object(
BaseSupersetModelRestApi, "incr_stats", return_value=None
) as mock_method:
rv = self.client.put(uri, json=data)
if 200 <= rv.status_code < 400:
mock_method.assert_called_once_with("success", func_name)
else:
mock_method.assert_called_once_with("error", func_name)
return rv
@classmethod
def get_dttm(cls):
return datetime.strptime("2019-01-02 03:04:05.678900", "%Y-%m-%d %H:%M:%S.%f")
@contextmanager
def db_insert_temp_object(obj: DeclarativeMeta):
"""Insert a temporary object in database; delete when done."""
session = db.session
try:
session.add(obj)
session.commit()
yield obj
finally:
session.delete(obj)
session.commit()
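# A minimal usage sketch for db_insert_temp_object; `Dashboard` stands in for
# any mapped model and the slug value is made up for illustration:
#
#     with db_insert_temp_object(Dashboard(dashboard_title="tmp", slug="tmp")) as dash:
#         assert db.session.query(Dashboard).filter_by(slug="tmp").one() is dash
#     # the row is deleted again once the block exits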
|
{
"content_hash": "f2790a64078e6932de9c7c435b62daf9",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 88,
"avg_line_length": 33.4604743083004,
"alnum_prop": 0.5981336010867639,
"repo_name": "airbnb/caravel",
"id": "20e324559363fc370ea145342722540e48aeaca5",
"size": "17734",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/integration_tests/base_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57416"
},
{
"name": "HTML",
"bytes": "112618"
},
{
"name": "JavaScript",
"bytes": "406496"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "588212"
},
{
"name": "Shell",
"bytes": "980"
}
],
"symlink_target": ""
}
|
from panda3d.direct import ShowInterval
from panda3d.core import Camera, Point3, Vec3, headsUp
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
import random
import BattleParticles
from BattleProps import *
from BattleSounds import *
import MovieCamera
import MovieUtil
from otp.nametag.NametagConstants import *
from otp.nametag import NametagGlobals
from toontown.toon import NPCToons
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownBattleGlobals
notify = DirectNotifyGlobal.directNotify.newCategory('MovieNPCSOS')
soundFiles = ('AA_heal_tickle.ogg', 'AA_heal_telljoke.ogg', 'AA_heal_smooch.ogg', 'AA_heal_happydance.ogg', 'AA_heal_pixiedust.ogg', 'AA_heal_juggle.ogg')
offset = Point3(0, 4.0, 0)
def doNPCSOSs(NPCSOSs):
if len(NPCSOSs) == 0:
return (None, None)
else:
track = Sequence()
textTrack = Sequence()
for n in NPCSOSs:
ival, textIval = __doNPCSOS(n)
if ival:
track.append(ival)
textTrack.append(textIval)
camDuration = track.getDuration()
if camDuration > 0.0:
camTrack = MovieCamera.chooseHealShot(NPCSOSs, camDuration)
else:
camTrack = Sequence()
return (track, Parallel(camTrack, textTrack))
def __doNPCSOS(sos):
npcId = sos['npcId']
track, level, hp = NPCToons.getNPCTrackLevelHp(npcId)
    if track is not None:
        return NPCSOSfn_dict[track](sos, level, hp)
    else:
        return __doCogsMiss(sos, 0, 0)
def __healToon(toon, hp, ineffective = 0):
notify.debug('healToon() - toon: %d hp: %d ineffective: %d' % (toon.doId, hp, ineffective))
if ineffective == 1:
laughter = random.choice(TTLocalizer.MovieHealLaughterMisses)
else:
maxDam = ToontownBattleGlobals.AvPropDamage[0][1][0][1]
if hp >= maxDam - 1:
laughter = random.choice(TTLocalizer.MovieHealLaughterHits2)
else:
laughter = random.choice(TTLocalizer.MovieHealLaughterHits1)
toon.setChatAbsolute(laughter, CFSpeech | CFTimeout)
def __getSoundTrack(level, delay, duration = None, node = None):
soundEffect = globalBattleSoundCache.getSound(soundFiles[level])
soundIntervals = Sequence()
if soundEffect:
if duration:
playSound = SoundInterval(soundEffect, duration=duration, node=node)
else:
playSound = SoundInterval(soundEffect, node=node)
soundIntervals.append(Wait(delay))
soundIntervals.append(playSound)
return soundIntervals
def teleportIn(attack, npc, pos = Point3(0, 0, 0), hpr = Vec3(180.0, 0.0, 0.0)):
a = Func(npc.reparentTo, attack['battle'])
b = Func(npc.setPos, pos)
c = Func(npc.setHpr, hpr)
d = Func(npc.pose, 'teleport', npc.getNumFrames('teleport') - 1)
e = npc.getTeleportInTrack()
ee = Func(npc.addActive)
f = Func(npc.setChatAbsolute, TTLocalizer.MovieNPCSOSGreeting % attack['toon'].getName(), CFSpeech | CFTimeout)
g = ActorInterval(npc, 'wave')
h = Func(npc.loop, 'neutral')
seq = Sequence(a, b, c, d, e, ee, f, g, h)
seq.append(Func(npc.clearChat))
if npc.getName() == 'Magic Cat':
magicCatTrack = Sequence()
magicCatTrack.append(Func(npc.setChatAbsolute, "I've got this, so start dancing!", CFSpeech | CFTimeout))
magicCatTrack.append(Func(attack['toon'].loop, 'victory'))
seq.append(magicCatTrack)
return seq
def teleportOut(attack, npc):
if npc.style.getGender() == 'm':
a = ActorInterval(npc, 'bow')
else:
a = ActorInterval(npc, 'curtsy')
b = Func(npc.setChatAbsolute, TTLocalizer.MovieNPCSOSGoodbye, CFSpeech | CFTimeout)
c = npc.getTeleportOutTrack()
seq = Sequence(a, b, c)
seq.append(Func(npc.removeActive))
seq.append(Func(npc.detachNode))
seq.append(Func(npc.delete))
return seq
def __getPartTrack(particleEffect, startDelay, durationDelay, partExtraArgs):
pEffect = partExtraArgs[0]
parent = partExtraArgs[1]
if len(partExtraArgs) == 3:
worldRelative = partExtraArgs[2]
else:
worldRelative = 1
return Sequence(Wait(startDelay), ParticleInterval(pEffect, parent, worldRelative, duration=durationDelay, cleanup=True))
def __doSprinkle(attack, recipients, hp = 0):
toon = NPCToons.createLocalNPC(attack['npcId'])
if toon == None:
return
else:
targets = attack[recipients]
level = 4
battle = attack['battle']
track = Sequence(teleportIn(attack, toon))
def face90(target, toon, battle):
vec = Point3(target.getPos(battle) - toon.getPos(battle))
vec.setZ(0)
temp = vec[0]
vec.setX(-vec[1])
vec.setY(temp)
targetPoint = Point3(toon.getPos(battle) + vec)
toon.headsUp(battle, targetPoint)
delay = 2.5
effectTrack = Sequence()
for target in targets:
sprayEffect = BattleParticles.createParticleEffect(file='pixieSpray')
dropEffect = BattleParticles.createParticleEffect(file='pixieDrop')
explodeEffect = BattleParticles.createParticleEffect(file='pixieExplode')
poofEffect = BattleParticles.createParticleEffect(file='pixiePoof')
wallEffect = BattleParticles.createParticleEffect(file='pixieWall')
            mtrack = Parallel(
                __getPartTrack(sprayEffect, 1.5, 0.5, [sprayEffect, toon, 0]),
                __getPartTrack(dropEffect, 1.9, 2.0, [dropEffect, target, 0]),
                __getPartTrack(explodeEffect, 2.7, 1.0, [explodeEffect, toon, 0]),
                __getPartTrack(poofEffect, 3.4, 1.0, [poofEffect, target, 0]),
                __getPartTrack(wallEffect, 4.05, 1.2, [wallEffect, toon, 0]),
                __getSoundTrack(level, 2, duration=3.1, node=toon),
                Sequence(Func(face90, target, toon, battle), ActorInterval(toon, 'sprinkle-dust')),
                Sequence(Wait(delay), Func(__healToon, target, hp)))
effectTrack.append(mtrack)
track.append(effectTrack)
track.append(Func(toon.setHpr, Vec3(180.0, 0.0, 0.0)))
track.append(teleportOut(attack, toon))
return track
def __doSmooch(attack, hp = 0):
toon = NPCToons.createLocalNPC(attack['npcId'])
if toon == None:
return
else:
targets = attack['toons']
level = 2
battle = attack['battle']
track = Sequence(teleportIn(attack, toon))
lipstick = globalPropPool.getProp('lipstick')
rightHand = toon.getRightHand()
dScale = 0.5
        lipstickTrack = Sequence(
            Func(MovieUtil.showProp, lipstick, rightHand, Point3(-0.27, -0.24, -0.95), Point3(-118, -10.6, -25.9)),
            LerpScaleInterval(lipstick, dScale, MovieUtil.PNT3_NEARZERO, MovieUtil.PNT3_ONE),
            Wait(toon.getDuration('smooch') - 2.0 * dScale),
            LerpScaleInterval(lipstick, dScale, MovieUtil.PNT3_ONE, MovieUtil.PNT3_NEARZERO))
lips = globalPropPool.getProp('lips')
dScale = 0.5
tLips = 2.5
tThrow = 115.0 / toon.getFrameRate('smooch')
dThrow = 0.5
def getLipPos(toon = toon):
toon.pose('smooch', 57)
toon.update(0)
hand = toon.getRightHand()
return hand.getPos(render)
effectTrack = Sequence()
for target in targets:
lipcopy = MovieUtil.copyProp(lips)
            lipsTrack = Sequence(
                Wait(tLips),
                Func(MovieUtil.showProp, lipcopy, render, getLipPos),
                Func(lipcopy.setBillboardPointWorld),
                LerpScaleInterval(lipcopy, dScale, Point3(3, 3, 3), startScale=MovieUtil.PNT3_NEARZERO),
                Wait(tThrow - tLips - dScale),
                LerpPosInterval(lipcopy, dThrow, Point3(target.getPos() + Point3(0, 0, target.getHeight()))),
                Func(MovieUtil.removeProp, lipcopy))
delay = tThrow + dThrow
            mtrack = Parallel(
                lipstickTrack,
                lipsTrack,
                __getSoundTrack(level, 2, node=toon),
                Sequence(ActorInterval(toon, 'smooch')),
                Sequence(Wait(delay), ActorInterval(target, 'conked')),
                Sequence(Wait(delay), Func(__healToon, target, hp)))
effectTrack.append(mtrack)
effectTrack.append(Func(MovieUtil.removeProp, lipstick))
track.append(effectTrack)
track.append(teleportOut(attack, toon))
track.append(Func(target.clearChat))
return track
def __doSprinkleSOS(attack, level, hp, target, sosText):
track = __doSprinkle(attack, target, hp)
pbpText = attack['playByPlayText']
pbpTrack = pbpText.getShowInterval(sosText % level, track.getDuration())
return (track, pbpTrack)
def __doToonsHit(attack, level, hp):
return __doSprinkleSOS(attack, level, hp, 'toons', TTLocalizer.MovieNPCSOSToonsHit)
def __doCogsMiss(attack, level, hp):
return __doSprinkleSOS(attack, level, hp, 'suits', TTLocalizer.MovieNPCSOSCogsMiss)
def __doToonsPowerUp(attack, level, hp):
return __doSprinkleSOS(attack, level, hp, 'toons', TTLocalizer.MovieNPCSOSToonsPowerUp)
def __doCogsPowerDown(attack, level, hp):
return __doSprinkleSOS(attack, level, hp, 'suits', TTLocalizer.MovieNPCSOSCogsPowerDown)
def __doRestockGags(attack, level, hp):
track = __doSmooch(attack, hp)
pbpText = attack['playByPlayText']
if level == ToontownBattleGlobals.HEAL_TRACK:
text = TTLocalizer.MovieNPCSOSHeal
elif level == ToontownBattleGlobals.TRAP_TRACK:
text = TTLocalizer.MovieNPCSOSTrap
elif level == ToontownBattleGlobals.LURE_TRACK:
text = TTLocalizer.MovieNPCSOSLure
elif level == ToontownBattleGlobals.SOUND_TRACK:
text = TTLocalizer.MovieNPCSOSSound
elif level == ToontownBattleGlobals.THROW_TRACK:
text = TTLocalizer.MovieNPCSOSThrow
elif level == ToontownBattleGlobals.SQUIRT_TRACK:
text = TTLocalizer.MovieNPCSOSSquirt
elif level == ToontownBattleGlobals.DROP_TRACK:
text = TTLocalizer.MovieNPCSOSDrop
elif level == -1:
text = TTLocalizer.MovieNPCSOSAll
pbpTrack = pbpText.getShowInterval(TTLocalizer.MovieNPCSOSRestockGags % text, track.getDuration())
return (track, pbpTrack)
def doNPCTeleports(attacks):
npcs = []
npcDatas = []
arrivals = Sequence()
departures = Parallel()
for attack in attacks:
if 'npcId' in attack:
npcId = attack['npcId']
npc = NPCToons.createLocalNPC(npcId)
if npc != None:
npcs.append(npc)
attack['npc'] = npc
toon = attack['toon']
battle = attack['battle']
pos = toon.getPos(battle) + offset
hpr = toon.getHpr(battle)
npcDatas.append((npc, battle, hpr))
arrival = teleportIn(attack, npc, pos=pos)
arrivals.append(arrival)
departure = teleportOut(attack, npc)
departures.append(departure)
turns = Parallel()
unturns = Parallel()
hpr = Vec3(180.0, 0, 0)
for npc in npcDatas:
turns.append(Func(npc[0].setHpr, npc[1], npc[2]))
unturns.append(Func(npc[0].setHpr, npc[1], hpr))
arrivals.append(turns)
unturns.append(departures)
return (arrivals, unturns, npcs)
NPCSOSfn_dict = {ToontownBattleGlobals.NPC_COGS_MISS: __doCogsMiss,
ToontownBattleGlobals.NPC_TOONS_HIT: __doToonsHit,
ToontownBattleGlobals.NPC_COGS_POWER_DOWN: __doCogsPowerDown,
ToontownBattleGlobals.NPC_TOONS_POWER_UP: __doToonsPowerUp,
ToontownBattleGlobals.NPC_RESTOCK_GAGS: __doRestockGags}
|
{
"content_hash": "5499657054e4cf7f83c73edf55260f6d",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 536,
"avg_line_length": 41.785714285714285,
"alnum_prop": 0.645042735042735,
"repo_name": "DedMemez/ODS-August-2017",
"id": "edebe22660e9db838492800eb1545fb934c1fc2a",
"size": "11788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "battle/MovieNPCSOS.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10152014"
},
{
"name": "Shell",
"bytes": "707"
}
],
"symlink_target": ""
}
|
import os

import numpy as np

import beatbox

beatbox.You.create_original_Universe()
# Number of noise realizations to make
numreal=1000
#Make the mock Universe
beatbox.You.initiate_simulated_universe(printout=0)
beatbox.You.all_simulated_universes[0].show_CMB_T_map(from_perspective_of="observer")
MockUniverse = np.array([])
MockUniverse = np.append(MockUniverse, [beatbox.Universe() for i in range(numreal)])
beatbox.You.all_reconstructed_universes = np.append(beatbox.You.all_reconstructed_universes, [beatbox.Universe() for i in range(numreal)])
pvals=np.array([])
chi2vals=np.array([])
# Calculate C_yy from the 100 posterior sample Commander Planck CMB temperature maps
# or load the C_yy matrix if already calculated
if not os.path.isfile('../data/covCyy_lmax%d_lmin%d.txt' % (beatbox.Multiverse.truncated_lmax, beatbox.Multiverse.truncated_lmin)):
beatbox.You.read_Planck_samples()
beatbox.You.calculate_covariance_matrix(filename='lmax%d_lmin%d' % (beatbox.Multiverse.truncated_lmax, beatbox.Multiverse.truncated_lmin))
else:
beatbox.You.load_covariance_matrix(filename='covCyy_lmax%d_lmin%d.txt' % (beatbox.Multiverse.truncated_lmax, beatbox.Multiverse.truncated_lmin))
# Calculate the inverse of the a_y covariance matrix
beatbox.You.calculate_sdv_Cyy_inverse()
for i in range(numreal):
# First, generate one realization of noise
noise = beatbox.You.generate_one_realization_of_noise()
# Add the noise to the initial realization of the sky map
datamap = beatbox.You.all_simulated_universes[0].ay2ayreal_for_inference(beatbox.You.all_simulated_universes[0].ay)+noise
datamap = datamap.T
MockUniverse[i].ay_real=datamap
MockUniverse[i].ayreal2ay_for_mapping(datamap)
MockUniverse[i].ay2alm(MockUniverse[i].ay)
# Reconstruct the fn's
beatbox.You.solve_for_3D_potential(datamap, print_alpha = 0)
beatbox.You.all_reconstructed_universes[i].fn=beatbox.You.reconstrunct_fn
beatbox.You.all_reconstructed_universes[i].transform_3D_potential_into_alm( usedefault=1, fn=1)
#beatbox.You.all_reconstructed_universes[i].transform_3D_potential_into_alm( truncated_nmax=beatbox.You.all_reconstructed_universes[i].truncated_nmax, truncated_nmin=beatbox.You.all_reconstructed_universes[i].truncated_nmin, truncated_lmax=beatbox.You.all_reconstructed_universes[i].truncated_lmax, truncated_lmin=beatbox.You.all_reconstructed_universes[i].truncated_lmin,usedefault=1, fn=1)
p_value, chi2value = beatbox.You.calculate_chi2_in_posterior( beatbox.You.all_simulated_universes[0].fn, beatbox.You.all_reconstructed_universes[i].fn)
pvals = np.append(pvals, p_value)
chi2vals = np.append(chi2vals, chi2value)
# ---------------------------------------
## Now look at the distribution of reconstructed f_n's
## Which f_n to look at in each reconstruction:
#n = 30
#
#smarts=np.zeros(numreal)
#for i in range(numreal):
# smarts[i] = beatbox.You.all_reconstructed_universes[i].fn[n]
#print smarts[:].shape
#
## Make a histogram
#nbins, bins, patches = plt.hist(smarts, 100, normed=1, facecolor='green', alpha=0.75)
#plt.xlabel('Value of f_n for n = '+n)
#plt.ylabel('Probability')
##plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
##plt.axis([40, 160, 0, 0.03])
#plt.grid(True)
#
#plt.axvline(beatbox.You.all_simulated_universes[0].fn[n])
#
##plt.axvline(We.fn[5])
#
#plt.show()
#
## -------------------------------------------
#
## Now look at the distribution of reconstructed a_y's
#
## what value of y to look at
#yval = 74
#ayval=np.zeros(numreal)
#for i in range(numreal):
# ayval[i] = beatbox.You.all_reconstructed_universes[i].ay[yval][0]
#
#
#n, bins, patches = plt.hist(ayval, 40, normed=1, facecolor='yellow', alpha=0.75)
#
#plt.xlabel('Value of a_y for y = ' + yval)
#plt.ylabel('Probability')
##plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
##plt.axis([40, 160, 0, 0.03])
#plt.grid(True)
#
#plt.axvline(beatbox.You.all_simulated_universes[0].ay[yval])
#
##plt.axvline(We.ay[5])
#
#plt.show()
#
## -----------------------------------------------
#
##Sample the posterior distributon
#
#beatbox.You.generate_realizations_from_posterior(We.fn, number_of_realizations=1000)
#
#for k in range(10):
# beatbox.You.all_simulated_universes[-1-k].show_CMB_T_map(from_perspective_of = "observer")
#
##for k in range(10):
## beatbox.You.all_simulated_universes[-1-k].show_CMB_T_map(from_perspective_of = s"observer")
#post106=np.zeros(1000)
#for i in range(1000):
# post106[i] = beatbox.You.all_simulated_universes[-1-i].fn[106]
#
#n, bins, patches = plt.hist(post106, 20, normed=1, facecolor='green', alpha=0.75)
#
#plt.xlabel('Smarts')
##plt.ylabel('Probability')
##plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
##plt.axis([40, 160, 0, 0.03])
#plt.grid(True)
#
#plt.axvline(beatbox.You.all_simulated_universes[0].fn[106])
#
##plt.axvline(We.fn[5])
#
#plt.show()
|
{
"content_hash": "a2cdfd13153a6fde715097a5381ccae2",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 395,
"avg_line_length": 38.833333333333336,
"alnum_prop": 0.7001839362354384,
"repo_name": "rogerblandford/Music",
"id": "9e24322b69013fe6f8e56e367f919a69365fa6f3",
"size": "5082",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Scripts/_ExploreMockManyNoise.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4983305"
},
{
"name": "Python",
"bytes": "115311"
}
],
"symlink_target": ""
}
|
from authorize import BankAccount
from authorize import Customer
from authorize import AuthorizeResponseError
from nose.plugins.attrib import attr
from unittest import TestCase
BANK_ACCOUNT = {
'routing_number': '322271627',
'account_number': '00987467838473',
'name_on_account': 'Rob Otron',
}
FULL_BANK_ACCOUNT = {
'customer_type': 'individual',
'account_type': 'checking',
'routing_number': '322271627',
'account_number': '00987467838473',
'name_on_account': 'Rob Otron',
'bank_name': 'Evil Bank Co.',
'echeck_type': 'CCD',
'billing': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
'phone_number': '520-123-4567',
'fax_number': '520-456-7890',
},
}
PAYMENT_RESULT = {
'bank_account': {
'account_type': 'checking',
'routing_number': 'XXXX1627',
'account_number': 'XXXX8473',
'name_on_account': 'Rob Otron',
'echeck_type': 'WEB',
}
}
@attr('live_tests')
class BankAccountTests(TestCase):
def test_live_bank_account(self):
        # Create a customer so that we can test payment creation against them
result = Customer.create()
customer_id = result.customer_id
# Create a new bank account
result = BankAccount.create(customer_id, BANK_ACCOUNT)
payment_id = result.payment_id
        # Read bank account data
        result = BankAccount.details(customer_id, payment_id)
        self.assertEqual(PAYMENT_RESULT, result.payment_profile.payment)
        # Update bank account
        BankAccount.update(customer_id, payment_id, BANK_ACCOUNT)
# Delete tests
BankAccount.delete(customer_id, payment_id)
self.assertRaises(AuthorizeResponseError, BankAccount.delete, customer_id, payment_id)
def test_live_full_bank_account(self):
        # Create a customer so that we can test payment creation against them
result = Customer.create()
customer_id = result.customer_id
# Create a new bank account
result = BankAccount.create(customer_id, FULL_BANK_ACCOUNT)
payment_id = result.payment_id
# Make sure the billing address we set is the same we get back
result = BankAccount.details(customer_id, payment_id)
        self.assertEqual(FULL_BANK_ACCOUNT['billing'], result.payment_profile.bill_to)
|
{
"content_hash": "9f6ad855315e9ba8713712baf023774b",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 94,
"avg_line_length": 30.878048780487806,
"alnum_prop": 0.6330963665086888,
"repo_name": "vcatalano/py-authorize",
"id": "cbc01dc1d0cbf849e2043b24db33194e00042a16",
"size": "2532",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_live_bank_account.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6713"
},
{
"name": "Makefile",
"bytes": "7198"
},
{
"name": "Python",
"bytes": "191626"
}
],
"symlink_target": ""
}
|
from .node import *
from ..types import *
from .description import *
from .config import *
from .standard import *
|
{
"content_hash": "2e9a5bb50675205fd8097252a676ba45",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 26,
"avg_line_length": 22.8,
"alnum_prop": 0.7368421052631579,
"repo_name": "JonatanAntoni/CMSIS_5",
"id": "85792451e3b63cddd3fa55720b5212a1fbe2882a",
"size": "114",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "CMSIS/DSP/cmsisdsp/sdf/scheduler/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "762206"
},
{
"name": "Batchfile",
"bytes": "4786"
},
{
"name": "C",
"bytes": "23989623"
},
{
"name": "C++",
"bytes": "1726535"
},
{
"name": "CMake",
"bytes": "224266"
},
{
"name": "CSS",
"bytes": "131929"
},
{
"name": "Gnuplot",
"bytes": "1971"
},
{
"name": "HTML",
"bytes": "65697"
},
{
"name": "JavaScript",
"bytes": "93132"
},
{
"name": "Jupyter Notebook",
"bytes": "4509949"
},
{
"name": "Makefile",
"bytes": "638"
},
{
"name": "Modelica",
"bytes": "2613"
},
{
"name": "Python",
"bytes": "823047"
},
{
"name": "Shell",
"bytes": "33886"
},
{
"name": "XSLT",
"bytes": "2134"
}
],
"symlink_target": ""
}
|
TARGETS = [
'MapSimple.py',
'ControlDisableUI.py',
'ControlOptions.py',
'ControlSimple.py',
'DirectionsSimple.py',
'EventArguments.py',
'EventClosure.py',
'EventProperties.py',
'EventSimple.py',
'GeocodingSimple.py',
]
PACKAGE = {
'title': 'Google Maps Example',
'desc': 'Python wrapper around the GMaps JS API',
}
def setup(targets):
'''Setup example for translation, MUST call util.setup(targets).'''
util.setup(targets)
def translate():
'''Translate example, MUST call util.translate().'''
util.translate()
def install(package):
'''Install and cleanup example module. MUST call util.install(package)'''
util.install(package)
##---------------------------------------##
# --------- (-: DO NOT EDIT :-) --------- #
##---------------------------------------##
import sys
import os
examples = head = os.path.abspath(os.path.dirname(__file__))
while os.path.split(examples)[1].lower() != 'examples':
examples = os.path.split(examples)[0]
if not examples:
raise ValueError("Cannot determine examples directory")
sys.path.insert(0, os.path.join(examples))
from _examples import util
sys.path.pop(0)
util.init(head)
setup(TARGETS)
translate()
install(PACKAGE)
|
{
"content_hash": "749802eb6cd85aeb850c2aec0b676092",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 21.70689655172414,
"alnum_prop": 0.6092136616362193,
"repo_name": "pyjs/pyjs",
"id": "a697b12ecb83a8c6c7c45b738db10e257f20cdf8",
"size": "1307",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "examples/gmaps/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5515375"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
}
|
from typing import List, Tuple
from asn1crypto.cms import CMSAttribute
from cryptography.exceptions import InvalidSignature
from flask import request, g, current_app, abort
from functools import wraps
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from asn1crypto import cms
from base64 import b64decode, b64encode
from . import _certificate_by_signer_identifier, _cryptography_hash_function, _cryptography_pad_function
def _verify_cms_signers(signed_data: bytes, detached: bool = False) -> Tuple[List[x509.Certificate], bytes]:
ci = cms.ContentInfo.load(signed_data)
assert ci['content_type'].native == 'signed_data'
signed: cms.SignedData = ci['content']
current_app.logger.debug("CMS request contains %d certificate(s)", len(signed['certificates']))
signers = []
for signer in signed['signer_infos']:
asn_certificate = _certificate_by_signer_identifier(signed['certificates'], signer['sid'])
assert asn_certificate is not None
certificate = x509.load_der_x509_certificate(asn_certificate.dump(), default_backend())
digest_algorithm = signer['digest_algorithm']
signature_algorithm = signer['signature_algorithm']
hash_function = _cryptography_hash_function(digest_algorithm)
pad_function = _cryptography_pad_function(signature_algorithm)
if hash_function is None or pad_function is None:
raise ValueError('Unsupported signature algorithm: {}'.format(signature_algorithm))
else:
current_app.logger.debug("Using signature algorithm: %s", signature_algorithm.native)
assert signed['encap_content_info']['content_type'].native == 'data'
if detached:
data = request.data
else:
data = signed['encap_content_info']['content'].native
if 'signed_attrs' in signer and len(signer['signed_attrs']) > 0:
for i in range(0, len(signer['signed_attrs'])):
signed_attr: CMSAttribute = signer['signed_attrs'][i]
if signed_attr['type'].native == "message_digest":
current_app.logger.debug("SignerInfo digest: %s", b64encode(signed_attr['values'][0].native))
certificate.public_key().verify(
signer['signature'].native,
signer['signed_attrs'].dump(),
pad_function(),
hash_function()
)
else: # No signed attributes means we are only validating the digest
certificate.public_key().verify(
signer['signature'].native,
data,
pad_function(),
hash_function()
)
signers.append(certificate)
# TODO: Don't assume that content is OctetString
if detached:
return signers, request.data
else:
return signers, signed['encap_content_info']['content'].native
def verify_cms_signers(f):
"""Verify the signers of a request containing a CMS/PKCS#7, DER encoded body.
The certificate of each signer is placed on the global **g** variable as **g.signers** and the signed data is
set as **g.signed_data**.
In unit tests, this decorator is completely disabled by the presence of testing = True
Raises:
- TypeError if *Content-Type* header is not "application/pkcs7-signature"
- SigningError if any signer on the CMS content is not valid.
"""
@wraps(f)
def decorator(*args, **kwargs):
if current_app.testing:
return f(*args, **kwargs)
        current_app.logger.debug('Verifying CMS request data for request to %s', request.url)
if request.headers['Content-Type'] != "application/pkcs7-signature":
raise TypeError("verify_cms_signers expects application/pkcs7-signature, got: {}".format(
request.headers['Content-Type']))
g.signers, g.signed_data = _verify_cms_signers(request.data)
return f(*args, **kwargs)
return decorator
def verify_cms_signers_header(f):
"""Verify the signature supplied by the client in the request using the ``Mdm-Signature`` header.
If the authenticity of the message has been verified,
then the signer is attached to the **g** object as **g.signer**.
In unit tests, this decorator is completely disabled by the presence of testing = True
:reqheader Mdm-Signature: BASE64-encoded CMS Detached Signature of the message. (if `SignMessage` was true)
"""
@wraps(f)
def decorator(*args, **kwargs):
if current_app.testing:
return f(*args, **kwargs)
if 'Mdm-Signature' not in request.headers:
raise TypeError('Client did not supply an Mdm-Signature header but signature is required.')
detached_signature = b64decode(request.headers['Mdm-Signature'])
try:
signers, signed_data = _verify_cms_signers(detached_signature, detached=True)
        except InvalidSignature:
            current_app.logger.warning("Invalid Signature in Mdm-Signature header")
            return abort(403)
g.signers = signers
g.signed_data = signed_data
return f(*args, **kwargs)
return decorator
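# A minimal sketch of wiring these decorators into a Flask view; the
# blueprint and route below are hypothetical and not part of this module:
#
#     from flask import Blueprint
#
#     mdm_app = Blueprint('mdm_app', __name__)
#
#     @mdm_app.route('/checkin', methods=['PUT'])
#     @verify_cms_signers_header
#     def checkin():
#         # g.signers and g.signed_data were populated by the decorator
#         return '', 200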
|
{
"content_hash": "54ef761fc66b8cfed112397fab335712",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 113,
"avg_line_length": 38.369565217391305,
"alnum_prop": 0.6536355051935788,
"repo_name": "mosen/commandment",
"id": "f7bdac7170b174de17fc4982b4b2e9789d84c4c0",
"size": "5295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commandment/cms/decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2988"
},
{
"name": "HTML",
"bytes": "1265"
},
{
"name": "JavaScript",
"bytes": "6113"
},
{
"name": "Mako",
"bytes": "1110"
},
{
"name": "Python",
"bytes": "420945"
},
{
"name": "Shell",
"bytes": "148"
},
{
"name": "TypeScript",
"bytes": "292822"
}
],
"symlink_target": ""
}
|
"""Illustrates the use of Struct classes, and their differences
from normal classes and namedtuples.
"""
from collections import namedtuple
from simplestruct import Struct, Field
###############################################################################
# Definition #
###############################################################################
# Standard Python class.
class PyPoint:
def __init__(self, x, y):
self.x = x
self.y = y
# Struct class.
class SPoint(Struct):
# Field declaration order matters.
x = Field # shorthand for "x = Field()"
y = Field
# The constructor is implicitly defined.
# namedtuple class
NTPoint = namedtuple('NTPoint', 'x y')
###############################################################################
# Construction and pretty-printing #
###############################################################################
# Initialization is the same for all three classes.
py_point = PyPoint(1, 2)
struct_point = SPoint(1, 2)
tuple_point = NTPoint(1, 2)
# Structs and namedtuples both have pretty-printing.
print('==== Printing ====')
print(py_point)     # <__main__.PyPoint object at ...>
print(struct_point) # SPoint(x=1, y=2)
print(tuple_point) # NTPoint(x=1, y=2)
# Structs print their contents using whichever formatting method
# was called originally. namedtuples always use repr.
struct_point2 = SPoint('a', 'b')
tuple_point2 = NTPoint('a', 'b')
print(str(struct_point2)) # SPoint(a, b)
print(repr(struct_point2)) # SPoint('a', 'b')
print(str(tuple_point2)) # NTPoint('a', 'b')
print(repr(tuple_point2)) # NTPoint('a', 'b')
# All three classes can also be constructed using
# keywords, *args, and **kargs.
py_point2 = PyPoint(1, y=2)
struct_point2 = SPoint(*[1, 2])
tuple_point2 = NTPoint(**{'x': 1, 'y': 2})
###############################################################################
# Equality and hashing #
###############################################################################
# Structs and namedtuples both have structural equality.
print('\n==== Equality ====')
print(py_point == py_point2) # False
print(struct_point == struct_point2) # True
print(tuple_point == tuple_point2) # True
# Structs, unlike namedtuple, are only equal to other
# instances of the same class.
class OtherSPoint(Struct):
x, y = Field, Field
OtherNTPoint = namedtuple('OtherNTPoint', 'x y')
struct_point2 = OtherSPoint(1, 2)
tuple_point2 = OtherNTPoint(1, 2)
print(struct_point == struct_point2) # False
print(tuple_point == tuple_point2) # True
# Structs and namedtuples have hash functions based on
# structural value.
print('\n==== Hashing ====')
print(hash(py_point) == hash(py_point2)) # False (almost certainly)
print(hash(struct_point) == hash(struct_point2)) # True
print(hash(tuple_point) == hash(tuple_point2)) # True
###############################################################################
# Other features #
###############################################################################
# Structs implement some of the same convenience methods as namedtuples.
print('\n==== Convenience methods ====')
print(struct_point._asdict()) # OrderedDict([('x', 1), ('y', 2)])
print(tuple_point._asdict()) # OrderedDict([('x', 1), ('y', 2)])
print(struct_point._replace(x=3)) # SPoint(x=3, y=2)
print(tuple_point._replace(x=3)) # NTPoint(x=3, y=2)
# Note that _replace() creates a copy without modifying the original object.
# Both can be iterated over and decomposed into their components.
print(len(struct_point)) # 2
x, y = struct_point
print((x, y)) # (1, 2)
print(len(tuple_point)) # 2
x, y = tuple_point
print((x, y)) # (1, 2)
|
{
"content_hash": "1357dc1a35d496423575b9cd09f0de53",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 79,
"avg_line_length": 37.373831775700936,
"alnum_prop": 0.5136284071017755,
"repo_name": "brandjon/simplestruct",
"id": "d54aeebf21f7e36482a32d6d4d891a3de91e0336",
"size": "3999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/point.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40340"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from lino.api import dd, rt
from lino import mixins
#~ from lino.models import SiteConfig
#~ from lino_xl.lib.contacts import models as contacts
#~ from lino_xl.lib.cal import models as cal
contacts = dd.resolve_app('contacts')
#~ cal = dd.resolve_app('cal')
#~ school = dd.resolve_app('school')
class School(contacts.Company):
class Meta:
#~ app_label = 'school'
verbose_name = _("School")
verbose_name_plural = _("Schools")
class Schools(contacts.Companies):
model = School
class Person(contacts.Person, mixins.Born):
class Meta(contacts.Person.Meta):
app_label = 'contacts'
# ~ # see :srcref:`docs/tickets/14`
#~ verbose_name = _("Person")
#~ verbose_name_plural = _("Persons")
class PersonDetail(contacts.PersonDetail):
#~ contact = contacts.PersonDetail.main
#~ outbox = dd.Panel("""
#~ outbox.MailsByProject
#~ """,label = _("Correspondence"))
#~ calendar = dd.Panel("""
#~ cal.EntriesByProject
#~ cal.TasksByProject
#~ """,label = _("Calendar"))
#~ main = "contact outbox calendar"
main = """
box1 box2
remarks contacts.RolesByPerson households.MembersByPerson
"""
box1 = """
last_name first_name:15 #title:10
country city zip_code:10
#street_prefix street:25 street_no street_box
addr2:40
is_pupil is_teacher
"""
box2 = """
id:12 language
email
phone fax
gsm
gender birth_date age:10
"""
#~ def setup_handle(self,lh):
#~ lh.contact.label = _("Contact")
#~ lh.mails.label = _("Mails")
#~ class Company(contacts.Partner,contacts.CompanyMixin):
#~ class Meta(contacts.CompanyMixin.Meta):
#~ app_label = 'contacts'
# ~ # see :srcref:`docs/tickets/14`
#~ verbose_name = _("Company")
#~ verbose_name_plural = _("Companies")
#~ class Event(cal.Event):
#~ class Meta(cal.Event.Meta):
#~ app_label = 'cal'
#~ class Task(cal.Task):
#~ class Meta(cal.Task.Meta):
#~ app_label = 'cal'
#~ class EventDetail(cal.EventDetail):
#~ class EventDetail(dd.FormLayout):
#~ main = "general more"
#~ lesson = dd.Panel("""
#~ owner start_date start_time end_time place
#~ school.PresencesByEvent
#~ """,label=_("Lesson"))
#~ event = dd.Panel("""
# ~ id:8 user priority access_class transparent #rset
#~ summary state workflow_buttons
#~ calendar created:20 modified:20
#~ description
#~ cal.GuestsByEvent
#~ """,label=_("Event"))
#~ main = "lesson event"
#~ def setup_handle(self,lh):
#~ lh.lesson.label = _("Lesson")
#~ lh.event.label =
#~ lh.notes.label = _("Notes")
@dd.receiver(dd.post_analyze)
def my_details(sender, **kw):
site = sender
site.modules.cal.Events.set_detail_layout('general more')
site.modules.cal.Events.add_detail_panel('general', """
event_type summary user project
start end
room priority access_class transparent #rset
owner workflow_buttons
description cal.GuestsByEvent
""", _("General"))
site.modules.cal.Events.add_detail_panel('more', """
id created:20 modified:20
outbox.MailsByController #postings.PostingsByController
""", _("More"))
# remove `project` field
#~ site.modules.cal.Tasks.set_detail_layout("""
#~ start_date workflow_buttons due_date done user id
#~ summary
#~ calendar owner created:20 modified:20 user_modified
# ~ description #notes.NotesByTask
#~ """)
#~ site.modules.cal.Events.set_detail_layout("general more")
site.modules.cal.Events.set_insert_layout("""
summary
start end
event_type project
""",
start="start_date start_time",
end="end_date end_time",
window_size=(60, 'auto'))
# TODO : move to plugin
def setup_main_menu(config, site, user_type, main):
m = main.get_item("contacts")
m.add_action('homeworkschool.Schools')
def customize_school():
dd.inject_field('courses.Pupil',
'school',
models.ForeignKey(School,
blank=True, null=True,
help_text=_("""The regular school where this child goes.""")
))
customize_school()
|
{
"content_hash": "80993dc5d883c687f1cdfe5b3142e361",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 76,
"avg_line_length": 28.22012578616352,
"alnum_prop": 0.5932694450635169,
"repo_name": "khchine5/book",
"id": "3a05ab716f1a90a322e1b91098613f3c8bceddba",
"size": "4567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_book/projects/homeworkschool/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "Python",
"bytes": "486198"
},
{
"name": "Shell",
"bytes": "702"
}
],
"symlink_target": ""
}
|
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[1]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!s1+ggf&2lnb3hez!b+h@eayngv)#+&w54hb*l*c#eg=_epo2nc')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025 # Maildump
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
|
{
"content_hash": "6bbfb36710d8fe39949b6e6bfa2a4ad5",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 110,
"avg_line_length": 30.345454545454544,
"alnum_prop": 0.4865188735769922,
"repo_name": "HelsinkiHacklab/asylum",
"id": "0d89e0e7145d00e0dbc7779198e003b42ae79e06",
"size": "1693",
"binary": false,
"copies": "1",
"ref": "refs/heads/hhl_changes",
"path": "project/config/settings/local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31215"
},
{
"name": "Dockerfile",
"bytes": "3192"
},
{
"name": "HTML",
"bytes": "9736"
},
{
"name": "JavaScript",
"bytes": "2309"
},
{
"name": "Python",
"bytes": "223215"
},
{
"name": "Shell",
"bytes": "5899"
}
],
"symlink_target": ""
}
|
from JumpScale import j
import JumpScale.grid.osis
import JumpScale.baselib.redis
import JumpScale.lib.rogerthat
import JumpScale.portal
try:
import ujson as json
except ImportError:
import json
import time
import sys
import os
import inspect
import gevent
class Handler(object):
ORDER = 50
def __init__(self, service):
self.service = service
def start(self):
pass
def updateState(self, alert):
pass
def escalate(self, alert, users):
pass
class AlertService(object):
def __init__(self):
self.rediscl = j.clients.redis.getByInstanceName('system')
self.alertqueue = self.rediscl.getQueue('alerts')
self.alerts_client = j.core.portal.getClientByInstance('main').actors.system.alerts
self.scl = j.core.osis.getClientForNamespace('system')
self.handlers = list()
self.timers = dict()
self.loadHandlers()
def log(self, message, level=1):
j.logger.log(message, level, 'alerter')
def getUsersForLevel(self, level):
groupname = "level%s" % level
users = self.scl.user.search({'groups': {'$all': [groupname, 'alert']}, 'active': True})[1:]
return users
def getUserEmails(self, user):
useremails = user['emails']
if not isinstance(useremails, list):
useremails = [useremails]
return useremails
def loadHandlers(self):
from JumpScale.baselib.alerter import handlers
for name, module in inspect.getmembers(handlers, inspect.ismodule):
for name, klass in inspect.getmembers(module, inspect.isclass):
if issubclass(klass, Handler) and klass is not Handler:
self.handlers.append(klass(self))
self.handlers.sort(key=lambda s: s.ORDER)
def getUrl(self, alert):
return "http://cpu01.bracknell1.vscalers.com:82/grid/alert?id=%(guid)s" % alert
def escalate(self, alert):
level = alert['level']
users = self.getUsersForLevel(level)
for handler in self.handlers:
result = handler.escalate(alert, users)
if result is not None:
users = result
def updateState(self, alert):
for handler in self.handlers:
handler.updateState(alert)
def getAlert(self, id):
return self.scl.alert.get(id).dump()
def escalateHigher(self, alert):
self.timers.pop(alert['guid'], None)
message = "Took too long to be Accepted"
self.log(message + " %s" % alert['guid'])
self.alerts_client.escalate(alert=alert['guid'], comment=message)
def start(self, options):
if options.clean:
lalerts = self.rediscl.hlen('alerts')
self.log("Removing cached alerts: %s" % lalerts)
self.rediscl.delete('alerts')
self.log("Removing alerts queue: %s" % self.alertqueue.qsize())
self.rediscl.delete(self.alertqueue.key)
for handler in self.handlers:
handler.start()
self.restartTimers()
greenlet = gevent.spawn(self.receiveAlerts)
gevent.joinall([greenlet])
def getStateTime(self, alert):
key = "alerter.level%s.%s" % (alert['level'], alert['state'].lower())
if j.application.config.exists(key):
return j.base.time.getDeltaTime(j.application.config.get(key))
def makeTimer(self, alert):
greenlet = self.timers.get(alert['guid'])
if greenlet is not None:
scheduledalert = greenlet.args[0]
if scheduledalert['state'] != alert['state']:
self.log("Removing schedule for alert %s" % scheduledalert['state'])
greenlet.kill()
else:
return
delay = self.getStateTime(alert)
if delay:
self.log("Schedule escalation in %ss for state %s" % (delay, alert['state']))
self.timers[alert['guid']] = gevent.spawn_later(delay, self.escalateHigher, alert)
def restartTimers(self):
now = time.time()
for key, alert in self.rediscl.hgetall('alerts').iteritems():
alert = self.getAlert(key)
if alert['state'] in ('RESOLVED', 'UNRESOLVED'):
self.rediscl.hdel('alerts', key)
else:
alerttime = self.getStateTime(alert)
if not alerttime:
self.rediscl.hdel('alerts', key)
continue
epoch = alert['epoch'] or alert['lasttime']
remainingtime = (epoch + alerttime) - now
if remainingtime > 0:
self.log("Schedule escalation in %ss for state %s" % (remainingtime, alert['state']))
self.timers[alert['guid']] = gevent.spawn_later(remainingtime, self.escalateHigher, alert)
else:
self.escalateHigher(alert)
def receiveAlerts(self):
while True:
alertid = self.alertqueue.get()
alert = self.getAlert(alertid)
oldalert = self.rediscl.hget('alerts', alertid)
self.rediscl.hset('alerts', alert['guid'], json.dumps(alert))
self.log('Got alertid %s' % alertid)
if alert['state'] == 'ALERT':
self.escalate(alert=alert)
elif oldalert:
oldalert = json.loads(oldalert)
if oldalert['state'] == 'ALERT' and alert['state'] == 'ACCEPTED':
alert['message_id'] = oldalert['message_id']
self.updateState(alert)
if alert['state'] in ('RESOLVED', 'UNRESOLVED'):
self.rediscl.hdel('alerts', alert['guid'])
self.makeTimer(alert)
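# A minimal launch sketch (the option parsing is illustrative; the real entry
# point for this service may differ):
#
#     import optparse
#     parser = optparse.OptionParser()
#     parser.add_option('--clean', action='store_true', default=False)
#     options, _ = parser.parse_args()
#     AlertService().start(options)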
|
{
"content_hash": "85b9185b6f01489fa5027c6487d5d7b1",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 110,
"avg_line_length": 36.100628930817614,
"alnum_prop": 0.5874564459930314,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "08701b11bedcde69eede561ab012948dcbfb805e",
"size": "5740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/baselib/alerter/alerts_service.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
}
|
r"""Train and export a simple Softmax Regression TensorFlow model.
The model is from the TensorFlow "MNIST For ML Beginner" tutorial. This program
simply follows all its training instructions, and uses TensorFlow SavedModel to
export the trained model with proper signatures that can be loaded by standard
tensorflow_model_server.
Usage: mnist_saved_model.py [--training_iteration=x] [--model_version=y] \
export_dir
"""
from __future__ import print_function
import os
import sys
# This is a placeholder for a Google-internal import.
import tensorflow as tf
import mnist_input_data
tf.app.flags.DEFINE_integer('training_iteration', 1000,
'number of training iterations.')
tf.app.flags.DEFINE_integer('model_version', 1, 'version number of the model.')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory.')
FLAGS = tf.app.flags.FLAGS
def main(_):
if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
print('Usage: mnist_export.py [--training_iteration=x] '
'[--model_version=y] export_dir')
sys.exit(-1)
if FLAGS.training_iteration <= 0:
print('Please specify a positive value for training iteration.')
sys.exit(-1)
if FLAGS.model_version <= 0:
print('Please specify a positive value for version number.')
sys.exit(-1)
# Train model
print('Training model...')
mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
sess = tf.InteractiveSession()
serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),}
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
x = tf.identity(tf_example['x'], name='x') # use tf.identity() to assign name
y_ = tf.placeholder('float', shape=[None, 10])
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
values, indices = tf.nn.top_k(y, 10)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
tf.constant([str(i) for i in xrange(10)]))
prediction_classes = table.lookup(tf.to_int64(indices))
for _ in range(FLAGS.training_iteration):
batch = mnist.train.next_batch(50)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
print('training accuracy %g' % sess.run(
accuracy, feed_dict={
x: mnist.test.images,
y_: mnist.test.labels
}))
print('Done training!')
# Export model
# WARNING(break-tutorial-inline-code): The following code snippet is
# in-lined in tutorials, please update tutorial documents accordingly
# whenever code changes.
export_path_base = sys.argv[-1]
export_path = os.path.join(
tf.compat.as_bytes(export_path_base),
tf.compat.as_bytes(str(FLAGS.model_version)))
print('Exporting trained model to', export_path)
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
# Build the signature_def_map.
classification_inputs = tf.saved_model.utils.build_tensor_info(
serialized_tf_example)
classification_outputs_classes = tf.saved_model.utils.build_tensor_info(
prediction_classes)
classification_outputs_scores = tf.saved_model.utils.build_tensor_info(values)
classification_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs={
tf.saved_model.signature_constants.CLASSIFY_INPUTS:
classification_inputs
},
outputs={
tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:
classification_outputs_classes,
tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:
classification_outputs_scores
},
method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))
tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
tensor_info_y = tf.saved_model.utils.build_tensor_info(y)
prediction_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs={'images': tensor_info_x},
outputs={'scores': tensor_info_y},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))
legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
'predict_images':
prediction_signature,
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
classification_signature,
},
legacy_init_op=legacy_init_op)
builder.save()
print('Done exporting!')
if __name__ == '__main__':
tf.app.run()
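# A minimal smoke test (not part of the tutorial) for the exported model,
# assuming TF 1.x and that `export_path` points at a version directory
# written by this script:
#
#     with tf.Session(graph=tf.Graph()) as sess:
#         tf.saved_model.loader.load(
#             sess, [tf.saved_model.tag_constants.SERVING], export_path)
#         y = sess.graph.get_tensor_by_name('y:0')
#         print(sess.run(y, {'x:0': mnist.test.images[:1]}))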
|
{
"content_hash": "a71dbd925c9b994be59c2d384231ffc6",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 80,
"avg_line_length": 38.396946564885496,
"alnum_prop": 0.6789264413518886,
"repo_name": "penguin138/serving",
"id": "579410e14c1d0c7d40c42185d08ef958429cefc8",
"size": "5733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_serving/example/mnist_saved_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1808685"
},
{
"name": "CSS",
"bytes": "12464"
},
{
"name": "HTML",
"bytes": "3225"
},
{
"name": "Python",
"bytes": "226705"
},
{
"name": "Shell",
"bytes": "1870"
}
],
"symlink_target": ""
}
|
import json
import os
from uuid import uuid4
import multiprocessing
import requests
import time
import sys
from data_connection import DataConnection
class Configurator(object):
"""
Manages a config for the Local Computer
"""
def __init__(self, **kwargs):
if "filename" in kwargs:
with open(kwargs["filename"], 'r') as input:
self.config = json.loads(input.read())
else:
self.config = {
'server': 'http://localhost:8000',
'secret_uuid': '123412341234',
'registration_token': 'abcd',
'name': "a_local_computer"
}
def write_config(self, filename):
"""
Save current config to a json file.
"""
with open(filename, 'w') as output:
output.write(json.dumps(self.config, indent=4))
def get_config(self):
return self.config
def set_config(self, config):
self.config = config
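# Usage sketch (illustrative, not part of the original module): round-trip a
# config through Configurator; the 'example.cfg' filename is hypothetical.
#
#   cfg = Configurator()
#   cfg.get_config()['name'] = 'bench_rig'
#   cfg.write_config('example.cfg')
#   assert Configurator(filename='example.cfg').get_config()['name'] == 'bench_rig'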
CONFIG_LOCATION = "default.cfg"
def init_configurator(config_location=None):
"""
Register local computer if not done previously.
:return: The configurator for this local computer
"""
global CONFIG_LOCATION
CONFIG_LOCATION = config_location
if os.path.isfile(CONFIG_LOCATION):
configurator = Configurator(filename=CONFIG_LOCATION)
print "Found local configurator at {}".format(CONFIG_LOCATION)
else:
configurator = Configurator()
configurator.write_config(CONFIG_LOCATION)
data_connection = DataConnection(configurator)
my_reg_token = str(uuid4())
print "Registering to {} with token {}".format(configurator.get_config()['server'], my_reg_token)
data_connection.register(my_reg_token, CONFIG_LOCATION)
configurator.write_config(CONFIG_LOCATION)
return configurator
def periodic_eval(refresh_time_sec, program, should_stop, shared_val, data_connection):
while not should_stop.value:
try:
eval(compile(program, '<string>', 'exec'))
time.sleep(refresh_time_sec)
except BaseException as e:
print ("Error running uploaded program {}".format(e))
if e.message:
print e.message
return periodic_eval
class WorkerPool(object):
def __init__(self, data_connection):
self.job_list = {}
self.shared_val = multiprocessing.Value('i',0)
self.data_connection = data_connection
def start_program(self, program_id, refresh_time_sec, program):
if program_id not in self.job_list.keys():
should_stop = multiprocessing.Value('b', False)
self.job_list[program_id] = [should_stop,
multiprocessing.Process(target=periodic_eval, args=(
refresh_time_sec,
program,
should_stop,
self.shared_val,
self.data_connection
))]
self.job_list[program_id][1].start()
else:
print "Program id {} already running".format(program_id)
def stop_program(self, program_id):
try:
job = self.job_list[program_id]
self.job_list[program_id][0].value = True
self.job_list[program_id][1].join(20)
del self.job_list[program_id]
print "Stopped job for program id {}".format(program_id)
except BaseException as e:
print "Failed to stop program {}".format(program_id)
def stop(self):
for program_id in self.job_list.keys():
self.stop_program(program_id)
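# Usage sketch (illustrative; assumes a DataConnection wired up as in the
# __main__ block below, and a hypothetical program id 42):
#
#   pool = WorkerPool(data_connection)
#   pool.start_program(42, 1.0, "print 'tick'")  # re-runs the code every 1.0 s
#   pool.stop_program(42)  # sets the stop flag, then joins the worker process
#   pool.stop()            # stops any jobs still running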
COMMAND_NOOP = 0
COMMAND_DONE = 1
COMMAND_LOAD_PROGRAM = 2
COMMAND_STOP_PROGRAM = 3
class CommandHandler(object):
def __init__(self, worker_pool, data_connection):
self.worker_pool = worker_pool
self.data_connection = data_connection
self.handler_map = {
COMMAND_LOAD_PROGRAM: self.handle_load,
COMMAND_STOP_PROGRAM: self.handle_stop,
}
def handle_commands(self, commands):
done = False
for command in commands:
print ("Rcx command {}".format(command["type"]))
if command['type'] == COMMAND_NOOP:
pass
elif command['type'] == COMMAND_DONE:
done = True
else:
self.handler_map[command['type']](command)
self.data_connection.deactivate_command(command)
return done
def handle_load(self, command):
program_command = json.loads(command['json_command'])
program = self.data_connection.get_program(program_command['program_id'])
print ("Starting program {}".format(program['id']))
self.worker_pool.start_program(program['id'], program['sleep_time_sec'], program['code'])
def handle_stop(self, command):
program_command = json.loads(command['json_command'])
program = self.data_connection.get_program(program_command['program_id'])
self.worker_pool.stop_program(program['id'])
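# Shape of the command dicts handle_commands() consumes, inferred from the
# handlers above (all field values here are hypothetical):
#
#   commands = [
#       {'type': COMMAND_NOOP},
#       {'type': COMMAND_LOAD_PROGRAM, 'json_command': '{"program_id": 7}'},
#       {'type': COMMAND_DONE},
#   ]
#   done = command_handler.handle_commands(commands)  # True, due to COMMAND_DONE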
if __name__ == '__main__':
"""
Main loop. Handle commands until done.
"""
if len(sys.argv) != 2:
print "usage: python base <config file>"
sys.exit()
configurator = init_configurator(sys.argv[1])
data_connection = DataConnection(configurator)
data_connection.update_config(CONFIG_LOCATION)
worker_pool = WorkerPool(data_connection)
command_handler = CommandHandler(worker_pool, data_connection)
done = False
data_connection.set_local_computer_status(is_running=True)
print "Connected to " + configurator.get_config()['server']
print "Polling for commands. <ctrl-c> to exit."
while not done:
try:
commands = data_connection.get_new_commands()
done = command_handler.handle_commands(commands)
time.sleep(configurator.get_config()['command_refresh_sec'])
except KeyboardInterrupt:
print "Stopping"
done = True
worker_pool.stop()
data_connection.set_local_computer_status(is_running=False)
print("got shared_value {}".format(worker_pool.shared_val.value))
print("Received done command. Shutting down.")
|
{
"content_hash": "5c9922aa65cdb031d523407663a50c0b",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 105,
"avg_line_length": 32.64795918367347,
"alnum_prop": 0.5902484763244257,
"repo_name": "BARCproject/barc",
"id": "dc6d5c27d6c34e83f0c46c25a952900f25cb9063",
"size": "6399",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Dator/vm/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "37857"
},
{
"name": "C++",
"bytes": "34556"
},
{
"name": "CMake",
"bytes": "25703"
},
{
"name": "CSS",
"bytes": "143"
},
{
"name": "HTML",
"bytes": "27848"
},
{
"name": "JavaScript",
"bytes": "10764902"
},
{
"name": "Julia",
"bytes": "117617"
},
{
"name": "Less",
"bytes": "69047"
},
{
"name": "MATLAB",
"bytes": "9115"
},
{
"name": "Python",
"bytes": "343196"
},
{
"name": "SCSS",
"bytes": "69934"
},
{
"name": "Shell",
"bytes": "13578"
},
{
"name": "Vim script",
"bytes": "370"
}
],
"symlink_target": ""
}
|
"""ACME Identifier Validation Challenges."""
import abc
import functools
import hashlib
import logging
import socket
from cryptography.hazmat.primitives import hashes
import OpenSSL
import requests
from acme import errors
from acme import crypto_util
from acme import fields
from acme import jose
from acme import other
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class Challenge(jose.TypedJSONObjectWithFields):
# _fields_to_partial_json | pylint: disable=abstract-method
"""ACME challenge."""
TYPES = {}
@classmethod
def from_json(cls, jobj):
try:
return super(Challenge, cls).from_json(jobj)
except jose.UnrecognizedTypeError as error:
logger.debug(error)
return UnrecognizedChallenge.from_json(jobj)
class ContinuityChallenge(Challenge): # pylint: disable=abstract-method
"""Client validation challenges."""
class DVChallenge(Challenge): # pylint: disable=abstract-method
"""Domain validation challenges."""
class ChallengeResponse(jose.TypedJSONObjectWithFields):
# _fields_to_partial_json | pylint: disable=abstract-method
"""ACME challenge response."""
TYPES = {}
resource_type = 'challenge'
resource = fields.Resource(resource_type)
class UnrecognizedChallenge(Challenge):
"""Unrecognized challenge.
ACME specification defines a generic framework for challenges and
defines some standard challenges that are implemented in this
module. However, other implementations (including peers) might
define additional challenge types, which should be ignored if
unrecognized.
:ivar jobj: Original JSON decoded object.
"""
def __init__(self, jobj):
super(UnrecognizedChallenge, self).__init__()
object.__setattr__(self, "jobj", jobj)
def to_partial_json(self):
# pylint: disable=no-member
return self.jobj
@classmethod
def from_json(cls, jobj):
return cls(jobj)
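# Round-trip sketch (illustrative): a challenge of an unregistered type
# deserializes to UnrecognizedChallenge instead of raising, and serializes
# back unchanged. The 'future-01' type is made up.
#
#   jobj = {'type': 'future-01', 'token': 'abc'}
#   chall = Challenge.from_json(jobj)   # -> UnrecognizedChallenge
#   assert chall.to_partial_json() == jobj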
class _TokenDVChallenge(DVChallenge):
"""DV Challenge with token.
:ivar bytes token:
"""
TOKEN_SIZE = 128 // 8  # Based on the entropy value from the spec
"""Minimum size of the :attr:`token` in bytes."""
# TODO: acme-spec doesn't specify token as base64-encoded value
token = jose.Field(
"token", encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=TOKEN_SIZE, minimum=True))
# XXX: rename to ~token_good_for_url
@property
def good_token(self): # XXX: @token.decoder
"""Is `token` good?
.. todo:: acme-spec wants "It MUST NOT contain any non-ASCII
characters", but it should also warrant that it doesn't
contain ".." or "/"...
"""
# TODO: check that path combined with uri does not go above
# URI_ROOT_PATH!
return b'..' not in self.token and b'/' not in self.token
class KeyAuthorizationChallengeResponse(ChallengeResponse):
"""Response to Challenges based on Key Authorization.
:param unicode key_authorization:
"""
key_authorization = jose.Field("keyAuthorization")
thumbprint_hash_function = hashes.SHA256
def verify(self, chall, account_public_key):
"""Verify the key authorization.
:param KeyAuthorization chall: Challenge that corresponds to
this response.
:param JWK account_public_key:
:return: ``True`` iff verification of the key authorization was
successful.
:rtype: bool
"""
parts = self.key_authorization.split('.') # pylint: disable=no-member
if len(parts) != 2:
logger.debug("Key authorization (%r) is not well formed",
self.key_authorization)
return False
if parts[0] != chall.encode("token"):
logger.debug("Mismatching token in key authorization: "
"%r instead of %r", parts[0], chall.encode("token"))
return False
thumbprint = jose.b64encode(account_public_key.thumbprint(
hash_function=self.thumbprint_hash_function)).decode()
if parts[1] != thumbprint:
logger.debug("Mismatching thumbprint in key authorization: "
"%r instead of %r", parts[0], thumbprint)
return False
return True
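# Wire format checked above, as a sketch: a key authorization is
# "<token>.<base64url(SHA-256 thumbprint of the account JWK)>", so the
# expected value can be rebuilt from the challenge and the account key:
#
#   expected = chall.encode("token") + "." + jose.b64encode(
#       account_public_key.thumbprint(
#           hash_function=hashes.SHA256)).decode()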
class KeyAuthorizationChallenge(_TokenDVChallenge):
# pylint: disable=abstract-class-little-used,too-many-ancestors
"""Challenge based on Key Authorization.
:param response_cls: Subclass of `KeyAuthorizationChallengeResponse`
that will be used to generate `response`.
"""
__metaclass__ = abc.ABCMeta
response_cls = NotImplemented
thumbprint_hash_function = (
KeyAuthorizationChallengeResponse.thumbprint_hash_function)
def key_authorization(self, account_key):
"""Generate Key Authorization.
:param JWK account_key:
:rtype unicode:
"""
return self.encode("token") + "." + jose.b64encode(
account_key.thumbprint(
hash_function=self.thumbprint_hash_function)).decode()
def response(self, account_key):
"""Generate response to the challenge.
:param JWK account_key:
:returns: Response (initialized `response_cls`) to the challenge.
:rtype: KeyAuthorizationChallengeResponse
"""
return self.response_cls(
key_authorization=self.key_authorization(account_key))
@abc.abstractmethod
def validation(self, account_key, **kwargs):
"""Generate validation for the challenge.
Subclasses must implement this method, but they are likely to
return completely different data structures, depending on what's
necessary to complete the challenge. Interpretation of that
return value must be known to the caller.
:param JWK account_key:
:returns: Challenge-specific validation.
"""
raise NotImplementedError() # pragma: no cover
def response_and_validation(self, account_key, *args, **kwargs):
"""Generate response and validation.
Convenience function that returns the results of `response` and
`validation`.
:param JWK account_key:
:rtype: tuple
"""
return (self.response(account_key),
self.validation(account_key, *args, **kwargs))
@ChallengeResponse.register
class HTTP01Response(KeyAuthorizationChallengeResponse):
"""ACME http-01 challenge response."""
typ = "http-01"
PORT = 80
"""Verification port as defined by the protocol.
You can override it (e.g. for testing) by passing ``port`` to
`simple_verify`.
"""
WHITESPACE_CUTSET = "\n\r\t "
"""Whitespace characters which should be ignored at the end of the body."""
def simple_verify(self, chall, domain, account_public_key, port=None):
"""Simple verify.
:param challenges.HTTP01 chall: Corresponding challenge.
:param unicode domain: Domain name being verified.
:param JWK account_public_key: Public key for the key pair
being authorized. If ``None``, key verification is not
performed!
:param int port: Port used in the validation.
:returns: ``True`` iff validation is successful, ``False``
otherwise.
:rtype: bool
"""
if not self.verify(chall, account_public_key):
logger.debug("Verification of key authorization in response failed")
return False
# TODO: the ACME specification defines a URI template that doesn't
# allow a custom port... Make sure the port is not in the
# request URI if it's the standard one.
if port is not None and port != self.PORT:
logger.warning(
"Using non-standard port for http-01 verification: %s", port)
domain += ":{0}".format(port)
uri = chall.uri(domain)
logger.debug("Verifying %s at %s...", chall.typ, uri)
try:
http_response = requests.get(uri)
except requests.exceptions.RequestException as error:
logger.error("Unable to reach %s: %s", uri, error)
return False
logger.debug("Received %s: %s. Headers: %s", http_response,
http_response.text, http_response.headers)
challenge_response = http_response.text.rstrip(self.WHITESPACE_CUTSET)
if self.key_authorization != challenge_response:
logger.debug("Key authorization from response (%r) doesn't match "
"HTTP response (%r)", self.key_authorization,
challenge_response)
return False
return True
@Challenge.register # pylint: disable=too-many-ancestors
class HTTP01(KeyAuthorizationChallenge):
"""ACME http-01 challenge."""
response_cls = HTTP01Response
typ = response_cls.typ
URI_ROOT_PATH = ".well-known/acme-challenge"
"""URI root path for the server provisioned resource."""
@property
def path(self):
"""Path (starting with '/') for provisioned resource.
:rtype: string
"""
return '/' + self.URI_ROOT_PATH + '/' + self.encode('token')
def uri(self, domain):
"""Create an URI to the provisioned resource.
Forms an URI to the HTTPS server provisioned resource
(containing :attr:`~SimpleHTTP.token`).
:param unicode domain: Domain name being verified.
:rtype: string
"""
return "http://" + domain + self.path
def validation(self, account_key, **unused_kwargs):
"""Generate validation.
:param JWK account_key:
:rtype: unicode
"""
return self.key_authorization(account_key)
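# Provisioning sketch (token value hypothetical): for a given challenge,
#
#   chall.path                 # '/.well-known/acme-challenge/evaGxfADs...'
#   chall.uri('example.com')   # 'http://example.com/.well-known/acme-challenge/evaGxfADs...'
#   chall.validation(account_key)  # body the HTTP server must return at that URI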
@ChallengeResponse.register
class TLSSNI01Response(KeyAuthorizationChallengeResponse):
"""ACME tls-sni-01 challenge response."""
typ = "tls-sni-01"
DOMAIN_SUFFIX = b".acme.invalid"
"""Domain name suffix."""
PORT = 443
"""Verification port as defined by the protocol.
You can override it (e.g. for testing) by passing ``port`` to
`simple_verify`.
"""
@property
def z(self): # pylint: disable=invalid-name
"""``z`` value used for verification.
:rtype bytes:
"""
return hashlib.sha256(
self.key_authorization.encode("utf-8")).hexdigest().lower().encode()
@property
def z_domain(self):
"""Domain name used for verification, generated from `z`.
:rtype bytes:
"""
return self.z[:32] + b'.' + self.z[32:] + self.DOMAIN_SUFFIX
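# Derivation sketch for z/z_domain (input value hypothetical): with
# key_authorization == "token.thumbprint",
#
#   z = hashlib.sha256(b"token.thumbprint").hexdigest().lower().encode()  # 64 hex bytes
#   z_domain = z[:32] + b'.' + z[32:] + b'.acme.invalid'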
def gen_cert(self, key=None, bits=2048):
"""Generate tls-sni-01 certificate.
:param OpenSSL.crypto.PKey key: Optional private key used in
certificate generation. If not provided (``None``), then
fresh key will be generated.
:param int bits: Number of bits for newly generated key.
:rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey`
"""
if key is None:
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)
return crypto_util.gen_ss_cert(key, [
# z_domain is too big to fit into CN, hence first dummy domain
'dummy', self.z_domain.decode()], force_san=True), key
def probe_cert(self, domain, **kwargs):
"""Probe tls-sni-01 challenge certificate.
:param unicode domain:
"""
# TODO: domain is not necessary if host is provided
if "host" not in kwargs:
host = socket.gethostbyname(domain)
logging.debug('%s resolved to %s', domain, host)
kwargs["host"] = host
kwargs.setdefault("port", self.PORT)
kwargs["name"] = self.z_domain
# TODO: try different methods?
# pylint: disable=protected-access
return crypto_util.probe_sni(**kwargs)
def verify_cert(self, cert):
"""Verify tls-sni-01 challenge certificate.
:param OpenSSL.crypto.X509 cert: Challenge certificate.
:returns: Whether the certificate was successfully verified.
:rtype: bool
"""
# pylint: disable=protected-access
sans = crypto_util._pyopenssl_cert_or_req_san(cert)
logging.debug('Certificate %s. SANs: %s', cert.digest('sha1'), sans)
return self.z_domain.decode() in sans
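# Self-check sketch: a certificate produced by gen_cert() above must pass
# verify_cert(), because gen_cert() places z_domain into the SAN list.
#
#   cert, key = response.gen_cert()
#   assert response.verify_cert(cert)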
def simple_verify(self, chall, domain, account_public_key,
cert=None, **kwargs):
"""Simple verify.
Verify ``validation`` using ``account_public_key``, optionally
probe tls-sni-01 certificate and check using `verify_cert`.
:param .challenges.TLSSNI01 chall: Corresponding challenge.
:param str domain: Domain name being validated.
:param JWK account_public_key:
:param OpenSSL.crypto.X509 cert: Optional certificate. If not
provided (``None``) certificate will be retrieved using
`probe_cert`.
:param int port: Port used to probe the certificate.
:returns: ``True`` iff client's control of the domain has been
verified, ``False`` otherwise.
:rtype: bool
"""
if not self.verify(chall, account_public_key):
logger.debug("Verification of key authorization in response failed")
return False
if cert is None:
try:
cert = self.probe_cert(domain=domain, **kwargs)
except errors.Error as error:
logger.debug(error, exc_info=True)
return False
return self.verify_cert(cert)
@Challenge.register # pylint: disable=too-many-ancestors
class TLSSNI01(KeyAuthorizationChallenge):
"""ACME tls-sni-01 challenge."""
response_cls = TLSSNI01Response
typ = response_cls.typ
# boulder#962, ietf-wg-acme#22
#n = jose.Field("n", encoder=int, decoder=int)
def validation(self, account_key, **kwargs):
"""Generate validation.
:param JWK account_key:
:param OpenSSL.crypto.PKey cert_key: Optional private key used
in certificate generation. If not provided (``None``), then
fresh key will be generated.
:rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey`
"""
return self.response(account_key).gen_cert(key=kwargs.get('cert_key'))
@Challenge.register
class RecoveryContact(ContinuityChallenge):
"""ACME "recoveryContact" challenge.
:ivar unicode activation_url:
:ivar unicode success_url:
:ivar unicode contact:
"""
typ = "recoveryContact"
activation_url = jose.Field("activationURL", omitempty=True)
success_url = jose.Field("successURL", omitempty=True)
contact = jose.Field("contact", omitempty=True)
@ChallengeResponse.register
class RecoveryContactResponse(ChallengeResponse):
"""ACME "recoveryContact" challenge response.
:ivar unicode token:
"""
typ = "recoveryContact"
token = jose.Field("token", omitempty=True)
@Challenge.register
class ProofOfPossession(ContinuityChallenge):
"""ACME "proofOfPossession" challenge.
:ivar .JWAAlgorithm alg:
:ivar bytes nonce: Random data, **not** base64-encoded.
:ivar hints: Various clues for the client (:class:`Hints`).
"""
typ = "proofOfPossession"
NONCE_SIZE = 16
class Hints(jose.JSONObjectWithFields):
"""Hints for "proofOfPossession" challenge.
:ivar JWK jwk: JSON Web Key
:ivar tuple cert_fingerprints: `tuple` of `unicode`
:ivar tuple certs: Sequence of :class:`acme.jose.ComparableX509`
certificates.
:ivar tuple subject_key_identifiers: `tuple` of `unicode`
:ivar tuple issuers: `tuple` of `unicode`
:ivar tuple authorized_for: `tuple` of `unicode`
"""
jwk = jose.Field("jwk", decoder=jose.JWK.from_json)
cert_fingerprints = jose.Field(
"certFingerprints", omitempty=True, default=())
certs = jose.Field("certs", omitempty=True, default=())
subject_key_identifiers = jose.Field(
"subjectKeyIdentifiers", omitempty=True, default=())
serial_numbers = jose.Field("serialNumbers", omitempty=True, default=())
issuers = jose.Field("issuers", omitempty=True, default=())
authorized_for = jose.Field("authorizedFor", omitempty=True, default=())
@certs.encoder
def certs(value): # pylint: disable=missing-docstring,no-self-argument
return tuple(jose.encode_cert(cert) for cert in value)
@certs.decoder
def certs(value): # pylint: disable=missing-docstring,no-self-argument
return tuple(jose.decode_cert(cert) for cert in value)
alg = jose.Field("alg", decoder=jose.JWASignature.from_json)
nonce = jose.Field(
"nonce", encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=NONCE_SIZE))
hints = jose.Field("hints", decoder=Hints.from_json)
@ChallengeResponse.register
class ProofOfPossessionResponse(ChallengeResponse):
"""ACME "proofOfPossession" challenge response.
:ivar bytes nonce: Random data, **not** base64-encoded.
:ivar acme.other.Signature signature: Signature of this message.
"""
typ = "proofOfPossession"
NONCE_SIZE = ProofOfPossession.NONCE_SIZE
nonce = jose.Field(
"nonce", encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=NONCE_SIZE))
signature = jose.Field("signature", decoder=other.Signature.from_json)
def verify(self):
"""Verify the challenge."""
# self.signature is not Field | pylint: disable=no-member
return self.signature.verify(self.nonce)
@Challenge.register # pylint: disable=too-many-ancestors
class DNS(_TokenDVChallenge):
"""ACME "dns" challenge."""
typ = "dns"
LABEL = "_acme-challenge"
"""Label clients prepend to the domain name being validated."""
def gen_validation(self, account_key, alg=jose.RS256, **kwargs):
"""Generate validation.
:param .JWK account_key: Private account key.
:param .JWA alg:
:returns: This challenge wrapped in `.JWS`
:rtype: .JWS
"""
return jose.JWS.sign(
payload=self.json_dumps(sort_keys=True).encode('utf-8'),
key=account_key, alg=alg, **kwargs)
def check_validation(self, validation, account_public_key):
"""Check validation.
:param JWS validation:
:param JWK account_public_key:
:rtype: bool
"""
if not validation.verify(key=account_public_key):
return False
try:
return self == self.json_loads(
validation.payload.decode('utf-8'))
except jose.DeserializationError as error:
logger.debug("Checking validation for DNS failed: %s", error)
return False
def gen_response(self, account_key, **kwargs):
"""Generate response.
:param .JWK account_key: Private account key.
:param .JWA alg:
:rtype: DNSResponse
"""
return DNSResponse(validation=self.gen_validation(
account_key, **kwargs))
def validation_domain_name(self, name):
"""Domain name for TXT validation record.
:param unicode name: Domain name being validated.
"""
return "{0}.{1}".format(self.LABEL, name)
@ChallengeResponse.register
class DNSResponse(ChallengeResponse):
"""ACME "dns" challenge response.
:param JWS validation:
"""
typ = "dns"
validation = jose.Field("validation", decoder=jose.JWS.from_json)
def check_validation(self, chall, account_public_key):
"""Check validation.
:param challenges.DNS chall:
:param JWK account_public_key:
:rtype: bool
"""
return chall.check_validation(self.validation, account_public_key)
|
{
"content_hash": "74c20a6cd3d197bf22017a0d36633a87",
"timestamp": "",
"source": "github",
"line_count": 645,
"max_line_length": 80,
"avg_line_length": 31.27596899224806,
"alnum_prop": 0.6321320577008873,
"repo_name": "kuba/letsencrypt",
"id": "68bf3fce4f5c7c115ab2502d0f0d15d09185a813",
"size": "20173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acme/acme/challenges.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14147"
},
{
"name": "Augeas",
"bytes": "4729"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1274554"
},
{
"name": "Shell",
"bytes": "39319"
}
],
"symlink_target": ""
}
|
[Anonymized entry: the original file content was replaced with 'X' characters in the source dump. Per the metadata below it was adminlte/static/plugins/datatables/extensions/Responsive/examples/styling/compact.html, a DataTables "Responsive" compact-styling example page; nothing else is recoverable.]
|
{
"content_hash": "ef1fca411745ff5c932fed55dad8b727",
"timestamp": "",
"source": "github",
"line_count": 816,
"max_line_length": 167,
"avg_line_length": 27.040441176470587,
"alnum_prop": 0.7451167006571493,
"repo_name": "dnaextrim/django_adminlte_x",
"id": "555f187e352ef0018230bdf0d34a58df0ff76b8a",
"size": "22065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adminlte/static/plugins/datatables/extensions/Responsive/examples/styling/compact.html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "487538"
},
{
"name": "HTML",
"bytes": "1939871"
},
{
"name": "JavaScript",
"bytes": "2949324"
},
{
"name": "PHP",
"bytes": "3841"
},
{
"name": "Python",
"bytes": "11030"
}
],
"symlink_target": ""
}
|
from .vkapi import *
from typing import Dict, List, Optional, Callable, Tuple, Union
import dataclasses
from dataclasses import dataclass
@dataclass
class HandleCreateCommand:
"""Information about a handle-creation command
Args:
command: handle creation command (e.g. vkCreateImage)
parent_param: parameter of command for the parent (e.g. device in
vkCreateImage)
create_info: type of the *CreateInfo struct (e.g. VkImageCreateInfo)
create_info_param: parameter of command that holds the pointer to the
create_info struct
pool_member: field of create_info struct that holds the pool for the newly
created handle (e.g. descriptorPool in VkDescriptorSetAllocateInfo)
handle_param: output parameter of command for the newly created handles
is_create: indicates this is a `vkCreate*`-style command--new handle(s)
are created, and they do not belong to any pool
is_pool_allocate: indicates this is a command that creates new handles in
a pool (e.g. vkAllocateDescriptorSets)
is_get: indicates this is a command that returns existing handles, rather
than creating new handles. E.g. `vkGetDeviceQueue` or
`vkEnumeratePhysicalDevices`
"""
command: Command
parent_param: Optional[Field] = None
create_info: Optional[Struct] = None
create_info_param: Optional[Field] = None
pool_member: Optional[Field] = None
handle_param: Optional[Field] = None
is_create: bool = False
is_pool_allocate: bool = False
is_get: bool = False
@dataclass
class HandleDestroyCommand:
"""Information about a handle-destruction command
Args:
command: handle destruction command (e.g. vkDestroyImage)
parent_param: parameter of command that holds the parent (e.g. device in
vkDestroyImage)
pool_param: parameter of command for the pool containing the destroyed
handles (e.g. descriptorPool in vkFreeDescriptorSets). None for handles
that don't belong to pools
handle_param: parameter of command for the handle(s) being destroyed
"""
command: Command
parent_param: Optional[Field]
pool_param: Optional[Field]
handle_param: Optional[Field]
@dataclass
class HandleInfo:
"""Information about the commands that create/destroy a handle type
Args:
handle: handle type
parent: parent handle type (e.g. VkDevice is the parent for VkImage)
pool: pool handle type (e.g. VkDescriptorPool is the pool for
VkDescriptorSet)
pool_elem: handle type of elements in *this* pool (e.g. VkDescriptorSet is
the pool_elem for VkDescriptorPool). None for non-pool handles.
create_cmds: list of commands that create this handle type
destroy_cmd: command that destroys this handle type
reset_pool_cmd: command that destroys all handles of this type within a
pool (e.g. vkResetDescriptorPool is reset_pool_cmd for VkDescriptorSet)
*Note:* vkResetCommandPool is *not* reset_pool_cmd for VkCommandBuffer,
because vkResetCommandPool does not free the command buffers, merely
moves the command buffers back to the initial state.
object_type: the VkObjectType enum value for this handle type
"""
handle: Handle
parent: Optional[Handle] = None
pool: Optional[Handle] = None
pool_elem: Optional[Handle] = None
create_cmds: List[HandleCreateCommand] = dataclasses.field(
default_factory=list)
destroy_cmd: Optional[HandleDestroyCommand] = None
reset_pool_cmd: Optional[HandleDestroyCommand] = None
object_type: Optional[EnumValue] = None
class HandleInfoGlobals:
"""Functions for templates to access information about handles
Example Usage:
env = vkapi.JinjaEnvironment(registry)
handle_infos = vkapi.HandleInfoGlobals(registry)
env.globals.update(handle_infos.globals)
"""
def __init__(self, registry: Registry):
self._registry = registry
self._build_handle_infos()
@property
def globals(self) -> Dict[str, Callable]:
"""Returns a dict to add to the jinja Environment globals"""
return {
f: getattr(self, f)
for f in dir(self)
if not f.startswith('_') and f != 'globals'
}
def handle_info(self, h: Union[str, Handle]) -> Optional[HandleInfo]:
"""Find HandleInfo for a given handle type"""
if isinstance(h, Handle):
h = h.name
return self._handle_infos.get(h)
def command_handle_created(
self, cmd: Command
) -> Tuple[Optional[HandleInfo], Optional[HandleCreateCommand]]:
"""Find info about the handle type created by a command
Returns: A pair (handle_info, handle_create_command). If cmd is not
a handle-creation command, both handle_info and handle_create_command
are None
"""
for p in cmd.parameters:
if not (isinstance(p.type, Pointer) or isinstance(p.type, DynamicArray)):
continue
if p.type.is_const:
continue
info = self.handle_info(p.type.base_type)
if info is None:
continue
for create_cmd in info.create_cmds:
if cmd is create_cmd.command and p is create_cmd.handle_param:
return info, create_cmd
continue
return None, None
def is_create_command(self, cmd: Command) -> bool:
"""Indicates whether the command is a handle-creation command
E.g. vkCreateImage
"""
info, create_cmd = self.command_handle_created(cmd)
if create_cmd is not None:
return create_cmd.is_create
else:
return False
def is_pool_allocate_command(self, cmd: Command) -> bool:
"""Indicates whether the command allocates handles in a pool
E.g. vkAllocateDescriptorSets
"""
info, create_cmd = self.command_handle_created(cmd)
if create_cmd is not None:
return create_cmd.is_pool_allocate
else:
return False
def is_get_command(self, cmd: Command) -> bool:
"""Indicates whether the command gets existing handles
E.g. vkGetDeviceQueue or vkEnumeratePhysicalDevices
"""
info, create_cmd = self.command_handle_created(cmd)
if create_cmd is not None:
return create_cmd.is_get
else:
return False
def command_handle_destroyed(self, cmd: Command) -> Optional[HandleInfo]:
"""Find info about the handle type destroyed by a command
Returns: Info about the destroyed handle type, or None if the command is
not a handle-destruction command.
"""
for p in cmd.parameters:
t = p.type
if isinstance(t, DynamicArray):
t = t.base_type
if not isinstance(t, Handle):
continue
info = self.handle_info(t)
if info is None:
continue
if info.destroy_cmd is not None and cmd is info.destroy_cmd.command:
return info
def is_destroy_command(self, cmd: Command) -> bool:
"""Indicates whether the command is a handle-destruction command"""
return self.command_handle_destroyed(cmd) is not None
def command_pool_reset(
self, cmd: Command) -> Tuple[Optional[HandleInfo], Optional[HandleInfo]]:
"""Find info about the handle type destroyed by a pool reset command
Returns: A pair (pool_info, elem_info) describing the pool handle type and
the element handle type it destroys; both are None if the command is not a
pool reset command.
E.g., command_pool_reset(vkResetDescriptorPool) returns the HandleInfo pair
for (VkDescriptorPool, VkDescriptorSet).
"""
for p in cmd.parameters:
pool_info = self.handle_info(p.type)
if pool_info is None or pool_info.pool_elem is None:
continue
elem_info = self.handle_info(pool_info.pool_elem)
if elem_info.reset_pool_cmd is not None and cmd is elem_info.reset_pool_cmd.command:
return pool_info, elem_info
return None, None
def is_reset_pool_command(self, cmd: Command) -> bool:
"""Indicates whether the command is a pool-reset command"""
pool_info, elem_info = self.command_pool_reset(cmd)
return elem_info is not None
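# Classification sketch (illustrative; assumes a parsed `registry`, as in the
# HandleInfoGlobals docstring above):
#
#   g = HandleInfoGlobals(registry)
#   g.is_create_command(registry.commands['vkCreateImage'])                    # True
#   g.is_pool_allocate_command(registry.commands['vkAllocateDescriptorSets'])  # True
#   g.is_get_command(registry.commands['vkGetDeviceQueue'])                    # True
#   g.is_reset_pool_command(registry.commands['vkResetDescriptorPool'])        # True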
def _build_handle_infos(self) -> None:
registry = self._registry
self._handle_infos = {}
for name, cmd in registry.commands.items():
if cmd.name != name:
continue # alias
if name.startswith('vkCreate') or name.startswith('vkAllocate'):
self._add_handle_create_command(name)
self._add_handle_create_command('vkEnumeratePhysicalDevices')
self._add_handle_create_command('vkGetDeviceQueue')
self._add_handle_create_command('vkGetDeviceQueue2')
self._add_handle_create_command('vkGetSwapchainImagesKHR')
self._add_handle_create_command('vkGetDisplayPlaneSupportedDisplaysKHR')
for name, t in registry.types.items():
if t.name != name:
continue # alias
if isinstance(t, Handle):
if name not in self._handle_infos:
print(f'Warning: Could not find create command for {name}')
info = HandleInfo(
handle=t,
parent=t.parent,
)
self._handle_infos[name] = info
for name, cmd in registry.commands.items():
if cmd.name != name:
continue # alias
if name.startswith('vkDestroy'):
self._add_handle_destroy_command(name)
elif name.startswith('vkFree'):
self._add_handle_free_command(name)
self._add_handle_reset_command('vkResetDescriptorPool')
VkObjectType = registry.types['VkObjectType']
for v in VkObjectType.values.values():
if isinstance(v, TypeAlias):
continue
if v.name == 'VK_OBJECT_TYPE_UNKNOWN':
continue
handle_name = v.comment
if handle_name not in self._handle_infos:
words = v.name[len("VK_OBJECT_TYPE_"):].split('_')
words = [w.capitalize() for w in words]
if words[-1].upper() in {'EXT', 'KHR'}:
words[-1] = words[-1].upper()
handle_name = "Vk" + ''.join(words)
self._handle_infos[handle_name].object_type = v
for info in self._handle_infos.values():
if info.object_type is None:
print(f'ERROR: No VkObjectType found for {info.handle.name}')
assert (info.object_type is not None)
for info in self._handle_infos.values():
if info.pool:
self._handle_infos[info.pool.name].pool_elem = info.handle
def _add_handle_create_command(self, name: str) -> None:
cmd = self._registry.commands[name]
parent_param = None
create_info_param = None
handle_param = None
VkAllocationCallbacks = self._registry.types['VkAllocationCallbacks']
for p in cmd.parameters:
is_pointer = isinstance(p.type, Pointer) or isinstance(
p.type, DynamicArray)
is_pointer_to_struct = is_pointer and isinstance(p.type.base_type, Struct)
is_pointer_to_handle = is_pointer and isinstance(p.type.base_type, Handle)
if isinstance(p.type, Handle):
if parent_param is None:
parent_param = p
if is_pointer_to_struct and p.type.is_const and p.type.base_type != VkAllocationCallbacks:
assert (create_info_param is None)
create_info_param = p
elif is_pointer_to_handle and not p.type.is_const:
assert (handle_param is None)
handle_param = p
if handle_param is None:
print(f'Warning: no handle parameter found for {cmd.name}. Skipping.')
return
assert (parent_param is not None or name == 'vkCreateInstance')
assert (handle_param is not None)
parent = parent_param.type if parent_param is not None else None
create_info = create_info_param.type.base_type if create_info_param is not None else None
pool_member = None
is_create = name.startswith('vkCreate')
is_pool_allocate = False
pool = None
if name.startswith('vkAllocate'):
for m in create_info.members:
if isinstance(m.type, Handle):
assert (pool_member is None)
pool_member = m
if pool_member is not None:
is_pool_allocate = True
pool = pool_member.type
else:
# vkAllocateMemory is actually a create command, not a pool allocation command
is_create = True
is_get = not (is_create or is_pool_allocate)
handle = handle_param.type.base_type
create_cmd = HandleCreateCommand(
command=cmd,
parent_param=parent_param,
pool_member=pool_member,
create_info=create_info,
create_info_param=create_info_param,
handle_param=handle_param,
is_create=is_create,
is_pool_allocate=is_pool_allocate,
is_get=is_get,
)
if handle.name in self._handle_infos:
info = self._handle_infos[handle.name]
info.create_cmds.append(create_cmd)
assert (info.handle is handle)
assert (info.parent is parent)
assert (info.pool is pool)
else:
info = HandleInfo(
handle=handle,
parent=parent,
pool=pool,
create_cmds=[create_cmd],
)
self._handle_infos[handle.name] = info
def _add_handle_destroy_command(self, name: str) -> None:
cmd = self._registry.commands[name]
params = list(cmd.parameters)
assert (2 <= len(params) and len(params) <= 3)
parent_param = None
if len(params) == 3:
parent_param = params[0]
params = params[1:]
parent = parent_param.type
assert (isinstance(parent, Handle))
handle_param = params[0]
handle = handle_param.type
assert (isinstance(handle, Handle))
alloc_param = params[1]
assert (isinstance(alloc_param.type, Pointer))
assert (isinstance(alloc_param.type.base_type, Struct))
info = self._handle_infos[handle.name]
assert (info.destroy_cmd is None)
info.destroy_cmd = HandleDestroyCommand(
command=cmd,
parent_param=parent_param,
pool_param=None,
handle_param=handle_param,
)
def _add_handle_free_command(self, name: str) -> None:
if name == 'vkFreeMemory':
self._add_handle_destroy_command(name)
return
cmd = self._registry.commands[name]
assert (len(cmd.parameters) == 4)
parent_param = cmd.parameters[0]
parent = parent_param.type
assert (isinstance(parent, Handle))
pool_param = cmd.parameters[1]
pool = pool_param.type
assert (isinstance(pool, Handle))
count_param = cmd.parameters[2]
count = count_param.type
assert (count.name == 'uint32_t')
handle_param = cmd.parameters[3]
assert (isinstance(handle_param.type, DynamicArray))
handle = handle_param.type.base_type
assert (isinstance(handle, Handle))
info = self._handle_infos[handle.name]
assert (info.destroy_cmd is None)
info.destroy_cmd = HandleDestroyCommand(
command=cmd,
parent_param=parent_param,
pool_param=pool_param,
handle_param=handle_param,
)
def _add_handle_reset_command(self, name: str) -> None:
cmd = self._registry.commands[name]
assert (len(cmd.parameters) >= 2)
parent_param = cmd.parameters[0]
parent = parent_param.type
assert (isinstance(parent, Handle))
pool_param = cmd.parameters[1]
pool = pool_param.type
if not isinstance(pool, Handle):
return
handle = None
for info in self._handle_infos.values():
if info.pool is not None and info.pool.name == pool.name:
handle = info.handle
break
if handle is None:
return
assert (info.pool.name == pool.name)
assert (info.reset_pool_cmd is None)
info.reset_pool_cmd = HandleDestroyCommand(
command=cmd,
parent_param=parent_param,
pool_param=pool_param,
handle_param=None,
)
|
{
"content_hash": "fb998ff6f914fa4e59c587d539373cc2",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 96,
"avg_line_length": 34.986363636363635,
"alnum_prop": 0.6681174483565026,
"repo_name": "googlestadia/vkspecgen",
"id": "44cffd224d6034b6297d31803745b51e9a6a7ad2",
"size": "15975",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "vkapi/handle_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "82218"
},
{
"name": "Shell",
"bytes": "1230"
},
{
"name": "Starlark",
"bytes": "1201"
}
],
"symlink_target": ""
}
|
import tkinter
from time import strftime
class Relogio(tkinter.Label):
def __init__(self):
tkinter.Label.__init__(self)
self.pack()
self['text'] = strftime('%H:%M:%S')
self['font'] = 'Helvetica 120 bold'
self.tictac()
def tictac(self):
agora = strftime('%H:%M:%S')
if agora != self['text']:
self['text'] = agora
self.after(100, self.tictac)
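# Design note: tictac() re-schedules itself every 100 ms via Tk's after(), but
# only rewrites the label when the formatted time actually changes, so the
# widget redraws at most once per second while staying within ~0.1 s of the
# wall clock.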
rel = Relogio()
rel.mainloop()
|
{
"content_hash": "74bd840a9992188ba6f0241f6f3507b8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 43,
"avg_line_length": 21.952380952380953,
"alnum_prop": 0.544468546637744,
"repo_name": "pythonprobr/oopy",
"id": "e1ac857c5d9bb0971f1143ca7b94707af7fd8cd8",
"size": "461",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "relogio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15247"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class OrientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="orientation",
parent_name="scatterpolargl.marker.colorbar",
**kwargs,
):
super(OrientationValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", ["h", "v"]),
**kwargs,
)
|
{
"content_hash": "ff000592a6a63617414f6a8b9a245122",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 77,
"avg_line_length": 31.058823529411764,
"alnum_prop": 0.5814393939393939,
"repo_name": "plotly/plotly.py",
"id": "31206068114b584c73ff7c900f47be5ba3da5020",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterpolargl/marker/colorbar/_orientation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from akanda.rug.openstack.common.gettextutils import _
from akanda.rug.openstack.common import log as logging
from akanda.rug.openstack.common.notifier import rpc_notifier
LOG = logging.getLogger(__name__)
def notify(context, message):
"""Deprecated in Grizzly. Please use rpc_notifier instead."""
LOG.deprecated(_("The rabbit_notifier is now deprecated."
" Please use rpc_notifier instead."))
rpc_notifier.notify(context, message)
|
{
"content_hash": "b8123d6db678ca42ee9bac3005f4bed6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 65,
"avg_line_length": 36.07692307692308,
"alnum_prop": 0.7249466950959488,
"repo_name": "dreamhost/akanda-rug",
"id": "2718971db287f4908a88de3a40f457e5ecc6be7a",
"size": "1707",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "akanda/rug/openstack/common/notifier/rabbit_notifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "742381"
}
],
"symlink_target": ""
}
|
import pytest
from xpaw.spider import Spider
from xpaw import events
from .crawler import Crawler
class FooSpider(Spider):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.data = self.config['data']
def open(self):
super().open()
self.data['open'] = ''
def close(self):
super().close()
self.data['close'] = ''
@pytest.mark.asyncio
async def test_spider():
data = {}
crawler = Crawler(data=data)
spider = FooSpider.from_crawler(crawler)
await crawler.event_bus.send(events.crawler_start)
with pytest.raises(NotImplementedError):
spider.start_requests()
with pytest.raises(NotImplementedError):
spider.parse(None)
await crawler.event_bus.send(events.crawler_shutdown)
assert 'open' in data and 'close' in data
|
{
"content_hash": "659187f735aef4778f432de798ff01ca",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 57,
"avg_line_length": 25.029411764705884,
"alnum_prop": 0.6415981198589894,
"repo_name": "jadbin/xpaw",
"id": "5cf0cfb5ecc23fae00721131418e8865264d504b",
"size": "867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_spider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1110"
},
{
"name": "Makefile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "145301"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 1f9c61031fa
Revises: 1f872d11bbf
Create Date: 2016-01-24 17:46:54.879784
"""
# revision identifiers, used by Alembic.
revision = '1f9c61031fa'
down_revision = '1f872d11bbf'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('setting', sa.Column('key', sa.String(length=100)))
op.alter_column('setting', 'name',
existing_type=sa.VARCHAR(length=100),
nullable=True)
op.drop_column('setting', 'id')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('setting', sa.Column('id', sa.INTEGER(), nullable=False))
op.alter_column('setting', 'name',
existing_type=sa.VARCHAR(length=100),
nullable=False)
op.drop_column('setting', 'key')
### end Alembic commands ###
|
{
"content_hash": "5311c699fea0924189c0e76e68e34ea6",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 27.176470588235293,
"alnum_prop": 0.658008658008658,
"repo_name": "Encrylize/flask-blogger",
"id": "758784e458a4c71a4f0a971a3f0d56da34059173",
"size": "924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/1f9c61031fa_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "478"
},
{
"name": "HTML",
"bytes": "14166"
},
{
"name": "JavaScript",
"bytes": "53574"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "55355"
}
],
"symlink_target": ""
}
|
import webob
from nova.api.openstack.compute.contrib import server_groups
from nova.api.openstack.compute.plugins.v3 import server_groups as sg_v3
from nova.api.openstack import extensions
from nova import context
import nova.db
from nova import exception
from nova import objects
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.unit.api.openstack import fakes
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
FAKE_UUID3 = 'b8713410-9ba3-e913-901b-13410ca90121'
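# Minimal dict subclass that also allows attribute-style access, used to fake
# DB rows in these tests.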
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def server_group_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
return sgroup
def server_group_resp_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
sgroup.setdefault('policies', [])
sgroup.setdefault('members', [])
return sgroup
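# Translate an API-style server group dict into a fake DB record: rename
# 'id' to 'uuid', default the list fields, and stamp in bookkeeping columns.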
def server_group_db(sg):
attrs = sg.copy()
if 'id' in attrs:
attrs['uuid'] = attrs.pop('id')
if 'policies' in attrs:
policies = attrs.pop('policies')
attrs['policies'] = policies
else:
attrs['policies'] = []
if 'members' in attrs:
members = attrs.pop('members')
attrs['members'] = members
else:
attrs['members'] = []
attrs['deleted'] = 0
attrs['deleted_at'] = None
attrs['created_at'] = None
attrs['updated_at'] = None
if 'user_id' not in attrs:
attrs['user_id'] = 'user_id'
if 'project_id' not in attrs:
attrs['project_id'] = 'project_id'
attrs['id'] = 7
return AttrDict(attrs)
class ServerGroupTestV21(test.TestCase):
validation_error = exception.ValidationError
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
self.req = fakes.HTTPRequest.blank('')
def _setup_controller(self):
self.controller = sg_v3.ServerGroupController()
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies):
sgroup = server_group_template()
sgroup['policies'] = policies
res_dict = self.controller.create(self.req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group(self):
policies = ['affinity', 'anti-affinity']
for policy in policies:
self._create_server_group_normal([policy])
def _create_instance(self, context):
instance = objects.Instance(context=context, image_ref=1, node='node1',
reservation_id='a', host='host1', project_id='fake',
vm_state='fake', system_metadata={'key': 'value'})
instance.create()
return instance
def _create_instance_group(self, context, members):
ig = objects.InstanceGroup(context=context, name='fake_name',
user_id='fake_user', project_id='fake',
members=members)
ig.create()
return ig.uuid
def _create_groups_and_instances(self, ctx):
instances = [self._create_instance(ctx), self._create_instance(ctx)]
members = [instance.uuid for instance in instances]
ig_uuid = self._create_instance_group(ctx, members)
return (ig_uuid, instances, members)
def test_display_members(self):
ctx = context.RequestContext('fake_user', 'fake')
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
res_dict = self.controller.show(self.req, ig_uuid)
result_members = res_dict['server_group']['members']
self.assertEqual(2, len(result_members))
for member in members:
self.assertIn(member, result_members)
def test_display_active_members_only(self):
ctx = context.RequestContext('fake_user', 'fake')
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
instances[1].destroy()
# check that the instance does not exist
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
res_dict = self.controller.show(self.req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(1, len(result_members))
self.assertIn(instances[0].uuid, result_members)
def test_create_server_group_with_illegal_name(self):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_list_server_group_by_tenant(self):
groups = []
policies = ['anti-affinity']
members = []
metadata = {} # always empty
names = ['default-x', 'test']
sg1 = server_group_resp_template(id=str(1345),
name=names[0],
policies=policies,
members=members,
metadata=metadata)
sg2 = server_group_resp_template(id=str(891),
name=names[1],
policies=policies,
members=members,
metadata=metadata)
groups = [sg1, sg2]
expected = {'server_groups': groups}
def return_server_groups(context, project_id):
return [server_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'instance_group_get_all_by_project_id',
return_server_groups)
res_dict = self.controller.index(self.req)
self.assertEqual(res_dict, expected)
def test_list_server_group_all(self):
all_groups = []
tenant_groups = []
policies = ['anti-affinity']
members = []
metadata = {} # always empty
names = ['default-x', 'test']
sg1 = server_group_resp_template(id=str(1345),
name=names[0],
policies=[],
members=members,
metadata=metadata)
sg2 = server_group_resp_template(id=str(891),
name=names[1],
policies=policies,
members=members,
metadata={})
tenant_groups = [sg2]
all_groups = [sg1, sg2]
all = {'server_groups': all_groups}
tenant_specific = {'server_groups': tenant_groups}
def return_all_server_groups(context):
return [server_group_db(sg) for sg in all_groups]
self.stubs.Set(nova.db, 'instance_group_get_all',
return_all_server_groups)
def return_tenant_server_groups(context, project_id):
return [server_group_db(sg) for sg in tenant_groups]
self.stubs.Set(nova.db, 'instance_group_get_all_by_project_id',
return_tenant_server_groups)
path = '/os-server-groups?all_projects=True'
req = fakes.HTTPRequest.blank(path, use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, all)
req = fakes.HTTPRequest.blank(path)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, tenant_specific)
def test_delete_server_group_by_id(self):
sg = server_group_template(id='123')
self.called = False
def server_group_delete(context, id):
self.called = True
def return_server_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return server_group_db(sg)
self.stubs.Set(nova.db, 'instance_group_delete',
server_group_delete)
self.stubs.Set(nova.db, 'instance_group_get',
return_server_group)
resp = self.controller.delete(self.req, '123')
self.assertTrue(self.called)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.controller, sg_v3.ServerGroupController):
status_int = self.controller.delete.wsgi_code
else:
status_int = resp.status_int
self.assertEqual(204, status_int)
def test_delete_non_existing_server_group(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
self.req, 'invalid')
class ServerGroupTestV2(ServerGroupTestV21):
validation_error = webob.exc.HTTPBadRequest
def _setup_controller(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {}
self.controller = server_groups.ServerGroupController(ext_mgr)
|
{
"content_hash": "bed16061068b677941038a050524c07b",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 79,
"avg_line_length": 40.0919881305638,
"alnum_prop": 0.581896232699282,
"repo_name": "orbitfp7/nova",
"id": "69642119106271f93f18f753d4e03a49096ccfdf",
"size": "14150",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3272"
},
{
"name": "Python",
"bytes": "15640028"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "XML",
"bytes": "45493"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from . import NEO
from . import get_online_data
# Create your views here.
from .NEO import Get_UserData
def index(request):
return render(request,'index.html',{'title':'Welcome To Train Prediction'})
def handle_query(request):
    if request.method == 'POST':
        return HttpResponseRedirect("/predict")
else:
return render(request, 'index.html', {'title': 'Welcome To Train Prediction'})
def result(request):
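    # Read the user's query (train number and date), train the engine on that
    # train's data, and map the numeric prediction to a human-readable status.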
l = Get_UserData()
train_no = l[0]
date = l[1]
myneon = NEO.Neon_Engine()
myneon.getdata_and_train(train_no=train_no)
res = myneon.predict_delay(data=date)
mymsg = ' '
if res == 0:
mymsg = 'Delay'
elif res == 2:
mymsg = 'Before'
else:
mymsg = 'On Time'
return render(request, 'result.html', {'title': 'Status',
'result': mymsg}
)
|
{
"content_hash": "b99595e596c40a0975945390df101786",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 86,
"avg_line_length": 26.35135135135135,
"alnum_prop": 0.602051282051282,
"repo_name": "abhi98khandelwal/HINT",
"id": "c380dee774a31ea073544513ca26d792a56ffbb3",
"size": "975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HintApp/predictor/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1411"
},
{
"name": "HTML",
"bytes": "5229"
},
{
"name": "Python",
"bytes": "14897"
}
],
"symlink_target": ""
}
|
import pytest
from tests.support.asserts import assert_error, assert_success
from tests.support.image import png_dimensions
from . import element_dimensions
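# Send the WebDriver "Take Element Screenshot" command directly through the
# test transport.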
def take_element_screenshot(session, element_id):
return session.transport.send(
"GET",
"session/{session_id}/element/{element_id}/screenshot".format(
session_id=session.session_id,
element_id=element_id,
)
)
def test_no_top_browsing_context(session, closed_window):
response = take_element_screenshot(session, "foo")
assert_error(response, "no such window")
def test_no_browsing_context(session, closed_frame, inline):
session.url = inline("<input>")
element = session.find.css("input", all=False)
response = take_element_screenshot(session, element.id)
screenshot = assert_success(response)
assert png_dimensions(screenshot) == element_dimensions(session, element)
@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
def test_stale_element_reference(session, stale_element, as_frame):
element = stale_element("<input>", "input", as_frame=as_frame)
result = take_element_screenshot(session, element.id)
assert_error(result, "stale element reference")
def test_format_and_dimensions(session, inline):
session.url = inline("<input>")
element = session.find.css("input", all=False)
response = take_element_screenshot(session, element.id)
screenshot = assert_success(response)
assert png_dimensions(screenshot) == element_dimensions(session, element)
|
{
"content_hash": "2cd711e4d9756dcfdf9ae2dbcbc1bb0a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 89,
"avg_line_length": 32.8125,
"alnum_prop": 0.7092063492063492,
"repo_name": "nwjs/chromium.src",
"id": "79ffa15b7461ac6e2ae7087090fafc0ae91cd4fe",
"size": "1575",
"binary": false,
"copies": "3",
"ref": "refs/heads/nw70",
"path": "third_party/blink/web_tests/external/wpt/webdriver/tests/take_element_screenshot/screenshot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|