repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
talhajaved/nyuadmarket | flask/lib/python2.7/site-packages/sqlalchemy/orm/identity.py | 21 | 7091 | # orm/identity.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import weakref
from . import attributes
from .. import util
class IdentityMap(dict):
def __init__(self):
self._modified = set()
self._wr = weakref.ref(self)
def replace(self, state):
raise NotImplementedError()
def add(self, state):
raise NotImplementedError()
def update(self, dict):
raise NotImplementedError("IdentityMap uses add() to insert data")
def clear(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def _manage_incoming_state(self, state):
state._instance_dict = self._wr
if state.modified:
self._modified.add(state)
def _manage_removed_state(self, state):
del state._instance_dict
self._modified.discard(state)
def _dirty_states(self):
return self._modified
def check_modified(self):
"""return True if any InstanceStates present have been marked
as 'modified'.
"""
return bool(self._modified)
def has_key(self, key):
return key in self
def popitem(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def pop(self, key, *args):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def setdefault(self, key, default=None):
raise NotImplementedError("IdentityMap uses add() to insert data")
def copy(self):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __delitem__(self, key):
raise NotImplementedError("IdentityMap uses remove() to remove data")
class WeakInstanceDict(IdentityMap):
def __init__(self):
IdentityMap.__init__(self)
def __getitem__(self, key):
state = dict.__getitem__(self, key)
o = state.obj()
if o is None:
raise KeyError(key)
return o
def __contains__(self, key):
try:
if dict.__contains__(self, key):
state = dict.__getitem__(self, key)
o = state.obj()
else:
return False
except KeyError:
return False
else:
return o is not None
def contains_state(self, state):
return dict.get(self, state.key) is state
def replace(self, state):
if dict.__contains__(self, state.key):
existing = dict.__getitem__(self, state.key)
if existing is not state:
self._manage_removed_state(existing)
else:
return
dict.__setitem__(self, state.key, state)
self._manage_incoming_state(state)
def add(self, state):
key = state.key
# inline of self.__contains__
if dict.__contains__(self, key):
try:
existing_state = dict.__getitem__(self, key)
if existing_state is not state:
o = existing_state.obj()
if o is not None:
raise AssertionError(
"A conflicting state is already "
"present in the identity map for key %r"
% (key, ))
else:
return
except KeyError:
pass
dict.__setitem__(self, key, state)
self._manage_incoming_state(state)
def get(self, key, default=None):
state = dict.get(self, key, default)
if state is default:
return default
o = state.obj()
if o is None:
return default
return o
def _items(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append((state.key, value))
return result
def _values(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append(value)
return result
if util.py2k:
items = _items
values = _values
def iteritems(self):
return iter(self.items())
def itervalues(self):
return iter(self.values())
else:
def items(self):
return iter(self._items())
def values(self):
return iter(self._values())
def all_states(self):
if util.py2k:
return dict.values(self)
else:
return list(dict.values(self))
def discard(self, state):
st = dict.get(self, state.key, None)
if st is state:
dict.pop(self, state.key, None)
self._manage_removed_state(state)
def prune(self):
return 0
class StrongInstanceDict(IdentityMap):
def all_states(self):
return [attributes.instance_state(o) for o in self.values()]
def contains_state(self, state):
return (
state.key in self and
attributes.instance_state(self[state.key]) is state)
def replace(self, state):
if dict.__contains__(self, state.key):
existing = dict.__getitem__(self, state.key)
existing = attributes.instance_state(existing)
if existing is not state:
self._manage_removed_state(existing)
else:
return
dict.__setitem__(self, state.key, state.obj())
self._manage_incoming_state(state)
def add(self, state):
if state.key in self:
if attributes.instance_state(
dict.__getitem__(
self,
state.key)) is not state:
raise AssertionError('A conflicting state is already '
'present in the identity map for key %r'
% (state.key, ))
else:
dict.__setitem__(self, state.key, state.obj())
self._manage_incoming_state(state)
def discard(self, state):
obj = dict.get(self, state.key, None)
if obj is not None:
st = attributes.instance_state(obj)
if st is state:
dict.pop(self, state.key, None)
self._manage_removed_state(state)
def prune(self):
"""prune unreferenced, non-dirty states."""
ref_count = len(self)
dirty = [s.obj() for s in self.all_states() if s.modified]
# work around http://bugs.python.org/issue6149
keepers = weakref.WeakValueDictionary()
keepers.update(self)
dict.clear(self)
dict.update(self, keepers)
self.modified = bool(dirty)
return ref_count - len(self)
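# Illustrative contrast between the two maps (names hypothetical; comments
# only, not part of the original module):
#
#   wmap = WeakInstanceDict()
#   wmap.add(state)        # stores the InstanceState; the object is held weakly
#   obj = wmap[state.key]  # returns the object, or raises KeyError if it
#                          # has already been garbage collected
#   wmap.prune()           # always 0 -- weakrefs reclaim entries on their own
#
#   smap = StrongInstanceDict()
#   smap.add(state)        # stores state.obj() itself, keeping it alive
#   smap.prune()           # drops entries that are unreferenced and clean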
| mit |
steveb/heat | heat/api/openstack/v1/actions.py | 8 | 2608 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from webob import exc
from heat.api.openstack.v1 import util
from heat.common.i18n import _
from heat.common import serializers
from heat.common import wsgi
from heat.rpc import client as rpc_client
class ActionController(object):
"""WSGI controller for Actions in Heat v1 API.
    Implements the API for stack actions.
"""
# Define request scope (must match what is in policy.json)
REQUEST_SCOPE = 'actions'
ACTIONS = (
SUSPEND, RESUME, CHECK, CANCEL_UPDATE
) = (
'suspend', 'resume', 'check', 'cancel_update'
)
def __init__(self, options):
self.options = options
self.rpc_client = rpc_client.EngineClient()
@util.identified_stack
def action(self, req, identity, body=None):
"""Performs a specified action on a stack.
        The body is expected to contain exactly one item whose key specifies
the action.
"""
body = body or {}
if len(body) < 1:
raise exc.HTTPBadRequest(_("No action specified"))
if len(body) > 1:
raise exc.HTTPBadRequest(_("Multiple actions specified"))
ac = next(six.iterkeys(body))
if ac not in self.ACTIONS:
raise exc.HTTPBadRequest(_("Invalid action %s specified") % ac)
if ac == self.SUSPEND:
self.rpc_client.stack_suspend(req.context, identity)
elif ac == self.RESUME:
self.rpc_client.stack_resume(req.context, identity)
elif ac == self.CHECK:
self.rpc_client.stack_check(req.context, identity)
elif ac == self.CANCEL_UPDATE:
self.rpc_client.stack_cancel_update(req.context, identity)
else:
raise exc.HTTPInternalServerError(_("Unexpected action %s") % ac)
def create_resource(options):
"""Actions action factory method."""
deserializer = wsgi.JSONRequestDeserializer()
serializer = serializers.JSONResponseSerializer()
return wsgi.Resource(ActionController(options), deserializer, serializer)
| apache-2.0 |
IronLanguages/ironpython3 | Src/StdLib/Lib/idlelib/SearchDialog.py | 12 | 2630 | from tkinter import *
import re
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase
def _setup(text):
root = text._root()
engine = SearchEngine.get(root)
if not hasattr(engine, "_searchdialog"):
engine._searchdialog = SearchDialog(root, engine)
return engine._searchdialog
def find(text):
pat = text.get("sel.first", "sel.last")
return _setup(text).open(text,pat)
def find_again(text):
return _setup(text).find_again(text)
def find_selection(text):
return _setup(text).find_selection(text)
class SearchDialog(SearchDialogBase):
def create_widgets(self):
SearchDialogBase.create_widgets(self)
self.make_button("Find Next", self.default_command, 1)
def default_command(self, event=None):
if not self.engine.getprog():
return
self.find_again(self.text)
def find_again(self, text):
if not self.engine.getpat():
self.open(text)
return False
if not self.engine.getprog():
return False
res = self.engine.search_text(text)
if res:
line, m = res
i, j = m.span()
first = "%d.%d" % (line, i)
last = "%d.%d" % (line, j)
try:
selfirst = text.index("sel.first")
sellast = text.index("sel.last")
if selfirst == first and sellast == last:
text.bell()
return False
except TclError:
pass
text.tag_remove("sel", "1.0", "end")
text.tag_add("sel", first, last)
text.mark_set("insert", self.engine.isback() and first or last)
text.see("insert")
return True
else:
text.bell()
return False
def find_selection(self, text):
pat = text.get("sel.first", "sel.last")
if pat:
self.engine.setcookedpat(pat)
return self.find_again(text)
def _search_dialog(parent):
root = Tk()
root.title("Test SearchDialog")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
text = Text(root)
text.pack()
text.insert("insert","This is a sample string.\n"*10)
def show_find():
text.tag_add(SEL, "1.0", END)
s = _setup(text)
s.open(text)
text.tag_remove(SEL, "1.0", END)
button = Button(root, text="Search", command=show_find)
button.pack()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_search_dialog)
| apache-2.0 |
standak3/ElementalX_4.4.2 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
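# Example invocations (illustrative):
#   perf script -s sctop.py             # all comms, refresh every 3 seconds
#   perf script -s sctop.py firefox 5   # only 'firefox' syscalls, every 5s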
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
wangyum/beam | sdks/python/apache_beam/runners/dataflow/internal/dependency.py | 1 | 26995 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Support for installing custom code and required dependencies.
Workflows, with the exception of very simple ones, are organized in multiple
modules and packages. Typically, these modules and packages have
dependencies on other standard libraries. Dataflow relies on the Python
setuptools package to handle these scenarios. For further details please read:
https://pythonhosted.org/an_example_pypi_project/setuptools.html
When a runner tries to run a pipeline it will check for a --requirements_file
and a --setup_file option.
If --setup_file is present then it is assumed that the folder containing the
file specified by the option has the typical layout required by setuptools and
it will run 'python setup.py sdist' to produce a source distribution. The
resulting tarball (a .tar or .tar.gz file) will be staged at the GCS staging
location specified as job option. When a worker starts it will check for the
presence of this file and will run 'easy_install tarball' to install the
package in the worker.
If --requirements_file is present then the file specified by the option will be
staged in the GCS staging location. When a worker starts it will check for the
presence of this file and will run 'pip install -r requirements.txt'. A
requirements file can be easily generated by running 'pip freeze -r
requirements.txt'. The reason a Dataflow runner does not run this automatically
is because quite often only a small fraction of the dependencies present in a
requirements.txt file are actually needed for remote execution and therefore a
one-time manual trimming is desirable.
TODO(silviuc): Staged files should have a job specific prefix.
To prevent several jobs in the same project stomping on each other due to a
shared staging location.
TODO(silviuc): Should we allow several setup packages?
TODO(silviuc): We should allow customizing the exact command for setup build.
"""
import functools
import glob
import logging
import os
import re
import shutil
import sys
import tempfile
import pkg_resources
from apache_beam import version as beam_version
from apache_beam.internal import pickler
from apache_beam.io.filesystems import FileSystems
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.runners.dataflow.internal import names
from apache_beam.utils import processes
# All constants are for internal use only; no backwards-compatibility
# guarantees.
# In a released version BEAM_CONTAINER_VERSION and BEAM_FNAPI_CONTAINER_VERSION
# should match each other, and should be in the same format as the SDK version
# (i.e. MAJOR.MINOR.PATCH). For non-released (dev) versions, read below.
# Update this version to the next version whenever there is a change that will
# require changes to legacy Dataflow worker execution environment.
# This should be in the beam-[version]-[date] format, date is optional.
BEAM_CONTAINER_VERSION = 'beam-2.2.0-20170928'
# Update this version to the next version whenever there is a change that
# requires changes to SDK harness container or SDK harness launcher.
# This should be in the beam-[version]-[date] format, date is optional.
BEAM_FNAPI_CONTAINER_VERSION = 'beam-2.1.0-20170621'
# Standard file names used for staging files.
WORKFLOW_TARBALL_FILE = 'workflow.tar.gz'
REQUIREMENTS_FILE = 'requirements.txt'
EXTRA_PACKAGES_FILE = 'extra_packages.txt'
# Package names for different distributions
GOOGLE_PACKAGE_NAME = 'google-cloud-dataflow'
BEAM_PACKAGE_NAME = 'apache-beam'
# SDK identifiers for different distributions
GOOGLE_SDK_NAME = 'Google Cloud Dataflow SDK for Python'
BEAM_SDK_NAME = 'Apache Beam SDK for Python'
DATAFLOW_CONTAINER_IMAGE_REPOSITORY = 'dataflow.gcr.io/v1beta3'
def _dependency_file_copy(from_path, to_path):
"""Copies a local file to a GCS file or vice versa."""
logging.info('file copy from %s to %s.', from_path, to_path)
if from_path.startswith('gs://') or to_path.startswith('gs://'):
from apache_beam.io.gcp import gcsio
if from_path.startswith('gs://') and to_path.startswith('gs://'):
# Both files are GCS files so copy.
gcsio.GcsIO().copy(from_path, to_path)
elif to_path.startswith('gs://'):
# Only target is a GCS file, read local file and upload.
with open(from_path, 'rb') as f:
with gcsio.GcsIO().open(to_path, mode='wb') as g:
pfun = functools.partial(f.read, gcsio.WRITE_CHUNK_SIZE)
for chunk in iter(pfun, ''):
g.write(chunk)
else:
# Source is a GCS file but target is local file.
with gcsio.GcsIO().open(from_path, mode='rb') as g:
with open(to_path, 'wb') as f:
pfun = functools.partial(g.read, gcsio.DEFAULT_READ_BUFFER_SIZE)
for chunk in iter(pfun, ''):
f.write(chunk)
else:
# Branch used only for unit tests and integration tests.
# In such environments GCS support is not available.
if not os.path.isdir(os.path.dirname(to_path)):
      logging.info('Creating folder: %s', os.path.dirname(to_path))
      os.mkdir(os.path.dirname(to_path))
shutil.copyfile(from_path, to_path)
def _dependency_file_download(from_url, to_folder):
"""Downloads a file from a URL and returns path to the local file."""
# TODO(silviuc): We should cache downloads so we do not do it for every job.
try:
# We check if the file is actually there because wget returns a file
# even for a 404 response (file will contain the contents of the 404
# response).
response, content = __import__('httplib2').Http().request(from_url)
if int(response['status']) >= 400:
raise RuntimeError(
'Beam SDK not found at %s (response: %s)' % (from_url, response))
local_download_file = os.path.join(to_folder, 'beam-sdk.tar.gz')
with open(local_download_file, 'w') as f:
f.write(content)
except Exception:
logging.info('Failed to download Beam SDK from %s', from_url)
raise
return local_download_file
def _stage_extra_packages(extra_packages, staging_location, temp_dir,
file_copy=_dependency_file_copy):
"""Stages a list of local extra packages.
Args:
extra_packages: Ordered list of local paths to extra packages to be staged.
staging_location: Staging location for the packages.
temp_dir: Temporary folder where the resource building can happen. Caller
is responsible for cleaning up this folder after this function returns.
file_copy: Callable for copying files. The default version will copy from
a local file to a GCS location using the gsutil tool available in the
Google Cloud SDK package.
Returns:
A list of file names (no paths) for the resources staged. All the files
are assumed to be staged in staging_location.
Raises:
RuntimeError: If files specified are not found or do not have expected
name patterns.
"""
resources = []
staging_temp_dir = None
local_packages = []
for package in extra_packages:
if not (os.path.basename(package).endswith('.tar') or
os.path.basename(package).endswith('.tar.gz') or
os.path.basename(package).endswith('.whl')):
      raise RuntimeError(
          'The --extra_package option expects a full path ending with '
          '".tar", ".tar.gz" or ".whl" instead of %s' % package)
if os.path.basename(package).endswith('.whl'):
      logging.warning(
          'The .whl package "%s" is provided in --extra_package. '
          'This functionality is not officially supported. Since wheel '
          'packages are binary distributions, this package must be '
          'binary-compatible with the worker environment (e.g. Python 2.7 '
          'running on an x64 Linux host).', package)
if not os.path.isfile(package):
if package.startswith('gs://'):
if not staging_temp_dir:
staging_temp_dir = tempfile.mkdtemp(dir=temp_dir)
logging.info('Downloading extra package: %s locally before staging',
package)
if os.path.isfile(staging_temp_dir):
local_file_path = staging_temp_dir
else:
_, last_component = FileSystems.split(package)
local_file_path = FileSystems.join(staging_temp_dir, last_component)
_dependency_file_copy(package, local_file_path)
else:
raise RuntimeError(
'The file %s cannot be found. It was specified in the '
'--extra_packages command line option.' % package)
else:
local_packages.append(package)
if staging_temp_dir:
local_packages.extend(
[FileSystems.join(staging_temp_dir, f) for f in os.listdir(
staging_temp_dir)])
for package in local_packages:
basename = os.path.basename(package)
staged_path = FileSystems.join(staging_location, basename)
file_copy(package, staged_path)
resources.append(basename)
# Create a file containing the list of extra packages and stage it.
# The file is important so that in the worker the packages are installed
# exactly in the order specified. This approach will avoid extra PyPI
# requests. For example if package A depends on package B and package A
# is installed first then the installer will try to satisfy the
# dependency on B by downloading the package from PyPI. If package B is
# installed first this is avoided.
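  # Illustrative ordering (package names hypothetical): with
  #   extra_packages = ['B-1.0.tar.gz', 'A-1.0.tar.gz']
  # where A depends on B, the worker installs B first, so pip never needs to
  # reach out to PyPI to satisfy A's dependency on B.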
with open(os.path.join(temp_dir, EXTRA_PACKAGES_FILE), 'wt') as f:
for package in local_packages:
f.write('%s\n' % os.path.basename(package))
staged_path = FileSystems.join(staging_location, EXTRA_PACKAGES_FILE)
# Note that the caller of this function is responsible for deleting the
# temporary folder where all temp files are created, including this one.
file_copy(os.path.join(temp_dir, EXTRA_PACKAGES_FILE), staged_path)
resources.append(EXTRA_PACKAGES_FILE)
return resources
def _get_python_executable():
# Allow overriding the python executable to use for downloading and
# installing dependencies, otherwise use the python executable for
# the current process.
python_bin = os.environ.get('BEAM_PYTHON') or sys.executable
if not python_bin:
raise ValueError('Could not find Python executable.')
return python_bin
def _populate_requirements_cache(requirements_file, cache_dir):
# The 'pip download' command will not download again if it finds the
# tarball with the proper version already present.
# It will get the packages downloaded in the order they are presented in
# the requirements file and will not download package dependencies.
cmd_args = [
_get_python_executable(), '-m', 'pip', 'install', '--download', cache_dir,
'-r', requirements_file,
# Download from PyPI source distributions.
'--no-binary', ':all:']
logging.info('Executing command: %s', cmd_args)
processes.check_call(cmd_args)
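# For reference, the assembled command is roughly (cache path hypothetical):
#   python -m pip install --download /tmp/dataflow-requirements-cache \
#       -r requirements.txt --no-binary :all:
# Newer pip releases replace 'pip install --download' with 'pip download -d'.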
def stage_job_resources(
options, file_copy=_dependency_file_copy, build_setup_args=None,
temp_dir=None, populate_requirements_cache=_populate_requirements_cache):
"""For internal use only; no backwards-compatibility guarantees.
Creates (if needed) and stages job resources to options.staging_location.
Args:
options: Command line options. More specifically the function will expect
staging_location, requirements_file, setup_file, and save_main_session
options to be present.
file_copy: Callable for copying files. The default version will copy from
a local file to a GCS location using the gsutil tool available in the
Google Cloud SDK package.
build_setup_args: A list of command line arguments used to build a setup
package. Used only if options.setup_file is not None. Used only for
testing.
temp_dir: Temporary folder where the resource building can happen. If None
then a unique temp directory will be created. Used only for testing.
populate_requirements_cache: Callable for populating the requirements cache.
Used only for testing.
Returns:
A list of file names (no paths) for the resources staged. All the files
are assumed to be staged in options.staging_location.
Raises:
RuntimeError: If files specified are not found or error encountered while
trying to create the resources (e.g., build a setup package).
"""
temp_dir = temp_dir or tempfile.mkdtemp()
resources = []
google_cloud_options = options.view_as(GoogleCloudOptions)
setup_options = options.view_as(SetupOptions)
# Make sure that all required options are specified. There are a few that have
# defaults to support local running scenarios.
if google_cloud_options.staging_location is None:
raise RuntimeError(
'The --staging_location option must be specified.')
if google_cloud_options.temp_location is None:
raise RuntimeError(
'The --temp_location option must be specified.')
# Stage a requirements file if present.
if setup_options.requirements_file is not None:
if not os.path.isfile(setup_options.requirements_file):
raise RuntimeError('The file %s cannot be found. It was specified in the '
'--requirements_file command line option.' %
setup_options.requirements_file)
staged_path = FileSystems.join(google_cloud_options.staging_location,
REQUIREMENTS_FILE)
file_copy(setup_options.requirements_file, staged_path)
resources.append(REQUIREMENTS_FILE)
requirements_cache_path = (
os.path.join(tempfile.gettempdir(), 'dataflow-requirements-cache')
if setup_options.requirements_cache is None
else setup_options.requirements_cache)
# Populate cache with packages from requirements and stage the files
# in the cache.
if not os.path.exists(requirements_cache_path):
os.makedirs(requirements_cache_path)
populate_requirements_cache(
setup_options.requirements_file, requirements_cache_path)
for pkg in glob.glob(os.path.join(requirements_cache_path, '*')):
file_copy(pkg, FileSystems.join(google_cloud_options.staging_location,
os.path.basename(pkg)))
resources.append(os.path.basename(pkg))
# Handle a setup file if present.
# We will build the setup package locally and then copy it to the staging
# location because the staging location is a GCS path and the file cannot be
# created directly there.
if setup_options.setup_file is not None:
if not os.path.isfile(setup_options.setup_file):
raise RuntimeError('The file %s cannot be found. It was specified in the '
'--setup_file command line option.' %
setup_options.setup_file)
if os.path.basename(setup_options.setup_file) != 'setup.py':
raise RuntimeError(
'The --setup_file option expects the full path to a file named '
'setup.py instead of %s' % setup_options.setup_file)
tarball_file = _build_setup_package(setup_options.setup_file, temp_dir,
build_setup_args)
staged_path = FileSystems.join(google_cloud_options.staging_location,
WORKFLOW_TARBALL_FILE)
file_copy(tarball_file, staged_path)
resources.append(WORKFLOW_TARBALL_FILE)
# Handle extra local packages that should be staged.
if setup_options.extra_packages is not None:
resources.extend(
_stage_extra_packages(setup_options.extra_packages,
google_cloud_options.staging_location,
temp_dir=temp_dir, file_copy=file_copy))
# Pickle the main session if requested.
# We will create the pickled main session locally and then copy it to the
# staging location because the staging location is a GCS path and the file
# cannot be created directly there.
if setup_options.save_main_session:
pickled_session_file = os.path.join(temp_dir,
names.PICKLED_MAIN_SESSION_FILE)
pickler.dump_session(pickled_session_file)
staged_path = FileSystems.join(google_cloud_options.staging_location,
names.PICKLED_MAIN_SESSION_FILE)
file_copy(pickled_session_file, staged_path)
resources.append(names.PICKLED_MAIN_SESSION_FILE)
if hasattr(setup_options, 'sdk_location'):
if setup_options.sdk_location == 'default':
stage_tarball_from_remote_location = True
elif (setup_options.sdk_location.startswith('gs://') or
setup_options.sdk_location.startswith('http://') or
setup_options.sdk_location.startswith('https://')):
stage_tarball_from_remote_location = True
else:
stage_tarball_from_remote_location = False
staged_path = FileSystems.join(google_cloud_options.staging_location,
names.DATAFLOW_SDK_TARBALL_FILE)
if stage_tarball_from_remote_location:
# If --sdk_location is not specified then the appropriate package
# will be obtained from PyPI (https://pypi.python.org) based on the
# version of the currently running SDK. If the option is
# present then no version matching is made and the exact URL or path
# is expected.
#
# Unit tests running in the 'python setup.py test' context will
# not have the sdk_location attribute present and therefore we
# will not stage a tarball.
if setup_options.sdk_location == 'default':
sdk_remote_location = 'pypi'
else:
sdk_remote_location = setup_options.sdk_location
_stage_beam_sdk_tarball(sdk_remote_location, staged_path, temp_dir)
resources.append(names.DATAFLOW_SDK_TARBALL_FILE)
else:
# Check if we have a local Beam SDK tarball present. This branch is
# used by tests running with the SDK built at head.
if setup_options.sdk_location == 'default':
module_path = os.path.abspath(__file__)
sdk_path = os.path.join(
os.path.dirname(module_path), '..', '..', '..',
names.DATAFLOW_SDK_TARBALL_FILE)
elif os.path.isdir(setup_options.sdk_location):
sdk_path = os.path.join(
setup_options.sdk_location, names.DATAFLOW_SDK_TARBALL_FILE)
else:
sdk_path = setup_options.sdk_location
if os.path.isfile(sdk_path):
logging.info('Copying Beam SDK "%s" to staging location.', sdk_path)
file_copy(sdk_path, staged_path)
resources.append(names.DATAFLOW_SDK_TARBALL_FILE)
else:
if setup_options.sdk_location == 'default':
          raise RuntimeError('Cannot find default Beam SDK tar file "%s"' %
                             sdk_path)
elif not setup_options.sdk_location:
logging.info('Beam SDK will not be staged since --sdk_location '
'is empty.')
else:
raise RuntimeError(
'The file "%s" cannot be found. Its location was specified by '
'the --sdk_location command-line option.' %
sdk_path)
# Delete all temp files created while staging job resources.
shutil.rmtree(temp_dir)
return resources
def _build_setup_package(setup_file, temp_dir, build_setup_args=None):
saved_current_directory = os.getcwd()
try:
os.chdir(os.path.dirname(setup_file))
if build_setup_args is None:
build_setup_args = [
_get_python_executable(), os.path.basename(setup_file),
'sdist', '--dist-dir', temp_dir]
logging.info('Executing command: %s', build_setup_args)
processes.check_call(build_setup_args)
output_files = glob.glob(os.path.join(temp_dir, '*.tar.gz'))
if not output_files:
raise RuntimeError(
'File %s not found.' % os.path.join(temp_dir, '*.tar.gz'))
return output_files[0]
finally:
os.chdir(saved_current_directory)
def _stage_beam_sdk_tarball(sdk_remote_location, staged_path, temp_dir):
"""Stage a Beam SDK tarball with the appropriate version.
Args:
    sdk_remote_location: A GCS path to an SDK tarball or a URL from which
      the file can be downloaded.
staged_path: GCS path where the found SDK tarball should be copied.
temp_dir: path to temporary location where the file should be downloaded.
Raises:
    RuntimeError: If fetching the specified URL returns errors or the file
      cannot be copied from/to GCS.
"""
if (sdk_remote_location.startswith('http://') or
sdk_remote_location.startswith('https://')):
logging.info(
'Staging Beam SDK tarball from %s to %s',
sdk_remote_location, staged_path)
local_download_file = _dependency_file_download(
sdk_remote_location, temp_dir)
_dependency_file_copy(local_download_file, staged_path)
elif sdk_remote_location.startswith('gs://'):
# Stage the file to the GCS staging area.
logging.info(
'Staging Beam SDK tarball from %s to %s',
sdk_remote_location, staged_path)
_dependency_file_copy(sdk_remote_location, staged_path)
elif sdk_remote_location == 'pypi':
logging.info('Staging the SDK tarball from PyPI to %s', staged_path)
_dependency_file_copy(_download_pypi_sdk_package(temp_dir), staged_path)
else:
raise RuntimeError(
'The --sdk_location option was used with an unsupported '
'type of location: %s' % sdk_remote_location)
def get_runner_harness_container_image():
"""For internal use only; no backwards-compatibility guarantees.
Returns:
str: Runner harness container image that shall be used by default
for current SDK version or None if the runner harness container image
bundled with the service shall be used.
"""
try:
version = pkg_resources.get_distribution(GOOGLE_PACKAGE_NAME).version
# Pin runner harness for Dataflow releases.
return (DATAFLOW_CONTAINER_IMAGE_REPOSITORY + '/' + 'harness' + ':' +
version)
except pkg_resources.DistributionNotFound:
# Pin runner harness for BEAM releases.
if 'dev' not in beam_version.__version__:
return (DATAFLOW_CONTAINER_IMAGE_REPOSITORY + '/' + 'harness' + ':' +
beam_version.__version__)
# Don't pin runner harness for BEAM head so that we can notice
# potential incompatibility between runner and sdk harnesses.
return None
def get_default_container_image_for_current_sdk(job_type):
"""For internal use only; no backwards-compatibility guarantees.
Args:
job_type (str): BEAM job type.
Returns:
str: Google Cloud Dataflow container image for remote execution.
"""
# TODO(tvalentyn): Use enumerated type instead of strings for job types.
if job_type == 'FNAPI_BATCH' or job_type == 'FNAPI_STREAMING':
image_name = 'dataflow.gcr.io/v1beta3/python-fnapi'
else:
image_name = 'dataflow.gcr.io/v1beta3/python'
image_tag = _get_required_container_version(job_type)
return image_name + ':' + image_tag
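# For example, an FNAPI job on a released SDK resolves to something like
# 'dataflow.gcr.io/v1beta3/python-fnapi:2.1.0' (the tag shown is illustrative).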
def _get_required_container_version(job_type=None):
"""For internal use only; no backwards-compatibility guarantees.
Args:
job_type (str, optional): BEAM job type. Defaults to None.
Returns:
str: The tag of worker container images in GCR that corresponds to
current version of the SDK.
"""
# TODO(silviuc): Handle apache-beam versions when we have official releases.
try:
version = pkg_resources.get_distribution(GOOGLE_PACKAGE_NAME).version
# We drop any pre/post parts of the version and we keep only the X.Y.Z
# format. For instance the 0.3.0rc2 SDK version translates into 0.3.0.
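    # (Illustrative: '2.1.0rc2' -> '2.1.0', while '2.2.0.dev1' -> '2.2.0.dev'.)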
container_version = (
'%s.%s.%s' % pkg_resources.parse_version(version)._version.release)
# We do, however, keep the ".dev" suffix if it is present.
if re.match(r'.*\.dev[0-9]*$', version):
container_version += '.dev'
return container_version
except pkg_resources.DistributionNotFound:
# This case covers Apache Beam end-to-end testing scenarios. All these tests
# will run with a special container version.
if job_type == 'FNAPI_BATCH' or job_type == 'FNAPI_STREAMING':
return BEAM_FNAPI_CONTAINER_VERSION
else:
return BEAM_CONTAINER_VERSION
def get_sdk_name_and_version():
"""For internal use only; no backwards-compatibility guarantees.
Returns name and version of SDK reported to Google Cloud Dataflow."""
container_version = _get_required_container_version()
try:
pkg_resources.get_distribution(GOOGLE_PACKAGE_NAME)
return (GOOGLE_SDK_NAME, container_version)
except pkg_resources.DistributionNotFound:
return (BEAM_SDK_NAME, beam_version.__version__)
def get_sdk_package_name():
"""For internal use only; no backwards-compatibility guarantees.
Returns the PyPI package name to be staged to Google Cloud Dataflow."""
sdk_name, _ = get_sdk_name_and_version()
if sdk_name == GOOGLE_SDK_NAME:
return GOOGLE_PACKAGE_NAME
else:
return BEAM_PACKAGE_NAME
def _download_pypi_sdk_package(temp_dir):
"""Downloads SDK package from PyPI and returns path to local path."""
package_name = get_sdk_package_name()
try:
version = pkg_resources.get_distribution(package_name).version
except pkg_resources.DistributionNotFound:
raise RuntimeError('Please set --sdk_location command-line option '
'or install a valid {} distribution.'
.format(package_name))
# Get a source distribution for the SDK package from PyPI.
cmd_args = [
_get_python_executable(), '-m', 'pip', 'install', '--download', temp_dir,
'%s==%s' % (package_name, version),
'--no-binary', ':all:', '--no-deps']
logging.info('Executing command: %s', cmd_args)
processes.check_call(cmd_args)
zip_expected = os.path.join(
temp_dir, '%s-%s.zip' % (package_name, version))
if os.path.exists(zip_expected):
return zip_expected
tgz_expected = os.path.join(
temp_dir, '%s-%s.tar.gz' % (package_name, version))
if os.path.exists(tgz_expected):
return tgz_expected
raise RuntimeError(
'Failed to download a source distribution for the running SDK. Expected '
'either %s or %s to be found in the download folder.' % (
zip_expected, tgz_expected))
| apache-2.0 |
shashankbassi92/tornado | tornado/tcpclient.py | 208 | 6802 | #!/usr/bin/env python
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking TCP connection factory.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import socket
from tornado.concurrent import Future
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado import gen
from tornado.netutil import Resolver
_INITIAL_CONNECT_TIMEOUT = 0.3
class _Connector(object):
"""A stateless implementation of the "Happy Eyeballs" algorithm.
"Happy Eyeballs" is documented in RFC6555 as the recommended practice
for when both IPv4 and IPv6 addresses are available.
In this implementation, we partition the addresses by family, and
make the first connection attempt to whichever address was
returned first by ``getaddrinfo``. If that connection fails or
times out, we begin a connection in parallel to the first address
of the other family. If there are additional failures we retry
with other addresses, keeping one connection attempt per family
in flight at a time.
http://tools.ietf.org/html/rfc6555
"""
def __init__(self, addrinfo, io_loop, connect):
self.io_loop = io_loop
self.connect = connect
self.future = Future()
self.timeout = None
self.last_error = None
self.remaining = len(addrinfo)
self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
@staticmethod
def split(addrinfo):
"""Partition the ``addrinfo`` list by address family.
Returns two lists. The first list contains the first entry from
``addrinfo`` and all others with the same family, and the
second list contains all other addresses (normally one list will
be AF_INET and the other AF_INET6, although non-standard resolvers
may return additional families).
"""
primary = []
secondary = []
primary_af = addrinfo[0][0]
for af, addr in addrinfo:
if af == primary_af:
primary.append((af, addr))
else:
secondary.append((af, addr))
return primary, secondary
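    # e.g. (hypothetical addrinfo; AF_* constants from the socket module):
    #   split([(AF_INET6, '::1'), (AF_INET, '1.2.3.4'), (AF_INET6, '::2')])
    #   -> ([(AF_INET6, '::1'), (AF_INET6, '::2')], [(AF_INET, '1.2.3.4')])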
def start(self, timeout=_INITIAL_CONNECT_TIMEOUT):
self.try_connect(iter(self.primary_addrs))
        self.set_timeout(timeout)
return self.future
def try_connect(self, addrs):
try:
af, addr = next(addrs)
except StopIteration:
# We've reached the end of our queue, but the other queue
# might still be working. Send a final error on the future
# only when both queues are finished.
if self.remaining == 0 and not self.future.done():
self.future.set_exception(self.last_error or
IOError("connection failed"))
return
future = self.connect(af, addr)
future.add_done_callback(functools.partial(self.on_connect_done,
addrs, af, addr))
def on_connect_done(self, addrs, af, addr, future):
self.remaining -= 1
try:
stream = future.result()
except Exception as e:
if self.future.done():
return
# Error: try again (but remember what happened so we have an
# error to raise in the end)
self.last_error = e
self.try_connect(addrs)
if self.timeout is not None:
# If the first attempt failed, don't wait for the
# timeout to try an address from the secondary queue.
self.io_loop.remove_timeout(self.timeout)
self.on_timeout()
return
self.clear_timeout()
if self.future.done():
# This is a late arrival; just drop it.
stream.close()
else:
self.future.set_result((af, addr, stream))
    def set_timeout(self, timeout):
self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
self.on_timeout)
def on_timeout(self):
self.timeout = None
self.try_connect(iter(self.secondary_addrs))
def clear_timeout(self):
if self.timeout is not None:
self.io_loop.remove_timeout(self.timeout)
class TCPClient(object):
"""A non-blocking TCP connection factory.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, resolver=None, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
if resolver is not None:
self.resolver = resolver
self._own_resolver = False
else:
self.resolver = Resolver(io_loop=io_loop)
self._own_resolver = True
def close(self):
if self._own_resolver:
self.resolver.close()
@gen.coroutine
def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
max_buffer_size=None):
"""Connect to the given host and port.
Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
``ssl_options`` is not None).
"""
addrinfo = yield self.resolver.resolve(host, port, af)
connector = _Connector(
addrinfo, self.io_loop,
functools.partial(self._create_stream, max_buffer_size))
af, addr, stream = yield connector.start()
# TODO: For better performance we could cache the (af, addr)
# information here and re-use it on subsequent connections to
# the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
if ssl_options is not None:
stream = yield stream.start_tls(False, ssl_options=ssl_options,
server_hostname=host)
raise gen.Return(stream)
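    # Usage sketch (host and payload hypothetical; requires a running IOLoop):
    #
    #   @gen.coroutine
    #   def talk():
    #       stream = yield TCPClient().connect('example.com', 80)
    #       yield stream.write(b'GET / HTTP/1.0\r\n\r\n')
    #       reply = yield stream.read_until_close()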
def _create_stream(self, max_buffer_size, af, addr):
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
stream = IOStream(socket.socket(af),
io_loop=self.io_loop,
max_buffer_size=max_buffer_size)
return stream.connect(addr)
| apache-2.0 |
busyStone/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/__init__.py | 215 | 2116 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from . base import EXT_MSG, EXT_SRV, SEP, log, plog, InvalidMsgSpec, log_verbose, MsgGenerationException
from . gentools import compute_md5, compute_full_text, compute_md5_text
from . names import resource_name_base, package_resource_name, is_legal_resource_base_name, \
resource_name_package, resource_name, is_legal_resource_name
from . msgs import HEADER, TIME, DURATION, MsgSpec, Constant, Field
from . msg_loader import MsgNotFound, MsgContext, load_depends, load_msg_by_type, load_srv_by_type
from . srvs import SrvSpec
| gpl-3.0 |
cloudfoundry/php-buildpack | fixtures/symfony_5_local_deps/vendor/doctrine/orm/docs/en/conf.py | 24 | 6535 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-{0}, Doctrine Project Team'.format(datetime.date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
| apache-2.0 |
snoopspy/gtest | test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
    The right message must be printed and the tests must be
    skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
jrutila/django-shop | docs/conf.py | 19 | 7127 | # -*- coding: utf-8 -*-
#
# django SHOP documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 6 14:42:25 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django SHOP'
copyright = u'2010, Chris Glass'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, path)
import shop
version = shop.__version__
# The full version, including alpha/beta/rc tags.
release = shop.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoShopdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index_', 'DjangoShop.tex', u'Django SHOP Documentation',
u'Chris Glass', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index_', 'djangoshop', u'Django SHOP Documentation',
[u'Chris Glass'], 1)
]
| bsd-3-clause |
katjoyce/certificate-transparency | python/ct/client/async_log_client.py | 7 | 18328 | """RFC 6962 client API."""
from ct.client import log_client
from ct.client.db import database
import gflags
import logging
import random
from twisted.internet import defer
from twisted.internet import error
from twisted.internet import protocol
from twisted.internet import reactor as ireactor
from twisted.internet import task
from twisted.internet import threads
from twisted.python import failure
from twisted.web import client
from twisted.web import http
from twisted.web import iweb
from Queue import Queue
from zope.interface import implements
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("max_fetchers_in_parallel", 100, "Maximum number of "
"concurrent fetches.")
gflags.DEFINE_integer("get_entries_retry_delay", 1, "Number of seconds after "
"which get-entries will be retried if it encountered "
"an error.")
gflags.DEFINE_integer("entries_buffer", 100000, "Size of buffer which stores "
"fetched entries before async log client is able to "
"return them. 100000 entries shouldn't take more "
"than 600 Mb of memory.")
gflags.DEFINE_integer("response_buffer_size_bytes", 50 * 1000 * 1000, "Maximum "
"size of a single response buffer. Should be set such "
"that a get_entries response comfortably fits in the "
"the buffer. A typical log entry is expected to be < "
"10kB.")
gflags.DEFINE_bool("persist_entries", True, "Cache entries on disk.")
class HTTPConnectionError(log_client.HTTPError):
"""Connection failed."""
pass
class HTTPResponseSizeExceededError(log_client.HTTPError):
"""HTTP response exceeded maximum permitted size."""
pass
###############################################################################
# The asynchronous twisted log client. #
###############################################################################
class ResponseBodyHandler(protocol.Protocol):
"""Response handler for HTTP requests."""
def __init__(self, finished):
"""Initialize the one-off response handler.
Args:
finished: a deferred that will be fired with the body when the
complete response has been received; or with an error when the
connection is lost.
"""
self._finished = finished
def connectionMade(self):
self._buffer = []
self._len = 0
self._overflow = False
def dataReceived(self, data):
self._len += len(data)
if self._len > FLAGS.response_buffer_size_bytes:
# Note this flag has to be set *before* calling loseConnection()
# to ensure connectionLost gets called with the flag set.
self._overflow = True
self.transport.loseConnection()
else:
self._buffer.append(data)
def connectionLost(self, reason):
if self._overflow:
self._finished.errback(HTTPResponseSizeExceededError(
"Connection aborted: response size exceeded %d bytes" %
FLAGS.response_buffer_size_bytes))
elif not reason.check(*(error.ConnectionDone, client.ResponseDone,
http.PotentialDataLoss)):
self._finished.errback(HTTPConnectionError(
"Connection lost (received %d bytes)" % self._len))
else:
body = "".join(self._buffer)
self._finished.callback(body)
class AsyncRequestHandler(object):
"""A helper for asynchronous response body delivery."""
def __init__(self, agent):
self._agent = agent
@staticmethod
def _response_cb(response):
try:
log_client.RequestHandler.check_response_status(
response.code, response.phrase,
list(response.headers.getAllRawHeaders()))
except log_client.HTTPError as e:
return failure.Failure(e)
finished = defer.Deferred()
response.deliverBody(ResponseBodyHandler(finished))
return finished
@staticmethod
def _make_request(path, params):
if not params:
return path
return path + "?" + "&".join(["%s=%s" % (key, value)
for key, value in params.iteritems()])
def get(self, path, params=None):
d = self._agent.request("GET", self._make_request(path, params))
d.addCallback(self._response_cb)
return d
class EntryProducer(object):
"""A push producer for log entries."""
implements(iweb.IBodyProducer)
def __init__(self, handler, reactor, uri, start, end,
batch_size, entries_db=None):
self._handler = handler
self._reactor = reactor
self._uri = uri
self._entries_db = entries_db
self._consumer = None
assert 0 <= start <= end
self._start = start
self._end = end
self._current = self._start
self._batch_size = batch_size
self._batches = Queue()
self._currently_fetching = 0
self._currently_stored = 0
self._last_fetching = self._current
self._max_currently_fetching = (FLAGS.max_fetchers_in_parallel *
self._batch_size)
# Required attribute of the interface.
self.length = iweb.UNKNOWN_LENGTH
self.min_delay = FLAGS.get_entries_retry_delay
@property
def finished(self):
return self._current > self._end
def __fail(self, failure):
if not self._stopped:
self.stopProducing()
self._done.errback(failure)
@staticmethod
def _calculate_retry_delay(retries):
"""Calculates delay based on number of retries which already happened.
        Random jitter is included so we won't hit the server with lots of
        requests at exactly the same time, and 1.4 is a nice constant for
        exponential back-off."""
return ((0.4 + random.uniform(0.3, 0.6)) * FLAGS.get_entries_retry_delay
* 1.4**retries)
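    # Rough expected delays with the default 1 s base (exact values vary with
    # the random jitter): retries=0 -> ~0.7-1.0 s, 1 -> ~1.0-1.4 s,
    # 2 -> ~1.4-2.0 s.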
def _response_eb(self, failure, first, last, retries):
"""Error back for HTTP errors"""
if not self._paused:
            # if it's not the last retry and the failure wasn't our fault, retry
if (retries < FLAGS.get_entries_max_retries and
not failure.check(log_client.HTTPClientError)):
logging.info("Retrying get-entries for range <%d, %d> retry: %d"
% (first, last, retries))
d = task.deferLater(self._reactor,
self._calculate_retry_delay(retries),
self._fetch_parsed_entries,
first, last)
d.addErrback(self._response_eb, first, last, retries + 1)
return d
else:
self.__fail(failure)
def _fetch_eb(self, failure):
"""Error back for errors after getting result of a request
(InvalidResponse)"""
self.__fail(failure)
def _write_pending(self):
d = defer.Deferred()
d.callback(None)
if self._pending:
self._current += len(self._pending)
self._currently_stored -= len(self._pending)
d = self._consumer.consume(self._pending)
self._pending = None
return d
def _batch_completed(self, result):
self._currently_fetching -= len(result)
self._currently_stored += len(result)
return result
def _store_batch(self, entry_batch, start_index):
assert self._entries_db
d = threads.deferToThread(self._entries_db.store_entries,
enumerate(entry_batch, start_index))
d.addCallback(lambda _: entry_batch)
return d
def _get_entries_from_db(self, first, last):
if FLAGS.persist_entries and self._entries_db:
d = threads.deferToThread(self._entries_db.scan_entries, first, last)
            d.addCallback(lambda entries: list(entries))
d.addErrback(lambda fail: fail.trap(database.KeyError) and None)
return d
else:
d = defer.Deferred()
d.callback(None)
return d
def _fetch_parsed_entries(self, first, last):
# first check in database
d = self._get_entries_from_db(first, last)
d.addCallback(self._sub_fetch_parsed_entries, first, last)
return d
def _sub_fetch_parsed_entries(self, entries, first, last):
        # it's not a good idea to hit the server with many requests at exactly
        # the same time, so requests are sent after a slight delay.
if not entries:
request = task.deferLater(self._reactor,
self._calculate_retry_delay(0),
self._handler.get,
self._uri + "/" +
log_client._GET_ENTRIES_PATH,
params={"start": str(first),
"end": str(last)})
request.addCallback(log_client._parse_entries, last - first + 1)
if self._entries_db and FLAGS.persist_entries:
request.addCallback(self._store_batch, first)
entries = request
else:
deferred_entries = defer.Deferred()
deferred_entries.callback(entries)
entries = deferred_entries
return entries
def _create_next_request(self, first, last, entries, retries):
d = self._fetch_parsed_entries(first, last)
d.addErrback(self._response_eb, first, last, retries)
d.addCallback(lambda result: (entries + result, len(result)))
d.addCallback(self._fetch, first, last, retries)
return d
def _fetch(self, result, first, last, retries):
entries, last_fetched_entries_count = result
next_range_start = first + last_fetched_entries_count
if next_range_start > last:
return entries
return self._create_next_request(next_range_start, last,
entries, retries)
def _create_fetch_deferred(self, first, last, retries=0):
d = defer.Deferred()
d.addCallback(self._fetch, first, last, retries)
d.addCallback(self._batch_completed)
d.addErrback(self._fetch_eb)
d.callback(([], 0))
return d
@defer.deferredGenerator
def produce(self):
"""Produce entries."""
while not self._paused:
wfd = defer.waitForDeferred(self._write_pending())
yield wfd
wfd.getResult()
if self.finished:
self.finishProducing()
return
first = self._last_fetching
while (self._currently_fetching <= self._max_currently_fetching and
self._last_fetching <= self._end and
self._currently_stored <= FLAGS.entries_buffer):
last = min(self._last_fetching + self._batch_size - 1, self._end,
self._last_fetching + self._max_currently_fetching
- self._currently_fetching + 1)
self._batches.put(self._create_fetch_deferred(first, last))
self._currently_fetching += last - first + 1
first = last + 1
self._last_fetching = first
wfd = defer.waitForDeferred(self._batches.get())
# Pause here until the body of the response is available.
yield wfd
# The producer may have been paused while waiting for the response,
# or errored out upon receiving it: do not write the entries out
# until after the next self._paused check.
self._pending = wfd.getResult()
def startProducing(self, consumer):
"""Start producing entries.
The producer writes EntryResponse protos to the consumer in batches,
until all entries have been received, or an error occurs.
Args:
consumer: the consumer to write to.
Returns:
a deferred that fires when no more entries will be written.
            Upon success, this deferred fires with the number of produced
            entries. Upon failure, this deferred fires with the appropriate
            HTTPError.
Raises:
RuntimeError: consumer already registered.
"""
if self._consumer:
raise RuntimeError("Producer already has a consumer registered")
self._consumer = consumer
self._stopped = False
self._paused = True
self._pending = None
self._done = defer.Deferred()
# An IBodyProducer should start producing immediately, without waiting
# for an explicit resumeProducing() call.
task.deferLater(self._reactor, 0, self.resumeProducing)
return self._done
def pauseProducing(self):
self._paused = True
def resumeProducing(self):
if self._paused and not self._stopped:
self._paused = False
d = self.produce()
d.addErrback(self.finishProducing)
def stopProducing(self):
self._paused = True
self._stopped = True
def finishProducing(self, failure=None):
self.stopProducing()
if not failure:
self._done.callback(self._end - self._start + 1)
else:
self._done.errback(failure)
class AsyncLogClient(object):
"""A twisted log client."""
def __init__(self, agent, uri, entries_db=None, reactor=ireactor):
"""Initialize the client.
If entries_db is specified and flag persist_entries is true, get_entries
will return stored entries.
Args:
agent: the agent to use.
uri: the uri of the log.
            entries_db: object that conforms to the TempDB API
reactor: the reactor to use. Default is twisted.internet.reactor.
"""
self._handler = AsyncRequestHandler(agent)
        # twisted expects bytes, so if uri is unicode we have to encode it
self._uri = uri.encode('ascii')
self._reactor = reactor
self._entries_db = entries_db
@property
def servername(self):
return self._uri
def get_sth(self):
"""Get the current Signed Tree Head.
Returns:
a Deferred that fires with a ct.proto.client_pb2.SthResponse proto.
Raises:
HTTPError, HTTPConnectionError, HTTPClientError,
HTTPResponseSizeExceededError, HTTPServerError: connection failed.
For logs that honour HTTP status codes, HTTPClientError (a 4xx)
should never happen.
InvalidResponseError: server response is invalid for the given
request.
"""
deferred_result = self._handler.get(self._uri + "/" +
log_client._GET_STH_PATH)
deferred_result.addCallback(log_client._parse_sth)
return deferred_result
def get_entries(self, start, end, batch_size=0):
"""Retrieve log entries.
Args:
start: index of first entry to retrieve.
end: index of last entry to retrieve.
batch_size: max number of entries to fetch in one go.
Returns:
an EntryProducer for the given range.
Raises:
InvalidRequestError: invalid request range (irrespective of log).
Caller is responsible for ensuring that (start, end) is a valid range
(by retrieving an STH first), otherwise a HTTPClientError may occur
during production.
"""
# Catch obvious mistakes here.
if start < 0 or end < 0 or start > end:
raise log_client.InvalidRequestError(
"Invalid range [%d, %d]" % (start, end))
batch_size = batch_size or FLAGS.entry_fetch_batch_size
return EntryProducer(self._handler, self._reactor, self._uri,
start, end, batch_size, self._entries_db)
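    # Typical use (sketch; the callback wiring is illustrative, and tree_size
    # is assumed to be the field name on the STH proto):
    #   d = client.get_sth()
    #   d.addCallback(lambda sth: client.get_entries(0, sth.tree_size - 1)
    #                             .startProducing(my_consumer))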
def get_sth_consistency(self, old_size, new_size):
"""Retrieve a consistency proof.
Args:
old_size : size of older tree.
new_size : size of newer tree.
Returns:
a Deferred that fires with list of raw hashes (bytes) forming the
consistency proof
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
or returned an error. HTTPClientError can happen when
(old_size, new_size) are not valid for this log (e.g. greater
than the size of the log).
InvalidRequestError: invalid request size (irrespective of log).
InvalidResponseError: server response is invalid for the given
request
Caller is responsible for ensuring that (old_size, new_size) are valid
(by retrieving an STH first), otherwise a HTTPClientError may occur.
"""
if old_size > new_size:
raise log_client.InvalidRequestError(
"old > new: %s >= %s" % (old_size, new_size))
if old_size < 0 or new_size < 0:
raise log_client.InvalidRequestError(
"both sizes must be >= 0: %s, %s" % (old_size, new_size))
# don't need to contact remote server for trivial proofs:
# - empty tree is consistent with everything
# - everything is consistent with itself
if old_size == 0 or old_size == new_size:
d = defer.Deferred()
d.callback([])
return d
deferred_response = self._handler.get(
self._uri + "/" +
log_client._GET_STH_CONSISTENCY_PATH,
params={"first": old_size, "second": new_size})
deferred_response.addCallback(log_client._parse_consistency_proof,
self.servername)
return deferred_response
| apache-2.0 |
networks-lab/isilib | metaknowledge/medline/recordMedline.py | 2 | 5557 | #Written by Reid McIlroy-Young for Dr. John McLevey, University of Waterloo 2016
import collections
import itertools
import io
from ..mkExceptions import BadPubmedRecord, RCTypeError
from ..mkRecord import ExtendedRecord
from .tagProcessing.tagNames import tagNameConverterDict, authorBasedTags
from .tagProcessing.tagFunctions import medlineTagToFunc
from .tagProcessing.specialFunctions import medlineSpecialTagToFunc
class MedlineRecord(ExtendedRecord):
"""Class for full Medline(Pubmed) entries.
This class is an [ExtendedRecord](./ExtendedRecord.html#metaknowledge.ExtendedRecord) capable of generating its own id number. You should not create them directly, but instead use [medlineParser()](../modules/medline.html#metaknowledge.medline.medlineHandlers.medlineParser) on a medline file.
"""
def __init__(self, inRecord, sFile = "", sLine = 0):
bad = False
error = None
fieldDict = None
try:
if isinstance(inRecord, dict) or isinstance(inRecord, collections.OrderedDict):
fieldDict = collections.OrderedDict(inRecord)
elif isinstance(inRecord, itertools.chain):
fieldDict = medlineRecordParser(inRecord)
elif isinstance(inRecord, io.IOBase):
fieldDict = medlineRecordParser(enumerate(inRecord))
elif isinstance(inRecord, str):
def addCharToEnd(lst):
for s in lst:
yield s + '\n'
fieldDict = medlineRecordParser(enumerate(addCharToEnd(inRecord.split('\n')), start = 1))
                # plain string input: newlines are re-added so it parses like a file
else:
raise RCTypeError("Unsupported input type '{}', PubmedRecords cannot be created from '{}'".format(inRecord, type(inRecord)))
except BadPubmedRecord as b:
            bad = True
            error = b
fieldDict = collections.OrderedDict()
if fieldDict is not None:
if 'PMID' in fieldDict:
self._pubNum = "PMID:{}".format(fieldDict['PMID'][0])
else:
self._pubNum = None
bad = True
error = BadPubmedRecord("Missing PMID")
ExtendedRecord.__init__(self, fieldDict, self._pubNum, bad, error, sFile = sFile, sLine = sLine)
def encoding(self):
return 'latin-1'
@staticmethod
def getAltName(tag):
return tagNameConverterDict.get(tag)
@staticmethod
def tagProcessingFunc(tag):
return medlineTagToFunc[tag]
def specialFuncs(self, key):
#This will usually raise a key error that needs to be caught higher up
#Catching it here would require unnecessary overhead
return medlineSpecialTagToFunc[key](self)
def writeRecord(self, f):
"""This is nearly identical to the original the FAU tag is the only tag not writen in the same place, doing so would require changing the parser and lots of extra logic.
"""
if self.bad:
raise BadPubmedRecord("This record cannot be converted to a file as the input was malformed.\nThe original line number (if any) is: {} and the original file is: '{}'".format(self._sourceLine, self._sourceFile))
else:
authTags = {}
for tag in authorBasedTags:
for val in self._fieldDict.get(tag, []):
split = val.split(' : ')
try:
authTags[split[0]].append("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)),' : '.join(split[1:]).replace('\n', '\n ')))
except KeyError:
authTags[split[0]] = ["{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)),' : '.join(split[1:]).replace('\n', '\n '))]
for tag, value in self._fieldDict.items():
if tag in authorBasedTags:
continue
else:
for v in value:
f.write("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), v.replace('\n', '\n ')))
if tag == 'AU':
for authVal in authTags.get(v,[]):
f.write(authVal)
def medlineRecordParser(record):
"""The parser [`MedlineRecord`](../classes/MedlineRecord.html#metaknowledge.medline.MedlineRecord) use. This takes an entry from [medlineParser()](#metaknowledge.medline.medlineHandlers.medlineParser) and parses it a part of the creation of a `MedlineRecord`.
# Parameters
_record_ : `enumerate object`
> a file wrapped by `enumerate()`
# Returns
`collections.OrderedDict`
    > An ordered dictionary of the key-value pairs in the entry
"""
tagDict = collections.OrderedDict()
tag = 'PMID'
mostRecentAuthor = None
for lineNum, line in record:
tmptag = line[:4].rstrip()
contents = line[6:-1]
if tmptag.isalpha() and line[4] == '-':
tag = tmptag
if tag == 'AU':
mostRecentAuthor = contents
if tag in authorBasedTags:
contents = "{} : {}".format(mostRecentAuthor, contents)
try:
tagDict[tag].append(contents)
except KeyError:
tagDict[tag] = [contents]
elif line[:6] == ' ':
tagDict[tag][-1] += '\n' + line[6:-1]
elif line == '\n':
break
else:
raise BadPubmedRecord("Tag not formed correctly on line {}: '{}'".format(lineNum, line))
return tagDict
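# A minimal sketch of feeding the parser directly (the record text below is
# hypothetical; real MEDLINE entries carry many more tags):
#
#   import io
#   sample = io.StringIO("PMID- 123456\nTI  - An example title\n\n")
#   medlineRecordParser(enumerate(sample, start=1))
#   -> OrderedDict([('PMID', ['123456']), ('TI', ['An example title'])])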
| gpl-2.0 |
akx/lepo | lepo/apidef/operation/base.py | 1 | 2258 | from collections.__init__ import OrderedDict
from django.utils.functional import cached_property
from lepo.utils import maybe_resolve
class Operation:
parameter_class = None # This should never be used
def __init__(self, api, path, method, data):
"""
:type api: lepo.apidef.doc.APIDefinition
:type path: lepo.apidef.path.Path
:type method: str
:type data: dict
"""
self.api = api
self.path = path
self.method = method
self.data = data
@property
def id(self):
return self.data['operationId']
@cached_property
def parameters(self):
"""
Combined path-level and operation-level parameters.
Any $refs are resolved here.
Note that this implementation differs from the spec in that we only use
the _name_ of a parameter to consider its uniqueness, not the name and location.
This is because we end up passing parameters to the handler by name anyway,
so any duplicate names, even if they had different locations, would be horribly mangled.
:rtype: list[Parameter]
"""
return list(self.get_parameter_dict().values())
def get_parameter_dict(self):
parameters = OrderedDict()
for parameter in self._get_regular_parameters():
parameters[parameter.name] = parameter
return parameters
def _get_regular_parameters(self):
for source in (
self.path.mapping.get('parameters', ()),
self.data.get('parameters', {}),
):
source = maybe_resolve(source, self.api.resolve_reference)
for parameter in source:
parameter_data = maybe_resolve(parameter, self.api.resolve_reference)
parameter = self.parameter_class(data=parameter_data, operation=self, api=self.api)
yield parameter
def _get_overridable(self, key, default=None):
# TODO: This probes a little too deeply into the specifics of these objects, I think...
for obj in (
self.data,
self.path.mapping,
self.api.doc,
):
if key in obj:
return obj[key]
return default
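    # For example (hypothetical data): if both the path mapping and the
    # operation data define a parameter named 'id', get_parameter_dict() keeps
    # only the operation-level one, since later sources overwrite earlier
    # entries keyed by name.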
| mit |
trishnaguha/ansible | lib/ansible/utils/module_docs_fragments/cloudstack.py | 53 | 2922 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard cloudstack documentation fragment
DOCUMENTATION = '''
options:
api_key:
description:
- API key of the CloudStack API.
- If not given, the C(CLOUDSTACK_KEY) env variable is considered.
- As the last option, the value is taken from the ini config file, also see the notes.
api_secret:
description:
- Secret key of the CloudStack API.
- If not set, the C(CLOUDSTACK_SECRET) env variable is considered.
- As the last option, the value is taken from the ini config file, also see the notes.
api_url:
description:
- URL of the CloudStack API e.g. https://cloud.example.com/client/api.
- If not given, the C(CLOUDSTACK_ENDPOINT) env variable is considered.
- As the last option, the value is taken from the ini config file, also see the notes.
api_http_method:
description:
- HTTP method used to query the API endpoint.
- If not given, the C(CLOUDSTACK_METHOD) env variable is considered.
- As the last option, the value is taken from the ini config file, also see the notes.
- Fallback value is C(get) if not specified.
choices: [ get, post ]
api_timeout:
description:
- HTTP timeout in seconds.
- If not given, the C(CLOUDSTACK_TIMEOUT) env variable is considered.
- As the last option, the value is taken from the ini config file, also see the notes.
- Fallback value is 10 seconds if not specified.
api_region:
description:
      - Name of the ini section in the C(cloudstack.ini) file.
- If not given, the C(CLOUDSTACK_REGION) env variable is considered.
default: cloudstack
requirements:
- "python >= 2.6"
- "cs >= 0.6.10"
notes:
- Ansible uses the C(cs) library's configuration method if credentials are not
provided by the arguments C(api_url), C(api_key), C(api_secret).
Configuration is read from several locations, in the following order.
    The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET),
    C(CLOUDSTACK_METHOD) and C(CLOUDSTACK_TIMEOUT) environment variables.
A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file.
A C(cloudstack.ini) file in the current working directory.
A C(.cloudstack.ini) file in the users home directory.
Optionally multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini).
Use the argument C(api_region) to select the section name, default section is C(cloudstack).
See https://github.com/exoscale/cs for more information.
- A detailed guide about cloudstack modules can be found in the L(CloudStack Cloud Guide,../scenario_guides/guide_cloudstack.html).
- This module supports check mode.
'''
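    # Usage sketch: a module opts into this shared documentation by adding
    #   extends_documentation_fragment: cloudstack
    # to its own DOCUMENTATION block.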
| gpl-3.0 |
JustinWingChungHui/okKindred | family_tree/models/relation.py | 2 | 5182 | from django.db import models
from family_tree.models.person import Person
from django.utils.translation import ugettext_lazy as _
import reversion
#Relation types. Note that 'raised by' will resolve to 'raised' with the relation inverted
PARTNERED = 1
RAISED = 2
RAISED_BY = 3
RELATION_TYPES = (
(PARTNERED, _('Partnered')),
(RAISED, _('Raised')),
(RAISED_BY, _('Raised By')),
)
class RelationManager(models.Manager):
'''
Custom manager to represent relations
'''
def get_all_relations_for_family_id(self, family_id):
'''
        Gets all the relations for a family, omitting the tracking fields for speed
'''
return self.raw("""
SELECT r.id, r.from_person_id, r.to_person_id, r.relation_type
FROM family_tree_person p
INNER JOIN family_tree_relation r
ON r.from_person_id = p.id
AND p.family_id={0}
""".format(family_id))
def get_navigable_relations(self, family_id, relations=None):
'''
Gets the relations in a navigable format to determine paths
returns a dictionary of paths by person id
'''
if not relations:
relations = self.get_all_relations_for_family_id(family_id)
paths_by_person = {}
for relation in relations:
#Add the from person path
if not relation.from_person_id in paths_by_person:
paths_by_person[relation.from_person_id] = []
paths_by_person[relation.from_person_id].append(relation)
#Add the to person path
if not relation.to_person_id in paths_by_person:
paths_by_person[relation.to_person_id] = []
paths_by_person[relation.to_person_id].append(self._create_inverted_relation(relation))
return paths_by_person
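    # e.g. a single PARTNERED relation from person 1 to person 2 yields
    # {1: [<1->2 PARTNERED>], 2: [<2->1 PARTNERED>]}, the second list holding
    # the inverted copy built by _create_inverted_relation below.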
def _create_inverted_relation(self, relation):
'''
        Creates an inverted relation, used to determine paths
'''
if relation.relation_type == PARTNERED:
new_type = PARTNERED
elif relation.relation_type == RAISED:
new_type = RAISED_BY
elif relation.relation_type == RAISED_BY:
new_type = RAISED
return Relation(from_person_id=relation.to_person_id
,to_person_id=relation.from_person_id
,relation_type=new_type)
@reversion.register()
class Relation(models.Model):
'''
Represent a relation between two people
'''
class Meta:
#Allows models.py to be split up across multiple files
app_label = 'family_tree'
        #Allows only one relation between two people
unique_together = (('from_person', 'to_person'),)
indexes = [
models.Index(fields=['from_person']),
models.Index(fields=['to_person'])
]
#Customer Manager
objects = RelationManager()
#Required fields
from_person = models.ForeignKey(Person, related_name = 'from_person', null = False, blank = False, on_delete=models.CASCADE)
to_person = models.ForeignKey(Person, related_name = 'to_person', null = False, blank = False, on_delete=models.CASCADE)
relation_type = models.IntegerField(choices=RELATION_TYPES, null = False, blank = False)
#Tracking
creation_date = models.DateTimeField(auto_now_add=True)
last_updated_date = models.DateTimeField(auto_now=True)
def __str__(self): # __unicode__ on Python 2
return self.from_person.name + '-' + self.to_person.name
def normalise(self):
'''
This normalises the relations.
        'Raised by' will be resolved to 'raised' with the relation inverted.
        'Partnered' will be ordered by gender, sorted alphabetically
        (F before M before O).
'''
#Invert Raised by relationship
if self.relation_type == RAISED_BY:
self._invert_relationship()
self.relation_type = RAISED
if self.relation_type ==PARTNERED:
if self.from_person.gender == 'M' and self.to_person.gender == 'F':
self._invert_relationship()
elif self.from_person.gender == 'O' and self.to_person.gender == 'F':
self._invert_relationship()
elif self.from_person.gender == 'O' and self.to_person.gender == 'M':
self._invert_relationship()
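    # e.g. a PARTNERED relation stored as male -> female is flipped to
    # female -> male, so the same partnership always normalises to one
    # canonical direction.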
def _invert_relationship(self):
'''
Swaps the from and to in the relationship
'''
from_id = self.from_person_id
self.from_person_id = self.to_person_id
self.to_person_id = from_id
def save(self, *args, **kwargs):
'''
Overrides the save method to allow normalisation
'''
self.normalise()
#Delete any relations already defined between both people
Relation.objects.filter(from_person_id = self.from_person_id, to_person_id = self.to_person_id).delete()
Relation.objects.filter(from_person_id = self.to_person_id, to_person_id = self.from_person_id).delete()
super(Relation, self).save(*args, **kwargs) # Call the "real" save() method.
| gpl-2.0 |
edx/xqueue-watcher | tests/test_grader.py | 1 | 4632 | import unittest
from unittest import mock
import json
import sys
from path import Path
from queue import Queue
from xqueue_watcher import grader
MYDIR = Path(__file__).dirname() / 'fixtures'
class MockGrader(grader.Grader):
def grade(self, grader_path, grader_config, student_response):
tests = []
errors = []
correct = 0
score = 0
if grader_path.endswith('/correct'):
correct = 1
score = 1
tests.append(('short', 'long', True, 'expected', 'actual'))
tests.append(('short', '', True, 'expected', 'actual'))
elif grader_path.endswith('/incorrect'):
tests.append(('short', 'long', False, 'expected', 'actual'))
errors.append('THIS IS AN ERROR')
errors.append('\x00\xc3\x83\xc3\xb8\x02')
try:
from codejail import jail_code
except ImportError:
tests.append(("codejail", "codejail not installed", True, "", ""))
else:
if jail_code.is_configured("python"):
tests.append(("codejail", "codejail configured", True, "", ""))
else:
tests.append(("codejail", "codejail not configured", True, "", ""))
results = {
'correct': correct,
'score': score,
'tests': tests,
'errors': errors,
}
return results
class GraderTests(unittest.TestCase):
def _make_payload(self, body, files=''):
return {
'xqueue_body': json.dumps(body),
'xqueue_files': files
}
def test_bad_payload(self):
g = MockGrader()
self.assertRaises(KeyError, g.process_item, {})
self.assertRaises(ValueError, g.process_item, {'xqueue_body': '', 'xqueue_files': ''})
pl = self._make_payload({
'student_response': 'blah',
'grader_payload': 'blah'
})
self.assertRaises(ValueError, g.process_item, pl)
def test_no_grader(self):
g = grader.Grader()
pl = self._make_payload({
'student_response': 'blah',
'grader_payload': json.dumps({
'grader': '/tmp/grader.py'
})
})
self.assertRaises(NotImplementedError, g.process_item, pl)
# grader that doesn't exist
self.assertRaises(Exception, grader.Grader, gradepy='/asdfasdfdasf.py')
def test_correct_response(self):
g = MockGrader()
pl = self._make_payload({
'student_response': 'blah',
'grader_payload': json.dumps({
'grader': 'correct'
})
})
reply = g.process_item(pl)
self.assertIn('result-correct', reply['msg'])
self.assertEqual(reply['correct'], 1)
self.assertEqual(reply['score'], 1)
def test_incorrect_response(self):
g = MockGrader()
pl = self._make_payload({
'student_response': 'blah',
'grader_payload': json.dumps({
'grader': 'incorrect'
})
})
reply = g.process_item(pl)
self.assertIn('result-incorrect', reply['msg'])
self.assertIn('THIS IS AN ERROR', reply['msg'])
self.assertEqual(reply['correct'], 0)
self.assertEqual(reply['score'], 0)
def test_response_on_queue(self):
g = MockGrader()
pl = self._make_payload({
'student_response': 'blah',
'grader_payload': json.dumps({
'grader': 'correct'
})
})
q = Queue()
reply = g.process_item(pl, queue=q)
popped = q.get()
self.assertEqual(reply, popped)
del pl['xqueue_body']
try:
g.process_item(pl, queue=q)
except Exception as e:
popped = q.get()
self.assertEqual(e, popped)
def test_subprocess(self):
g = MockGrader()
pl = self._make_payload({
'student_response': 'blah',
'grader_payload': json.dumps({
'grader': 'correct'
})
})
reply = g(pl)
self.assertEqual(reply['correct'], 1)
del pl['xqueue_body']
self.assertRaises(KeyError, g, pl)
def test_no_fork(self):
g = MockGrader(fork_per_item=False)
pl = self._make_payload({
'student_response': 'blah',
'grader_payload': json.dumps({
'grader': 'correct'
})
})
reply = g(pl)
self.assertEqual(reply['correct'], 1)
| agpl-3.0 |
jonathan-beard/edx-platform | common/djangoapps/edxmako/shortcuts.py | 67 | 5272 | # Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.template import Context
from django.http import HttpResponse
import logging
from microsite_configuration import microsite
from edxmako import lookup_template
from edxmako.middleware import get_template_request_context
from django.conf import settings
from django.core.urlresolvers import reverse
log = logging.getLogger(__name__)
def marketing_link(name):
"""Returns the correct URL for a link to the marketing site
depending on if the marketing site is enabled
Since the marketing site is enabled by a setting, we have two
    possible URLs for certain links. This function decides
which URL should be provided.
"""
# link_map maps URLs from the marketing site to the old equivalent on
# the Django site
link_map = settings.MKTG_URL_LINK_MAP
enable_mktg_site = microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
if enable_mktg_site and name in settings.MKTG_URLS:
# special case for when we only want the root marketing URL
if name == 'ROOT':
return settings.MKTG_URLS.get('ROOT')
return settings.MKTG_URLS.get('ROOT') + settings.MKTG_URLS.get(name)
# only link to the old pages when the marketing site isn't on
elif not enable_mktg_site and name in link_map:
# don't try to reverse disabled marketing links
if link_map[name] is not None:
return reverse(link_map[name])
else:
log.debug("Cannot find corresponding link for name: %s", name)
return '#'
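# For illustration (the setting values are assumptions): with ENABLE_MKTG_SITE
# on and MKTG_URLS = {'ROOT': 'https://www.example.com', 'ABOUT': '/about'},
# marketing_link('ABOUT') returns 'https://www.example.com/about'.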
def marketing_link_context_processor(request):
"""
A django context processor to give templates access to marketing URLs
Returns a dict whose keys are the marketing link names usable with the
marketing_link method (e.g. 'ROOT', 'CONTACT', etc.) prefixed with
'MKTG_URL_' and whose values are the corresponding URLs as computed by the
marketing_link method.
"""
return dict(
[
("MKTG_URL_" + k, marketing_link(k))
for k in (
settings.MKTG_URL_LINK_MAP.viewkeys() |
settings.MKTG_URLS.viewkeys()
)
]
)
def open_source_footer_context_processor(request):
"""
Checks the site name to determine whether to use the edX.org footer or the Open Source Footer.
"""
return dict(
[
("IS_EDX_DOMAIN", settings.FEATURES.get('IS_EDX_DOMAIN', False))
]
)
def microsite_footer_context_processor(request):
"""
    Checks whether the current request is being served from a microsite.
"""
return dict(
[
("IS_REQUEST_IN_MICROSITE", microsite.is_request_in_microsite())
]
)
def render_to_string(template_name, dictionary, context=None, namespace='main'):
# see if there is an override template defined in the microsite
template_name = microsite.get_template_path(template_name)
context_instance = Context(dictionary)
# add dictionary to context_instance
context_instance.update(dictionary or {})
# collapse context_instance to a single dictionary for mako
context_dictionary = {}
context_instance['settings'] = settings
context_instance['EDX_ROOT_URL'] = settings.EDX_ROOT_URL
context_instance['marketing_link'] = marketing_link
# In various testing contexts, there might not be a current request context.
request_context = get_template_request_context()
if request_context:
for item in request_context:
context_dictionary.update(item)
for item in context_instance:
context_dictionary.update(item)
if context:
context_dictionary.update(context)
# "Fix" CSRF token by evaluating the lazy object
KEY_CSRF_TOKENS = ('csrf_token', 'csrf')
for key in KEY_CSRF_TOKENS:
if key in context_dictionary:
context_dictionary[key] = unicode(context_dictionary[key])
# fetch and render template
template = lookup_template(namespace, template_name)
return template.render_unicode(**context_dictionary)
def render_to_response(template_name, dictionary=None, context_instance=None, namespace='main', **kwargs):
"""
Returns a HttpResponse whose content is filled with the result of calling
lookup.get_template(args[0]).render with the passed arguments.
"""
# see if there is an override template defined in the microsite
template_name = microsite.get_template_path(template_name)
dictionary = dictionary or {}
return HttpResponse(render_to_string(template_name, dictionary, context_instance, namespace), **kwargs)
| agpl-3.0 |
tpummer/fm-pw6-forbiddenisle-tombot | game.py | 1 | 3338 | #fm-pw6-forbiddenisle aka TomBot - Bot for the Freies Magazin 6th programming challenge
# Copyright (C) 2013 Thomas Pummer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gameBoard
import gameRound as r
import gameBot as bot
import inputReader as reader
class game(object):
def __init__(self, inputReader):
self.inputReader = inputReader
self.gameBoard = gameBoard.gameBoard(1,1)
self.round = r.gameRound()
self.bot = bot.gameBot()
self.flood = 0
def run(self):
runningGame = True
while(runningGame):
inputText = self.inputReader.read();
if( inputText == 'END'):
runningGame = False
elif(inputText.startswith('GAMEBOARDSTART')):
self.gameBoard = self.readGameBoard(inputText)
elif(inputText.startswith('ROUND')):
self.reportRound(inputText, self.round, self.bot)
self.bot.makeTurn(self.gameBoard)
elif(inputText.startswith('INCRFLOOD')):
self.incrFlood(inputText);
elif(inputText.startswith('FLOOD')):
self.floodField(inputText);
def floodField(self,inputText):
parts = inputText.split(' ')
coordinates = reader.coordinateTextToTuple(parts[1])
self.gameBoard.floodField(coordinates[0],coordinates[1])
def incrFlood(self,inputText):
additionalValue = int(inputText.split(' ')[1])
self.flood = self.flood + additionalValue
def reportRound(self, inputText, r, bot):
parts = inputText.split(' ')
r.setRound(int(parts[1]))
coordinates = reader.coordinateTextToTuple(parts[2])
bot.setField(self.gameBoard.getField(coordinates[0],coordinates[1]))
def readGameBoard(self,startline):
readBoard = True
indexOfSpace = startline.index(' ');
indexOfSemikolon = startline.index(',');
xGameBoard = int(startline[indexOfSpace+1:indexOfSemikolon])
yGameBoard = int(startline[indexOfSemikolon+1:])
board = gameBoard.gameBoard(xGameBoard,yGameBoard)
        #read y lines of x characters each
y = 0
while(readBoard):
inputText = self.inputReader.read();
if(inputText == 'GAMEBOARDEND'):
readBoard = False;
else:
for x in range(0,xGameBoard):
board.setField(x,y,inputText[x:x+1])
y = y + 1
board.updateFloodCount()
return board
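    # Sketch of the expected input block (format inferred from the parsing
    # above; the tile characters are whatever the referee protocol sends):
    #   GAMEBOARDSTART 3,2
    #   ~~.
    #   ...
    #   GAMEBOARDEND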
def getFlood(self):
return self.flood
def getGameBoard(self):
return self.gameBoard
def getBot(self):
return self.bot
| gpl-3.0 |
ollie314/cassandra | pylib/cqlshlib/test/test_cqlsh_commands.py | 32 | 1219 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# and $CQL_TEST_PORT to the associated port.
from .basecase import BaseTestCase, cqlsh
class TestCqlshCommands(BaseTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_show(self):
pass
def test_describe(self):
pass
def test_exit(self):
pass
def test_help(self):
pass
| apache-2.0 |
loseblue/vim-ycm-windows-64 | third_party/ycmd/third_party/jedi/test/test_evaluate/test_absolute_import.py | 11 | 1207 | """
Tests ``from __future__ import absolute_import`` (only important for
Python 2.X)
"""
import jedi
from jedi._compatibility import u
from jedi.parser import Parser
from .. import helpers
def test_explicit_absolute_imports():
"""
Detect modules with ``from __future__ import absolute_import``.
"""
parser = Parser(u("from __future__ import absolute_import"), "test.py")
assert parser.module.has_explicit_absolute_import
def test_no_explicit_absolute_imports():
"""
Detect modules without ``from __future__ import absolute_import``.
"""
parser = Parser(u("1"), "test.py")
assert not parser.module.has_explicit_absolute_import
def test_dont_break_imports_without_namespaces():
"""
The code checking for ``from __future__ import absolute_import`` shouldn't
assume that all imports have non-``None`` namespaces.
"""
src = u("from __future__ import absolute_import\nimport xyzzy")
parser = Parser(src, "test.py")
assert parser.module.has_explicit_absolute_import
@helpers.cwd_at("test/test_evaluate/absolute_import")
def test_can_complete_when_shadowing():
script = jedi.Script(path="unittest.py")
assert script.completions()
| gpl-3.0 |
Taapat/enigma2-openpli-vuplus | keyids.py | 53 | 5285 | KEYIDS = {
"KEY_RESERVED": 0,
"KEY_ESC": 1,
"KEY_1": 2,
"KEY_2": 3,
"KEY_3": 4,
"KEY_4": 5,
"KEY_5": 6,
"KEY_6": 7,
"KEY_7": 8,
"KEY_8": 9,
"KEY_9": 10,
"KEY_0": 11,
"KEY_MINUS": 12,
"KEY_EQUAL": 13,
"KEY_BACKSPACE": 14,
"KEY_TAB": 15,
"KEY_Q": 16,
"KEY_W": 17,
"KEY_E": 18,
"KEY_R": 19,
"KEY_T": 20,
"KEY_Y": 21,
"KEY_U": 22,
"KEY_I": 23,
"KEY_O": 24,
"KEY_P": 25,
"KEY_LEFTBRACE": 26,
"KEY_RIGHTBRACE": 27,
"KEY_ENTER": 28,
"KEY_LEFTCTRL": 29,
"KEY_A": 30,
"KEY_S": 31,
"KEY_D": 32,
"KEY_F": 33,
"KEY_G": 34,
"KEY_H": 35,
"KEY_J": 36,
"KEY_K": 37,
"KEY_L": 38,
"KEY_SEMICOLON": 39,
"KEY_APOSTROPHE": 40,
"KEY_GRAVE": 41,
"KEY_LEFTSHIFT": 42,
"KEY_BACKSLASH": 43,
"KEY_Z": 44,
"KEY_X": 45,
"KEY_C": 46,
"KEY_V": 47,
"KEY_B": 48,
"KEY_N": 49,
"KEY_M": 50,
"KEY_COMMA": 51,
"KEY_DOT": 52,
"KEY_SLASH": 53,
"KEY_RIGHTSHIFT": 54,
"KEY_KPASTERISK": 55,
"KEY_LEFTALT": 56,
"KEY_SPACE": 57,
"KEY_CAPSLOCK": 58,
"KEY_F1": 59,
"KEY_F2": 60,
"KEY_F3": 61,
"KEY_F4": 62,
"KEY_F5": 63,
"KEY_F6": 64,
"KEY_F7": 65,
"KEY_F8": 66,
"KEY_F9": 67,
"KEY_F10": 68,
"KEY_NUMLOCK": 69,
"KEY_SCROLLLOCK": 70,
"KEY_KP7": 71,
"KEY_KP8": 72,
"KEY_KP9": 73,
"KEY_KPMINUS": 74,
"KEY_KP4": 75,
"KEY_KP5": 76,
"KEY_KP6": 77,
"KEY_KPPLUS": 78,
"KEY_KP1": 79,
"KEY_KP2": 80,
"KEY_KP3": 81,
"KEY_KP0": 82,
"KEY_KPDOT": 83,
"KEY_103RD": 84,
"KEY_F13": 85,
"KEY_102ND": 86,
"KEY_F11": 87,
"KEY_F12": 88,
"KEY_F14": 89,
"KEY_F15": 90,
"KEY_F16": 91,
"KEY_F17": 92,
"KEY_F18": 93,
"KEY_F19": 94,
"KEY_F20": 95,
"KEY_KPENTER": 96,
"KEY_RIGHTCTRL": 97,
"KEY_KPSLASH": 98,
"KEY_SYSRQ": 99,
"KEY_RIGHTALT": 100,
"KEY_LINEFEED": 101,
"KEY_HOME": 102,
"KEY_UP": 103,
"KEY_PAGEUP": 104,
"KEY_LEFT": 105,
"KEY_RIGHT": 106,
"KEY_END": 107,
"KEY_DOWN": 108,
"KEY_PAGEDOWN": 109,
"KEY_INSERT": 110,
"KEY_DELETE": 111,
"KEY_MACRO": 112,
"KEY_MUTE": 113,
"KEY_VOLUMEDOWN": 114,
"KEY_VOLUMEUP": 115,
"KEY_POWER": 116,
"KEY_KPEQUAL": 117,
"KEY_KPPLUSMINUS": 118,
"KEY_PAUSE": 119,
"KEY_F21": 120,
"KEY_F22": 121,
"KEY_F23": 122,
"KEY_F24": 123,
"KEY_KPCOMMA": 124,
"KEY_LEFTMETA": 125,
"KEY_RIGHTMETA": 126,
"KEY_COMPOSE": 127,
"KEY_STOP": 128,
"KEY_AGAIN": 129,
"KEY_PROPS": 130,
"KEY_UNDO": 131,
"KEY_FRONT": 132,
"KEY_COPY": 133,
"KEY_OPEN": 134,
"KEY_PASTE": 135,
"KEY_FIND": 136,
"KEY_CUT": 137,
"KEY_HELP": 138,
"KEY_MENU": 139,
"KEY_CALC": 140,
"KEY_SETUP": 141,
"KEY_SLEEP": 142,
"KEY_WAKEUP": 143,
"KEY_FILE": 144,
"KEY_SENDFILE": 145,
"KEY_DELETEFILE": 146,
"KEY_XFER": 147,
"KEY_PROG1": 148,
"KEY_PROG2": 149,
"KEY_WWW": 150,
"KEY_MSDOS": 151,
"KEY_COFFEE": 152,
"KEY_DIRECTION": 153,
"KEY_CYCLEWINDOWS": 154,
"KEY_MAIL": 155,
"KEY_BOOKMARKS": 156,
"KEY_COMPUTER": 157,
"KEY_BACK": 158,
"KEY_FORWARD": 159,
"KEY_CLOSECD": 160,
"KEY_EJECTCD": 161,
"KEY_EJECTCLOSECD": 162,
"KEY_NEXTSONG": 163,
"KEY_PLAYPAUSE": 164,
"KEY_PREVIOUSSONG": 165,
"KEY_STOPCD": 166,
"KEY_RECORD": 167,
"KEY_REWIND": 168,
"KEY_PHONE": 169,
"KEY_ISO": 170,
"KEY_CONFIG": 171,
"KEY_HOMEPAGE": 172,
"KEY_REFRESH": 173,
"KEY_EXIT": 174,
"KEY_MOVE": 175,
"KEY_EDIT": 176,
"KEY_SCROLLUP": 177,
"KEY_SCROLLDOWN": 178,
"KEY_KPLEFTPAREN": 179,
"KEY_KPRIGHTPAREN": 180,
"KEY_INTL1": 181,
"KEY_INTL2": 182,
"KEY_INTL3": 183,
"KEY_INTL4": 184,
"KEY_INTL5": 185,
"KEY_INTL6": 186,
"KEY_INTL7": 187,
"KEY_INTL8": 188,
"KEY_INTL9": 189,
"KEY_LANG1": 190,
"KEY_LANG2": 191,
"KEY_LANG3": 192,
"KEY_LANG4": 193,
"KEY_LANG5": 194,
"KEY_LANG6": 195,
"KEY_LANG7": 196,
"KEY_LANG8": 197,
"KEY_LANG9": 198,
"KEY_PLAYCD": 200,
"KEY_PAUSECD": 201,
"KEY_PROG3": 202,
"KEY_PROG4": 203,
"KEY_SUSPEND": 205,
"KEY_CLOSE": 206,
"KEY_PLAY": 207,
"KEY_FASTFORWARD": 208,
"KEY_BASSBOOST": 209,
"KEY_PRINT": 210,
"KEY_HP": 211,
"KEY_CAMERA": 212,
"KEY_SOUND": 213,
"KEY_QUESTION": 214,
"KEY_EMAIL": 215,
"KEY_CHAT": 216,
"KEY_SEARCH": 217,
"KEY_CONNECT": 218,
"KEY_FINANCE": 219,
"KEY_SPORT": 220,
"KEY_SHOP": 221,
"KEY_ALTERASE": 222,
"KEY_CANCEL": 223,
"KEY_BRIGHTNESSDOWN": 224,
"KEY_BRIGHTNESSUP": 225,
"KEY_MEDIA": 226,
"KEY_VMODE": 227,
"KEY_UNKNOWN": 240,
"KEY_OK": 352,
"KEY_SELECT": 353,
"KEY_GOTO": 354,
"KEY_CLEAR": 355,
"KEY_POWER2": 356,
"KEY_OPTION": 357,
"KEY_INFO": 358,
"KEY_TIME": 359,
"KEY_VENDOR": 360,
"KEY_ARCHIVE": 361,
"KEY_PROGRAM": 362,
"KEY_CHANNEL": 363,
"KEY_FAVORITES": 364,
"KEY_EPG": 365,
"KEY_PVR": 366,
"KEY_MHP": 367,
"KEY_LANGUAGE": 368,
"KEY_TITLE": 369,
"KEY_SUBTITLE": 370,
"KEY_ANGLE": 371,
"KEY_ZOOM": 372,
"KEY_MODE": 373,
"KEY_KEYBOARD": 374,
"KEY_SCREEN": 375,
"KEY_PC": 376,
"KEY_TV": 377,
"KEY_TV2": 378,
"KEY_VCR": 379,
"KEY_VCR2": 380,
"KEY_SAT": 381,
"KEY_SAT2": 382,
"KEY_CD": 383,
"KEY_TAPE": 384,
"KEY_RADIO": 385,
"KEY_TUNER": 386,
"KEY_PLAYER": 387,
"KEY_TEXT": 388,
"KEY_DVD": 389,
"KEY_AUX": 390,
"KEY_MP3": 391,
"KEY_AUDIO": 392,
"KEY_VIDEO": 393,
"KEY_DIRECTORY": 394,
"KEY_LIST": 395,
"KEY_MEMO": 396,
"KEY_CALENDAR": 397,
"KEY_RED": 398,
"KEY_GREEN": 399,
"KEY_YELLOW": 400,
"KEY_BLUE": 401,
"KEY_CHANNELUP": 402,
"KEY_CHANNELDOWN": 403,
"KEY_FIRST": 404,
"KEY_LAST": 405,
"KEY_AB": 406,
"KEY_NEXT": 407,
"KEY_RESTART": 408,
"KEY_SLOW": 409,
"KEY_SHUFFLE": 410,
"KEY_BREAK": 411,
"KEY_PREVIOUS": 412,
"KEY_DIGITS": 413,
"KEY_TEEN": 414,
"KEY_TWEN": 415,
"KEY_CONTEXT_MENU": 438,
"KEY_DEL_EOL": 448,
"KEY_DEL_EOS": 449,
"KEY_INS_LINE": 450,
"KEY_DEL_LINE": 451,
"KEY_ASCII": 510,
"KEY_MAX": 511,
"BTN_0": 256,
"BTN_1": 257,
}
| gpl-2.0 |
google/it-cert-automation | Course1/snippets/C1M2.py | 1 | 5737 | #!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# These are the snippets shown during the demo videos in C1M2
# Each snippet is followed by the corresponding output when executed in the
# Python interpreter.
print(7+8)
# >>> print(7+8)
# 15
print("hello "+ "world")
# >>> print("hello "+ "world")
# hello world
print(7+"8")
# >>> print(7+"8")
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# TypeError: unsupported operand type(s) for +: 'int' and 'str'
print(type("a"))
# >>> print(type("a"))
# <class 'str'>
print(type(2))
# >>> print(type(2))
# <class 'int'>
print(type(2.5))
# >>> print(type(2.5))
# <class 'float'>
length = 10
width = 2
area = length * width
print(area)
# >>> length = 10
# >>> width = 2
# >>> area = length * width
# >>> print(area)
# 20
print(7+8.5)
# >>> print(7+8.5)
# 15.5
print("a"+"b"+"c")
# >>> print("a"+"b"+"c")
# abc
base = 6
height = 3
area = (base*height)/2
print("The area of the triangle is: " + str(area))
# >>> base = 6
# >>> height = 3
# >>> area = (base*height)/2
# >>> print("The area of the triangle is: " + str(area))
# The area of the triangle is: 9.0
def greeting(name):
print("Welcome, " + name)
# >>> def greeting(name):
# ... print("Welcome, " + name)
# ...
greeting("Kay")
# >>> greeting("Kay")
# Welcome, Kay
greeting("Cameron")
# >>> greeting("Cameron")
# Welcome, Cameron
def greeting(name, department):
print("Welcome, " + name)
print("You are part of " + department)
# >>> def greeting(name, department):
# ... print("Welcome, " + name)
# ... print("You are part of " + department)
# ...
greeting("Blake", "IT Support")
# >>> greeting("Blake", "IT Support")
# Welcome, Blake
# You are part of IT Support
greeting("Ellis", "Software engineering")
# >>> greeting("Ellis", "Software engineering")
# Welcome, Ellis
# You are part of Software engineering
def area_triangle(base, height):
return base*height/2
# >>> def area_triangle(base, height):
# ... return base*height/2
# ...
area_a = area_triangle(5,4)
# >>> area_a = area_triangle(5,4)
area_b = area_triangle(7,3)
# >>> area_b = area_triangle(7,3)
sum = area_a + area_b
# >>> sum = area_a + area_b
print("The sum of both areas is: " + str(sum))
# >>> print("The sum of both areas is: " + str(sum))
# The sum of both areas is: 20.5
def convert_seconds(seconds):
hours = seconds // 3600
minutes = (seconds - hours * 3600) // 60
remaining_seconds = seconds - hours * 3600 - minutes * 60
return hours, minutes, remaining_seconds
# >>> def convert_seconds(seconds):
# ... hours = seconds // 3600
# ... minutes = (seconds - hours * 3600) // 60
# ... remaining_seconds = seconds - hours * 3600 - minutes * 60
# ... return hours, minutes, remaining_seconds
# ...
hours, minutes, seconds = convert_seconds(5000)
# >>> hours, minutes, seconds = convert_seconds(5000)
print(hours, minutes, seconds)
# >>> print(hours, minutes, seconds)
# 1 23 20
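# Note: // is floor division, so 5000 // 3600 == 1 hour; the remaining
# 1400 seconds give 1400 // 60 == 23 minutes and 20 leftover seconds.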
def greeting(name):
    print("Welcome, " + name)
result = greeting("Jo")
# >>> result = greeting("Jo")
# Welcome, Jo
print(result)
# >>> print(result)
# None
name = "Kay"
number = len(name) * 9
print("Hello " + name + ". Your lucky number is " + str(number))
# >>> name = "Kay"
# >>> number = len(name) * 9
# >>>
# >>> print("Hello " + name + ". Your lucky number is " + str(number))
# Hello Kay. Your lucky number is 27
name = "Cameron"
number = len(name) * 9
print("Hello " + name + ". Your lucky number is " + str(number))
# >>> name = "Cameron"
# >>> number = len(name) * 9
# >>>
# >>> print("Hello " + name + ". Your lucky number is " + str(number))
# Hello Cameron. Your lucky number is 63
def lucky_number(name):
number = len(name) * 9
print("Hello " + name + ". Your lucky number is " + str(number))
lucky_number("Kay")
lucky_number("Cameron")
# >>> def lucky_number(name):
# ... number = len(name) * 9
# ... print("Hello " + name + ". Your lucky number is " + str(number))
# ...
# >>> lucky_number("Kay")
# Hello Kay. Your lucky number is 27
# >>> lucky_number("Cameron")
# Hello Cameron. Your lucky number is 63
def calculate(d):
q = 3.14
z = q * (d ** 2)
print(z)
# >>> def calculate(d):
# ... q = 3.14
# ... z = q * (d ** 2)
# ... print(z)
# ...
calculate(5)
# >>> calculate(5)
# 78.5
def circle_area(radius):
pi = 3.14
area = pi * (radius ** 2)
print(area)
# >>> def circle_area(radius):
# ... pi = 3.14
# ... area = pi * (radius ** 2)
# ... print(area)
# ...
circle_area(5)
# >>> circle_area(5)
# 78.5
print(10>1)
# >>> print(10>1)
# True
print("cat" == "dog")
# >>> print("cat" == "dog")
# False
print (1 != 2)
# >>> print (1 != 2)
# True
print(1 < "1")
# >>> print(1 < "1")
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# TypeError: '<' not supported between instances of 'int' and 'str'
print(1 == "1")
# >>> print(1 == "1")
# False
print("Yellow" > "Cyan" and "Brown" > "Magenta")
# >>> print("Yellow" > "Cyan" and "Brown" > "Magenta")
# False
print(25 > 50 or 1 != 2)
# >>> print(25 > 50 or 1 != 2)
# True
print(not 42 == "Answer")
# >>> print(not 42 == "Answer")
# True
| apache-2.0 |
monash-merc/karaage | karaage/legacy/admin/south_migrations/00001_initial_create.py | 3 | 8990 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.db import connection
class Migration(SchemaMigration):
def forwards(self, orm):
cursor = connection.cursor()
if 'django_admin_log' not in connection.introspection.get_table_list(cursor):
db.create_table('django_admin_log', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('action_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['people.Person'], null=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True)),
('object_id', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('object_repr', self.gf('django.db.models.fields.CharField')(max_length=200)),
('action_flag', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('change_message', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('admin', ['LogEntry'])
models = {
'admin.logentry': {
'Meta': {'ordering': "('-action_time',)", 'object_name': 'LogEntry', 'db_table': "u'admin_log'"},
'action_flag': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'action_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'change_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_repr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'institutes.institute': {
'Meta': {'ordering': "['name']", 'object_name': 'Institute', 'db_table': "'institute'"},
'delegates': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'delegate'", 'to': "orm['people.Person']", 'through': "orm['institutes.InstituteDelegate']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'saml_entityid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'institutes.institutedelegate': {
'Meta': {'object_name': 'InstituteDelegate', 'db_table': "'institutedelegate'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'people.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['people.Person']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'people.person': {
'Meta': {'ordering': "['full_name', 'short_name']", 'object_name': 'Person', 'db_table': "'person'"},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_approver'", 'null': 'True', 'to': "orm['people.Person']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_deletor'", 'null': 'True', 'to': "orm['people.Person']"}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'db_index': 'True'}),
'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_systemuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'legacy_ldap_password': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'login_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'saml_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'supervisor': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['admin']
| gpl-3.0 |
ArtsiomCh/tensorflow | tensorflow/contrib/boosted_trees/python/kernel_tests/split_handler_ops_test.py | 30 | 19467 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow split handler Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.contrib.boosted_trees.python.ops import split_handler_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class SplitHandlerOpsTest(test_util.TensorFlowTestCase):
def testMakeDenseSplit(self):
"""Tests split handler op."""
with self.test_session() as sess:
# The data looks like the following after dividing by number of steps (2).
# Gradients | Partition | Dense Quantile |
# (1.2, 0.2) | 0 | 0 |
# (-0.3, 0.19) | 0 | 1 |
# (4.0, 0.13) | 1 | 1 |
partition_ids = array_ops.constant([0, 0, 1], dtype=dtypes.int32)
bucket_ids = array_ops.constant([0, 1, 1], dtype=dtypes.int64)
gradients = array_ops.constant([2.4, -0.6, 8.0])
hessians = array_ops.constant([0.4, 0.38, 0.26])
bucket_boundaries = [0.3, 0.52]
partitions, gains, splits = (
split_handler_ops.build_dense_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
class_id=-1,
feature_column_group_id=0,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
partitions, gains, splits = sess.run([partitions, gains, splits])
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
# (-0.3 + 0.1) / (0.19 + 1)
expected_right_weight = 0.1680672
# expected_right_weight * -(-0.3 + 0.1)
expected_right_gain = 0.033613445378151252
# (-0.3 + 1.2 - 0.1) ** 2 / (0.19 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
expected_right_weight = 0
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# There's only one active bucket here so zero gain is expected.
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testMakeMulticlassDenseSplit(self):
"""Tests split handler op."""
with self.test_session() as sess:
partition_ids = array_ops.constant([0, 0, 1], dtype=dtypes.int32)
bucket_ids = array_ops.constant([0, 1, 1], dtype=dtypes.int64)
gradients = array_ops.constant([[2.4, 3.0], [-0.6, 0.1], [8.0, 1.0]])
hessians = array_ops.constant([[[0.4, 1], [1, 1]], [[0.38, 1], [1, 1]],
[[0.26, 1], [1, 1]]])
bucket_boundaries = [0.3, 0.52]
partitions, gains, splits = (
split_handler_ops.build_dense_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
class_id=-1,
feature_column_group_id=0,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN))
partitions, gains, splits = sess.run([partitions, gains, splits])
self.assertAllEqual([0, 1], partitions)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testMakeDenseSplitEmptyInputs(self):
"""Tests empty inputs op."""
with self.test_session() as sess:
partition_ids = array_ops.constant([], dtype=dtypes.int32)
bucket_ids = array_ops.constant([], dtype=dtypes.int64)
gradients = array_ops.constant([])
hessians = array_ops.constant([])
bucket_boundaries = [0.3, 0.52]
partitions, gains, splits = (
split_handler_ops.build_dense_inequality_splits(
num_minibatches=0,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
class_id=-1,
feature_column_group_id=0,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
partitions, gains, splits = sess.run([partitions, gains, splits])
# .assertEmpty doesn't exist on ubuntu-contrib
self.assertEqual(0, len(partitions))
self.assertEqual(0, len(gains))
self.assertEqual(0, len(splits))
def testMakeSparseSplit(self):
"""Tests split handler op."""
with self.test_session() as sess:
# The data looks like the following after dividing by number of steps (2).
# Gradients | Partition | bucket ID |
# (0.9, 0.39) | 0 | -1 |
# (1.2, 0.2) | 0 | 0 |
# (0.2, 0.12) | 0 | 1 |
# (4.0, 0.13) | 1 | -1 |
# (4.0, 0.13) | 1 | 1 |
partition_ids = array_ops.constant([0, 0, 0, 1, 1], dtype=dtypes.int32)
bucket_ids = array_ops.constant([-1, 0, 1, -1, 1], dtype=dtypes.int64)
gradients = array_ops.constant([1.8, 2.4, 0.4, 8.0, 8.0])
hessians = array_ops.constant([0.78, 0.4, 0.24, 0.26, 0.26])
bucket_boundaries = array_ops.constant([0.3, 0.52])
partitions, gains, splits = (
split_handler_ops.build_sparse_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
partitions, gains, splits = (sess.run([partitions, gains, splits]))
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2) / (0.12 + 0.2 + 2)
expected_left_weight = -0.603448275862069
# (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2)
expected_left_gain = 0.8448275862068965
# 0.5 / (0.07 + 2)
expected_right_weight = 0.24154589371980678
# 0.5 ** 2 / (0.07 + 2)
expected_right_gain = 0.12077294685990339
# (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2)
expected_bias_gain = 0.3389121338912133
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
# Check the split on partition 1.
expected_left_weight = -1.8779342723004695
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertAllClose(0.0, gains[1])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testMakeMulticlassSparseSplit(self):
"""Tests split handler op."""
with self.test_session() as sess:
partition_ids = array_ops.constant([0, 0, 0, 1, 1], dtype=dtypes.int32)
bucket_ids = array_ops.constant([-1, 0, 1, -1, 1], dtype=dtypes.int64)
gradients = array_ops.constant([[1.8, 3.5], [2.4, 1.0], [0.4, 4.0],
[8.0, 3.1], [8.0, 0.8]])
hessian_0 = [[0.78, 1], [12, 1]]
hessian_1 = [[0.4, 1], [1, 1]]
hessian_2 = [[0.24, 1], [1, 1]]
hessian_3 = [[0.26, 1], [1, 1]]
hessian_4 = [[0.26, 1], [1, 1]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3, hessian_4])
bucket_boundaries = array_ops.constant([0.3, 0.52])
partitions, gains, splits = (
split_handler_ops.build_sparse_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN))
partitions, gains, splits = (sess.run([partitions, gains, splits]))
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testMakeCategoricalEqualitySplit(self):
"""Tests split handler op for categorical equality split."""
with self.test_session() as sess:
# The data looks like the following after dividing by number of steps (2).
# Gradients | Partition | Feature ID |
# (0.9, 0.39) | 0 | -1 |
# (0.2, 0.12) | 0 | 1 |
# (1.4, 0.32) | 0 | 2 |
# (4.0, 0.13) | 1 | -1 |
# (4.0, 0.13) | 1 | 1 |
gradients = [1.8, 0.4, 2.8, 8.0, 8.0]
hessians = [0.78, 0.24, 0.64, 0.26, 0.26]
partition_ids = [0, 0, 0, 1, 1]
feature_ids = array_ops.constant([-1, 1, 2, -1, 1], dtype=dtypes.int64)
partitions, gains, splits = (
split_handler_ops.build_categorical_equality_splits(
num_minibatches=2,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians,
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
partitions, gains, splits = sess.run([partitions, gains, splits])
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active feature here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testMakeMulticlassCategoricalEqualitySplit(self):
"""Tests split handler op for categorical equality split in multiclass."""
with self.test_session() as sess:
gradients = array_ops.constant([[1.8, 3.5], [2.4, 1.0], [0.4, 4.0],
[9.0, 3.1], [3.0, 0.8]])
hessian_0 = [[0.78, 1], [12, 1]]
hessian_1 = [[0.4, 1], [1, 1]]
hessian_2 = [[0.24, 1], [1, 1]]
hessian_3 = [[0.16, 2], [-1, 1]]
hessian_4 = [[0.6, 1], [2, 1]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3, hessian_4])
partition_ids = [0, 0, 0, 1, 1]
feature_ids = array_ops.constant([-1, 1, 2, -1, 1], dtype=dtypes.int64)
partitions, gains, splits = (
split_handler_ops.build_categorical_equality_splits(
num_minibatches=2,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians,
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN))
partitions, gains, splits = sess.run([partitions, gains, splits])
self.assertAllEqual([0, 1], partitions)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testMakeCategoricalEqualitySplitEmptyInput(self):
with self.test_session() as sess:
gradients = []
hessians = []
partition_ids = []
feature_ids = []
partitions, gains, splits = (
split_handler_ops.build_categorical_equality_splits(
num_minibatches=0,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians,
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
partitions, gains, splits = (sess.run([partitions, gains, splits]))
self.assertEqual(0, len(partitions))
self.assertEqual(0, len(gains))
self.assertEqual(0, len(splits))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
rschroll/prsannots | prsannots/pdfcontent.py | 1 | 4222 | # Copyright 2012 Robert Schroll
#
# This file is part of prsannots and is distributed under the terms of
# the LGPL license. See the file COPYING for full details.
from pyPdf.pdf import ContentStream
from pyPdf.generic import ArrayObject, NameObject
class StupidSVGInterpreterError(Exception):
pass
# Help on adding contents to PDF pages from
# https://github.com/Averell7/pyPdf/commit/a7934266c2cb53a778e89beec2ab7d8111a17530
def pdf_add_content(content_string, page, scale=1, offsetx=0, offsety=0):
"""Add content to the end of the content stream of the PDF page.
Inputs: content_string The PDF drawing commands to add, as a single string.
page The pyPdf.pdf.PageObject to add the content to.
            scale           Before adding the content, adjust the coordinate
offsetx system with a (uniform) scale factor and a
offsety translation of offsetx and offsety.
"""
coord_trans = '%.2f 0 0 %.2f %.2f %.2f cm' % (scale, scale, offsetx, offsety)
commands = '\n'.join(('Q', 'q', coord_trans, content_string, 'Q'))
try:
orig_content = page['/Contents'].getObject()
except KeyError:
orig_content = ArrayObject([])
stream = ContentStream(orig_content, page.pdf)
stream.operations.insert(0, [[], 'q']) # Existing content may not restore
stream.operations.append([[], commands]) # graphics state at the end.
page[NameObject('/Contents')] = stream
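# A minimal usage sketch (the drawing commands and offsets below are
# hypothetical): stroke a diagonal line on top of an existing page,
# shifted by (10, 20) points:
#   pdf_add_content('1 w 0 0 m 100 100 l S', page, scale=1, offsetx=10, offsety=20)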
def svg_to_pdf_content(svg):
"""The world's worst SVG-to-PDF converter.
Convert the SVG document svg (a minidom.Node for the svg element) into
a string of PDF commands, suitable for use with pdf_add_content().
Currently, only supports stroked polyline elements, and only a few of
their attributes. Suitable for the SVG files produced by a Sony Reader,
and not much else.
"""
commands = []
# Flip coordinate system to SVG top-left origin
commands.append('1 0 0 -1 0 %s cm' % svg.getAttribute('height'))
# Switch to black strokes
commands.append('0 0 0 RG')
for node in svg.childNodes:
if node.nodeType != node.ELEMENT_NODE:
continue
name = node.localName
try:
commands.extend(ELEMENT_FUNCS[name](node))
except KeyError:
            raise StupidSVGInterpreterError('Cannot handle %s elements' % name)
commands.insert(0, 'q') # Save graphics state
commands.append('Q') # ... and restore it
return '\n'.join(commands)
def polyline(node):
attr_func_map = {'stroke-width': lambda w: '%s w' % w,
'stroke-linecap': lambda lc: '%i J' % ('butt', 'round', 'square').index(lc),
'stroke-linejoin': lambda lj: '%i j' % ('miter', 'round', 'bevel').index(lj),
}
commands = []
for attr in attr_func_map:
attrval = node.getAttribute(attr)
if attrval:
commands.append(attr_func_map[attr](attrval))
pts = node.getAttribute('points').replace(',', ' ').split()
xs, ys = pts[2::2], pts[3::2]
segs = ['%s %s l' % (x, y) for x, y in zip(xs, ys)]
commands.append('%s %s m %s S' % (pts[0], pts[1], ' '.join(segs)))
commands.insert(0, 'q')
commands.append('Q')
return commands
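# For illustration (hypothetical input), an SVG element such as
#   <polyline stroke-width="2" points="0,0 10,10 20,0"/>
# translates to the command list
#   ['q', '2 w', '0 0 m 10 10 l 20 0 l S', 'Q']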
ELEMENT_FUNCS = {'polyline': polyline}
if __name__ == '__main__':
import sys
from xml.dom import minidom
import pyPdf
if len(sys.argv) != 3:
print "Usage: %s file.pdf file.svg" % sys.argv[0]
sys.exit(1)
inpdf = pyPdf.PdfFileReader(open(sys.argv[1], 'rb'))
page = inpdf.pages[0]
doc = minidom.parse(sys.argv[2])
drawing = doc.getElementsByTagNameNS('http://www.sony.com/notepad', 'drawing')[0]
svg = doc.getElementsByTagNameNS('http://www.w3.org/2000/svg','svg')[0]
for attr in ('width', 'height'):
svg.setAttribute(attr, drawing.getAttribute(attr))
pdf_add_content(svg_to_pdf_content(svg), page)
outpdf = pyPdf.PdfFileWriter()
outpdf.addPage(page)
outpdf.write(open('pdfcontent.pdf', 'wb'))
print "Combined file output to pdfcontent.pdf"
| lgpl-3.0 |
emencia/emencia-django-socialaggregator | socialaggregator/migrations/0005_auto__add_field_ressource_language.py | 1 | 5298 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Ressource.language'
db.add_column(u'socialaggregator_ressource', 'language',
self.gf('django.db.models.fields.CharField')(default='fr', max_length=2),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Ressource.language'
db.delete_column(u'socialaggregator_ressource', 'language')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'socialaggregator.aggregator': {
'Meta': {'object_name': 'Aggregator'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['socialaggregator.Feed']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'social_plugin': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'socialaggregator.feed': {
'Meta': {'object_name': 'Feed'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'socialaggregator.ressource': {
'Meta': {'ordering': "('-priority', 'name')", 'object_name': 'Ressource'},
'activate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['socialaggregator.Feed']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'ressource_date': ('django.db.models.fields.DateTimeField', [], {}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'social_id': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'social_type': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['socialaggregator'] | agpl-3.0 |
rosmo/boto | tests/integration/cloudsearch/test_cert_verification.py | 126 | 1577 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.cloudsearch
class CloudSearchCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
cloudsearch = True
regions = boto.cloudsearch.regions()
def sample_service_call(self, conn):
conn.describe_domains()
| mit |
Arcanemagus/SickRage | lib/guessit/rules/properties/container.py | 11 | 2338 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
container property
"""
from rebulk.remodule import re
from rebulk import Rebulk
from ..common import seps
from ..common.validators import seps_surround
from ...reutils import build_or_pattern
def container():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True)
rebulk.defaults(name='container',
formatter=lambda value: value.strip(seps),
tags=['extension'],
conflict_solver=lambda match, other: other
if other.name in ['format', 'video_codec'] or
other.name == 'container' and 'extension' not in other.tags
else '__default__')
subtitles = ['srt', 'idx', 'sub', 'ssa', 'ass']
info = ['nfo']
videos = ['3g2', '3gp', '3gp2', 'asf', 'avi', 'divx', 'flv', 'm4v', 'mk2',
'mka', 'mkv', 'mov', 'mp4', 'mp4a', 'mpeg', 'mpg', 'ogg', 'ogm',
'ogv', 'qt', 'ra', 'ram', 'rm', 'ts', 'wav', 'webm', 'wma', 'wmv',
'iso', 'vob']
torrent = ['torrent']
nzb = ['nzb']
rebulk.regex(r'\.'+build_or_pattern(subtitles)+'$', exts=subtitles, tags=['extension', 'subtitle'])
rebulk.regex(r'\.'+build_or_pattern(info)+'$', exts=info, tags=['extension', 'info'])
rebulk.regex(r'\.'+build_or_pattern(videos)+'$', exts=videos, tags=['extension', 'video'])
rebulk.regex(r'\.'+build_or_pattern(torrent)+'$', exts=torrent, tags=['extension', 'torrent'])
rebulk.regex(r'\.'+build_or_pattern(nzb)+'$', exts=nzb, tags=['extension', 'nzb'])
rebulk.defaults(name='container',
validator=seps_surround,
formatter=lambda s: s.lower(),
conflict_solver=lambda match, other: match
if other.name in ['format',
'video_codec'] or other.name == 'container' and 'extension' in other.tags
else '__default__')
rebulk.string(*[sub for sub in subtitles if sub not in ['sub']], tags=['subtitle'])
rebulk.string(*videos, tags=['video'])
rebulk.string(*torrent, tags=['torrent'])
rebulk.string(*nzb, tags=['nzb'])
return rebulk
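# A hedged usage sketch (assuming the standard Rebulk matching API): running
# the built object over a filename should yield a 'container' match for the
# file extension, e.g.
#   matches = container().matches('Movie.Title.2010.mkv')
#   # 'mkv' is expected among the matches named 'container'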
| gpl-3.0 |
livioc/selenium | py/test/selenium/webdriver/common/example2.py | 51 | 1326 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from google_one_box import GoogleOneBox
from selenium.webdriver.firefox.webdriver import WebDriver
class ExampleTest2(unittest.TestCase):
"""This example shows how to use the page object pattern.
For more information about this pattern, see:
http://code.google.com/p/webdriver/wiki/PageObjects
"""
def setUp(self):
self._driver = WebDriver()
def tearDown(self):
self._driver.quit()
def testSearch(self):
google = GoogleOneBox(self._driver, "http://www.google.com")
res = google.search_for("cheese")
self.assertTrue(res.link_contains_match_for("Wikipedia"))
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
theheros/kbengine | kbe/res/scripts/common/Lib/ctypes/test/test_simplesubclasses.py | 6 | 1344 | import unittest
from ctypes import *
class MyInt(c_int):
def __eq__(self, other):
        if type(other) != MyInt:
            # Return NotImplemented (not the NotImplementedError exception
            # class) so Python can fall back to the other operand's comparison.
            return NotImplemented
return self.value == other.value
class Test(unittest.TestCase):
def test_compare(self):
self.assertEqual(MyInt(3), MyInt(3))
self.assertNotEqual(MyInt(42), MyInt(43))
def test_ignore_retval(self):
# Test if the return value of a callback is ignored
# if restype is None
proto = CFUNCTYPE(None)
def func():
return (1, "abc", None)
cb = proto(func)
self.assertEqual(None, cb())
def test_int_callback(self):
args = []
def func(arg):
args.append(arg)
return arg
cb = CFUNCTYPE(None, MyInt)(func)
self.assertEqual(None, cb(42))
self.assertEqual(type(args[-1]), MyInt)
cb = CFUNCTYPE(c_int, c_int)(func)
self.assertEqual(42, cb(42))
self.assertEqual(type(args[-1]), int)
def test_int_struct(self):
class X(Structure):
_fields_ = [("x", MyInt)]
self.assertEqual(X().x, MyInt())
s = X()
s.x = MyInt(42)
self.assertEqual(s.x, MyInt(42))
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
jwbaker/pytvdb | tests/test_api.py | 1 | 4769 | import pytest
from pytvdb import TVDB
class TestTVDB:
@pytest.mark.unit
def test_build_list_of_models(self):
class TestObject:
def __init__(self, **kwargs):
self.value = kwargs.get('value')
data = [1, 2, 3, 4, 5]
f = TestObject
res = TVDB()._build_list_of_models(f, [{'value': x} for x in data])
for val, obj in zip(data, res):
assert val == obj.value
class TestSearch:
@pytest.mark.system
def test_search_by_name(self):
res = TVDB().search().series(name='Doctor Who')
assert len(res) == 12
@pytest.mark.system
def test_search_by_imdb_id(self):
res = TVDB().search().series(imdb_id='tt0436992')
assert len(res) == 1
@pytest.mark.system
def test_search_by_zap2it_id(self):
res = TVDB().search().series(zap2it_id='EP00750178')
assert len(res) == 1
@pytest.mark.system
def test_search_by_name_and_imdb(self):
with pytest.raises(ValueError):
assert not TVDB().search().series(name='Doctor Who', imdb_id='tt0436992')
@pytest.mark.system
def test_search_by_name_and_zap2it(self):
with pytest.raises(ValueError):
assert not TVDB().search().series(name='Doctor Who', zap2it_id='EP00750178')
@pytest.mark.system
def test_search_by_zap2it_and_imdb(self):
with pytest.raises(ValueError):
assert not TVDB().search().series(zap2it_id='EP00750178', imdb_id='tt0436992')
@pytest.mark.system
def test_search_by_name_and_zap2it_and_imdb(self):
with pytest.raises(ValueError):
assert not TVDB().search().series(name='Doctor Who', zap2it_id='EP00750178', imdb_id='tt0436992')
@pytest.mark.system
def test_search_with_version(self):
res = TVDB(version='2.1.1').search().series(name='Doctor Who')
assert len(res) == 12
@pytest.mark.system
def test_search_different_language(self):
res = TVDB(language='de').search().series(imdb_id='tt0436992')
assert len(res[0].aliases) == 0
assert res[0].overview == "Die Serie handelt von einem mysteriösen Außerirdischen namens „Der Doktor“, der " \
"mit seinem Raumschiff, der TARDIS (Time and Relative Dimension in Space), welches" \
" von außen aussieht wie eine englische Notruf-Telefonzelle der 60er Jahre, durch" \
" Raum und Zeit fliegt. Der Doktor ist ein Time Lord vom Planeten Gallifrey - und" \
" bereits über 900 Jahre alt. Dass man ihm das nicht ansieht, liegt vor allem" \
" daran, dass ein Time Lord, wenn er stirbt, in der Lage ist, sich zu regenerieren," \
" wobei er auch eine andere Gestalt annimmt."
@pytest.mark.system
def test_search_by_name_not_found(self):
res = TVDB().search().series(name='doct')
assert len(res) == 0
class TestSeries:
@pytest.mark.system
def test_get_series_by_id(self):
res = TVDB().series(76107)
assert res.series_name == "Doctor Who"
class TestSeriesActors:
@pytest.mark.system
def test_get_series_actors(self):
res = TVDB().series(76107).actors()
assert len(res) == 42
class TestSeriesEpisodes:
@pytest.mark.system
def test_single_page(self):
res = TVDB().series(78874).episodes()
assert len(res) == 18
@pytest.mark.system
def test_many_pages(self):
res = TVDB().series(76107).episodes()
assert len(res) == 809
@pytest.mark.system
def test_summary(self):
res = TVDB().series(76107).episodes().summary()
assert len(res.aired_seasons) == 27
assert res.aired_episodes == 809
assert res.dvd_seasons == []
assert res.dvd_episodes == 0
class TestEpisodes:
@pytest.mark.system
def test_get_episode(self):
res = TVDB().episodes(183284)
assert res.episode_name == 'Terror of the Zygons (2)'
assert res.directors == ['Douglas Camfield']
class TestUpdates:
@pytest.mark.system
def test_get_updates_less_than_one_week(self):
res = TVDB().updated().query(from_time=1503105261, to_time=1503191661)
assert len(res) == 330
@pytest.mark.system
@pytest.mark.xfail
# This is the documented behaviour of the API, but apparently does not actually happen
def test_get_updates_more_than_one_week(self):
short_res = TVDB().updated().query(from_time=1500513261, to_time=1500599661)
long_res = TVDB().updated().query(from_time=1500513261, to_time=1503710786)
assert len(short_res) == len(long_res) | unlicense |
ForNeVeR/styx-miranda | styx-client/protobuf-2.5.0/python/mox.py | 603 | 38237 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occured. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
"""
new_mock = MockAnything()
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
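  # A hedged usage sketch (module and attribute names are hypothetical):
  #   m = Mox()
  #   m.StubOutWithMock(os.path, 'exists')
  #   os.path.exists('/tmp').AndReturn(True)   # record the expectation
  #   m.ReplayAll()
  #   ... exercise code that calls os.path.exists('/tmp') ...
  #   m.VerifyAll()
  #   m.UnsetStubs()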
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self):
""" """
self._Reset()
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name):
"""Create a new mock method call and return it.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(name)
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
# Verify the class supports item assignment.
if setitem is None:
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
# Verify the class supports item assignment.
if getitem is None:
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable
call = self._class_to_mock.__dict__.get('__call__', None)
if call is None:
raise TypeError('Not callable')
# Because the call happens directly on this object rather than through a
# named method attribute, the mock method is invoked right here.
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
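# A minimal usage sketch (not part of the original library): it walks the
# record/replay cycle that MockObject participates in. `Mox`, `ReplayAll` and
# `VerifyAll` are the factory APIs defined elsewhere in this module; `_Dao` is
# a hypothetical class introduced only for illustration.
def _example_mock_object_usage():
  class _Dao(object):
    def Fetch(self, key):
      raise NotImplementedError
  m = Mox()
  mock_dao = m.CreateMock(_Dao)       # record mode: calls are queued
  mock_dao.Fetch(42).AndReturn('row')
  m.ReplayAll()                       # switch every mock to replay mode
  assert mock_dao.Fetch(42) == 'row'
  m.VerifyAll()                       # fails if an expectation was unmet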
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
method_name: str
call_queue: list or deque
replay_mode: bool
"""
self._name = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCallError if this call is supposed to match an expected
method call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
expected_method._side_effects(*params, **named_params)
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCallError if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
return desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are in the queue.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
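# A hedged sketch (illustrative, not from the original source) of how the
# fluent MockMethod helpers combine while recording. `_Cache` is a
# hypothetical class used only for this example.
def _example_method_expectations():
  class _Cache(object):
    def Get(self, key):
      raise NotImplementedError
    def Put(self, key, value):
      raise NotImplementedError
  m = Mox()
  cache = m.CreateMock(_Cache)
  cache.Get('missing').AndRaise(KeyError('missing'))
  seen = []
  cache.Put('a', 1).WithSideEffects(lambda k, v: seen.append((k, v)))
  m.ReplayAll()
  try:
    cache.Get('missing')              # replays the recorded exception
  except KeyError:
    pass
  cache.Put('a', 1)                   # runs the recorded side effect
  m.VerifyAll()
  assert seen == [('a', 1)]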
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
raise NotImplementedError('method must be implemented by a subclass.')
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
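# A hedged sketch of a custom comparator (illustrative, not part of the
# library): a subclass only needs to implement equals(). `_IsEven` is a
# hypothetical name; usage would look like mock_obj.Process(_IsEven()).
class _IsEven(Comparator):
  def equals(self, rhs):
    # True for any int that is evenly divisible by two.
    return isinstance(rhs, int) and rhs % 2 == 0
  def __repr__(self):
    return '<even integer>'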
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
Example: mock_dao.SetTimeout(IsAlmost(3.9))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is anything that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % self._key
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (self._key, self._value)
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
mock_dao.DoSomething(Func(myParamValidator), True)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
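# A hedged sketch (illustrative only) showing that the comparators above
# compose, so a single recorded call can tolerate several argument shapes.
# `_Api` is a hypothetical class used only for this example.
def _example_composed_comparators():
  class _Api(object):
    def Send(self, payload, priority):
      raise NotImplementedError
  m = Mox()
  api = m.CreateMock(_Api)
  # payload must be a dict carrying kind='event'; priority may be an int
  # or None.
  api.Send(And(IsA(dict), ContainsKeyValue('kind', 'event')),
           Or(IsA(int), Func(lambda p: p is None)))
  m.ReplayAll()
  api.Send({'kind': 'event', 'id': 7}, None)
  m.VerifyAll()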
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_called = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod()
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
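# A hedged sketch (illustrative only) of the two method groups working
# together: InAnyOrder() relaxes ordering within one group, while
# MultipleTimes() allows repetition. `_Store` is a hypothetical class.
def _example_call_groups():
  class _Store(object):
    def Warm(self, key):
      raise NotImplementedError
    def Read(self, key):
      raise NotImplementedError
  m = Mox()
  store = m.CreateMock(_Store)
  store.Warm('a').InAnyOrder()
  store.Warm('b').InAnyOrder()
  store.Read(IsA(str)).MultipleTimes().AndReturn('value')
  m.ReplayAll()
  store.Warm('b')                     # the unordered group accepts any order
  store.Warm('a')
  assert store.Read('a') == 'value'   # repeated calls match MultipleTimes
  assert store.Read('b') == 'value'
  m.VerifyAll()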
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# Also get all the attributes from the base classes, to account for the
# case when the test class is not an immediate child of MoxTestBase.
for base in bases:
for attr_name in dir(base):
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox - any mox tests will
want this. Also automatically unsets any stubs and verifies that all mock
methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
self.mox = Mox()
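# A hedged sketch (illustrative, not from the original source) of a concrete
# test built on MoxTestBase. The metaclass wraps test_frozen_time so that
# UnsetStubs() and VerifyAll() run automatically when it returns.
def _example_test_case():
  import time

  class _ClockTest(MoxTestBase):
    def test_frozen_time(self):
      self.mox.StubOutWithMock(time, 'time')
      time.time().AndReturn(1234.0)
      self.mox.ReplayAll()
      self.assertEqual(time.time(), 1234.0)

  return _ClockTest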
| mit |
tmerrick1/spack | var/spack/repos/builtin/packages/r-dicekriging/package.py | 4 | 1672 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RDicekriging(RPackage):
"""Estimation, validation and prediction of kriging models. Important
functions : km, print.km, plot.km, predict.km."""
homepage = "http://dice.emse.fr/"
url = "https://cran.r-project.org/src/contrib/DiceKriging_1.5.5.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/DiceKriging"
version('1.5.5', 'ee3e2d7a91d4a712467ef4f0b69c2844')
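    # A hedged sketch of how further directives could be declared here;
    # the values below are hypothetical and not part of this package:
    #
    #   depends_on('r@2.10:', type=('build', 'run'))
    #   version('1.5.6', '<md5 checksum of the 1.5.6 tarball>')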
| lgpl-2.1 |
zcqHub/django-booking | booking/migrations/0008_move_data_from_old_model_to_hvad.py | 4 | 11224 | # flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# for every status
for status in orm['booking.BookingStatus'].objects.all():
# iterate over the old renamed translation instances
for statustrans_old in orm['booking.BookingStatusTranslationRenamed'].objects.filter(status=status):
orm['booking.BookingStatusTranslation'].objects.create(
name=statustrans_old.name,
language_code=statustrans_old.language,
master=status,
)
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'booking.booking': {
'Meta': {'ordering': "['-creation_date']", 'object_name': 'Booking'},
'booking_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'booking_status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['booking.BookingStatus']", 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'date_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'forename': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nationality': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sessions.Session']", 'null': 'True', 'blank': 'True'}),
'special_request': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'street1': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'street2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'time_period': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_unit': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '36', 'decimal_places': '28', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bookings'", 'null': 'True', 'to': u"orm['auth.User']"}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
u'booking.bookingerror': {
'Meta': {'object_name': 'BookingError'},
'booking': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['booking.Booking']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'max_length': '4000', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'})
},
u'booking.bookingitem': {
'Meta': {'ordering': "['-booking__creation_date']", 'object_name': 'BookingItem'},
'booking': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['booking.Booking']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'persons': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'subtotal': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '36', 'decimal_places': '28', 'blank': 'True'})
},
u'booking.bookingstatus': {
'Meta': {'object_name': 'BookingStatus'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'booking.bookingstatustranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'BookingStatusTranslation', 'db_table': "u'booking_bookingstatus_translation'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['booking.BookingStatus']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'booking.bookingstatustranslationrenamed': {
'Meta': {'object_name': 'BookingStatusTranslationRenamed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['booking.BookingStatus']"})
},
u'booking.extrapersoninfo': {
'Meta': {'ordering': "['-booking__creation_date']", 'object_name': 'ExtraPersonInfo'},
'arrival': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'booking': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['booking.Booking']"}),
'forename': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sessions.session': {
'Meta': {'object_name': 'Session', 'db_table': "'django_session'"},
'expire_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'session_data': ('django.db.models.fields.TextField', [], {}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'})
}
}
complete_apps = ['booking']
symmetrical = True
| mit |
iamutkarshtiwari/kivy | kivy/lang.py | 12 | 80219 | '''Kivy Language
=============
The Kivy language is a language dedicated to describing user interface and
interactions. You could compare this language to Qt's QML
(http://qt.nokia.com), but we included new concepts such as rule definitions
(which are somewhat akin to what you may know from CSS), templating and so on.
.. versionchanged:: 1.7.0
The Builder doesn't execute canvas expressions in realtime anymore. It will
pack all the expressions that need to be executed first and execute them
after dispatching input, just before drawing the frame. If you want to
force the execution of canvas drawing, just call
:meth:`Builder.sync <BuilderBase.sync>`.
An experimental profiling tool for the kv lang is also included. You can
activate it by setting the environment variable `KIVY_PROFILE_LANG=1`.
It will then generate an html file named `builder_stats.html`.
Overview
--------
The language consists of several constructs that you can use:
Rules
A rule is similar to a CSS rule. A rule applies to specific widgets (or
classes thereof) in your widget tree and modifies them in a
certain way.
You can use rules to specify interactive behaviour or use them to add
graphical representations of the widgets they apply to.
You can target a specific class of widgets (similar to the CSS
concept of a *class*) by using the ``cls`` attribute (e.g.
``cls=MyTestWidget``).
A Root Widget
You can use the language to create your entire user interface.
A kv file must contain at most one root widget.
Dynamic Classes
*(introduced in version 1.7.0)*
Dynamic classes let you create new widgets and rules on-the-fly,
without any Python declaration.
Templates (deprecated)
*(introduced in version 1.0.5, deprecated from version 1.7.0)*
Templates were used to populate parts of an application, such as
styling the content of a list (e.g. icon on the left, text on the
right). They are now deprecated by dynamic classes.
Syntax of a kv File
-------------------
.. highlight:: kv
A Kivy language file must have ``.kv`` as filename extension.
The content of the file should always start with the Kivy header, where
`version` must be replaced with the Kivy language version you're using.
For now, use 1.0::
#:kivy `1.0`
# content here
The `content` can contain rule definitions, a root widget, dynamic class
definitions and templates::
# Syntax of a rule definition. Note that several Rules can share the same
# definition (as in CSS). Note the braces: they are part of the definition.
<Rule1,Rule2>:
# .. definitions ..
<Rule3>:
# .. definitions ..
# Syntax for creating a root widget
RootClassName:
# .. definitions ..
# Syntax for creating a dynamic class
<NewWidget@BaseClass>:
# .. definitions ..
# Syntax for creating a template
[TemplateName@BaseClass1,BaseClass2]:
# .. definitions ..
Regardless of whether it's a rule, root widget, dynamic class or
template you're defining, the definition should look like this::
# With the braces it's a rule. Without them, it's a root widget.
<ClassName>:
prop1: value1
prop2: value2
canvas:
CanvasInstruction1:
canvasprop1: value1
CanvasInstruction2:
canvasprop2: value2
AnotherClass:
prop3: value1
Here `prop1` and `prop2` are the properties of `ClassName` and `prop3` is the
property of `AnotherClass`. If the widget doesn't have a property with
the given name, an :class:`~kivy.properties.ObjectProperty` will be
automatically created and added to the widget.
`AnotherClass` will be created and added as a child of the `ClassName`
instance.
- The indentation is important and must be consistent. The spacing must be a
multiple of the number of spaces used on the first indented line. Spaces
are encouraged: mixing tabs and spaces is not recommended.
- The value of a property must be given on a single line (for now at least).
- The `canvas` property is special: you can put graphics instructions in it
to create a graphical representation of the current class.
Here is a simple example of a kv file that contains a root widget::
#:kivy 1.0
Button:
text: 'Hello world'
.. versionchanged:: 1.7.0
The indentation is not limited to 4 spaces anymore. The spacing must be a
multiple of the number of spaces used on the first indented line.
Both the :meth:`~BuilderBase.load_file` and the
:meth:`~BuilderBase.load_string` methods
return the root widget defined in your kv file/string. They will also add any
class and template definitions to the :class:`~kivy.factory.Factory` for later
usage.
Value Expressions, on_property Expressions, ids and Reserved Keywords
---------------------------------------------------------------------
When you specify a property's value, the value is evaluated as a Python
expression. This expression can be static or dynamic, which means that
the value can use the values of other properties using reserved keywords.
self
The keyword self references the "current widget instance"::
Button:
text: 'My state is %s' % self.state
root
This keyword is available only in rule definitions and represents the
root widget of the rule (the first instance of the rule)::
<MyWidget>:
custom: 'Hello world'
Button:
text: root.custom
app
This keyword always refers to your app instance. It's equivalent
to a call to :meth:`kivy.app.App.get_running_app` in Python.::
Label:
text: app.name
args
This keyword is available in on_<action> callbacks. It refers to the
arguments passed to the callback.::
TextInput:
on_focus: self.insert_text("Focus" if args[1] else "No focus")
ids
~~~
Class definitions may contain ids which can be used as keywords::
<MyWidget>:
Button:
id: btn1
Button:
text: 'The state of the other button is %s' % btn1.state
Please note that the `id` will not be available in the widget instance:
it is used exclusively for external references. `id` is a weakref to the
widget, and not the widget itself. The widget itself can be accessed
with `id.__self__` (`btn1.__self__` in this case).
When the kv file is processed, weakrefs to all the widgets tagged with ids are
added to the root widgets `ids` dictionary. In other words, following on from
the example above, the buttons state could also be accessed as follows:
.. code-block:: python
widget = MyWidget()
state = widget.ids["btn1"].state
# Or, as an alternative syntax,
state = widget.ids.btn1.state
Note that the outermost widget applies the kv rules to all its inner widgets
before any other rules are applied. This means if an inner widget contains ids,
these ids may not be available during the inner widget's `__init__` function.
Valid expressions
~~~~~~~~~~~~~~~~~
There are two places that accept Python statements in a kv file:
after a property, which assigns to the property the result of the expression
(such as the text of a button as shown above), and after an on_property, which
executes the statement when the property is updated (such as on_state).
In the former case, the
`expression <http://docs.python.org/2/reference/expressions.html>`_ can only
span a single line, cannot be extended to multiple lines using newline
escaping, and must return a value. An example of a valid expression is
``text: self.state and ('up' if self.state == 'normal' else 'down')``.
In the latter case, multiple single line statements are valid including
multi-line statements that escape their newline, as long as they don't
add an indentation level.
Examples of valid statements are:
.. code-block:: python
on_press: if self.state == 'normal': print('normal')
on_state:
if self.state == 'normal': print('normal')
else: print('down')
if self.state == 'normal': \\
print('multiline normal')
for i in range(10): print(i)
print([1,2,3,4,
5,6,7])
An example of a invalid statement:
.. code-block:: python
on_state:
if self.state == 'normal':
print('normal')
Relation Between Values and Properties
--------------------------------------
When you use the Kivy language, you might notice that we do some work
behind the scenes to automatically make things work properly. You should
know that :doc:`api-kivy.properties` implement the
`Observer Design Pattern <http://en.wikipedia.org/wiki/Observer_pattern>`_.
That means that you can bind your own function to be
called when the value of a property changes (i.e. you passively
`observe` the property for potential changes).
The Kivy language detects properties in your `value` expression and will
create callbacks to automatically update the property via your expression
when changes occur.
Here's a simple example that demonstrates this behaviour::
Button:
text: str(self.state)
In this example, the parser detects that `self.state` is a dynamic value (a
property). The :attr:`~kivy.uix.button.Button.state` property of the button
can change at any moment (when the user touches it).
We now want this button to display its own state as text, even as the state
changes. To do this, we use the state property of the Button and use it in the
value expression for the button's `text` property, which controls what text is
displayed on the button (We also convert the state to a string representation).
Now, whenever the button state changes, the text property will be updated
automatically.
Remember: The value is a python expression! That means that you can do
something more interesting like::
Button:
text: 'Plop world' if self.state == 'normal' else 'Release me!'
The Button text changes with the state of the button. By default, the button
text will be 'Plop world', but when the button is being pressed, the text will
change to 'Release me!'.
More precisely, the kivy language parser detects all substrings of the form
`X.a.b` where `X` is `self` or `root` or `app` or a known id, and `a` and `b`
are properties: it then adds the appropriate dependencies to cause
the constraint to be reevaluated whenever something changes. For example,
this works exactly as expected::
<IndexedExample>:
beta: self.a.b[self.c.d]
However, due to limitations in the parser which hopefully may be lifted in the
future, the following doesn't work::
<BadExample>:
beta: self.a.b[self.c.d].e.f
Indeed, the `.e.f` part is not recognized because it doesn't follow the
expected pattern and so does not result in an appropriate dependency
being set up.
Instead, an intermediate property should be introduced to allow the following
constraint::
<GoodExample>:
alpha: self.a.b[self.c.d]
beta: self.alpha.e.f
Graphical Instructions
----------------------
The graphical instructions are a special part of the Kivy language. They are
handled by the 'canvas' property definition::
Widget:
canvas:
Color:
rgb: (1, 1, 1)
Rectangle:
size: self.size
pos: self.pos
All the classes added inside the canvas property must be derived from the
:class:`~kivy.graphics.Instruction` class. You cannot put any Widget class
inside the canvas property (as that would not make sense because a
widget is not a graphics instruction).
If you want to do theming, you'll have the same question as in CSS: which rules
have been executed first? In our case, the rules are executed
in processing order (i.e. top-down).
If you want to change how Buttons are rendered, you can create your own kv file
and add something like this::
<Button>:
canvas:
Color:
rgb: (1, 0, 0)
Rectangle:
pos: self.pos
size: self.size
Rectangle:
pos: self.pos
size: self.texture_size
texture: self.texture
This will result in buttons having a red background with the label in the
bottom left, in addition to all the preceding rules.
You can clear all the previous instructions by using the `Clear` command::
<Button>:
canvas:
Clear
Color:
rgb: (1, 0, 0)
Rectangle:
pos: self.pos
size: self.size
Rectangle:
pos: self.pos
size: self.texture_size
texture: self.texture
Then, only your rules that follow the `Clear` command will be taken into
consideration.
.. _dynamic_classes:
Dynamic classes
---------------
Dynamic classes allow you to create new widgets on-the-fly, without any python
declaration in the first place. The syntax of the dynamic classes is similar to
the Rules, but you need to specify the base classes you want to
subclass.
The syntax looks like:
.. code-block:: kv
# Simple inheritance
<NewWidget@Button>:
# kv code here ...
# Multiple inheritance
<NewWidget@ButtonBehavior+Label>:
# kv code here ...
The `@` character is used to separate your class name from the classes you want
to subclass. The Python equivalent would have been:
.. code-block:: python
# Simple inheritance
class NewWidget(Button):
pass
# Multiple inheritance
class NewWidget(ButtonBehavior, Label):
pass
Any new properties, usually added in python code, should be declared
first. If the property doesn't exist in the dynamic class, it will be
automatically created as an :class:`~kivy.properties.ObjectProperty`
(pre 1.8.0) or as an appropriate typed property (from version
1.8.0).
.. versionchanged:: 1.8.0
If the property value is an expression that can be evaluated right away (no
external binding), then the value will be used as default value of the
property, and the type of the value will be used for the specialization of
the Property class. In other terms: if you declare `hello: "world"`, a new
:class:`~kivy.properties.StringProperty` will be instantiated, with the
default value `"world"`. Lists, tuples, dictionaries and strings are
supported.
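For instance, here is a minimal sketch (the dynamic class name
`TypedExample` is illustrative):

.. code-block:: python

    from kivy.factory import Factory
    from kivy.lang import Builder

    Builder.load_string("""
    <TypedExample@Widget>:
        hello: "world"    # becomes a StringProperty defaulting to "world"
        items: [1, 2, 3]  # becomes a ListProperty defaulting to [1, 2, 3]
    """)

    widget = Factory.TypedExample()
    assert widget.hello == "world"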
Let's illustrate the usage of these dynamic classes with an
implementation of a basic Image button. We could derive our classes from
the Button and just add a property for the image filename:
.. code-block:: kv
<ImageButton@Button>:
source: None
Image:
source: root.source
pos: root.pos
size: root.size
# let's use the new classes in another rule:
<MainUI>:
BoxLayout:
ImageButton:
source: 'hello.png'
on_press: root.do_something()
ImageButton:
source: 'world.png'
on_press: root.do_something_else()
In Python, you can create an instance of the dynamic class as follows:
.. code-block:: python
from kivy.factory import Factory
button_inst = Factory.ImageButton()
.. note::
Using dynamic classes, a child class can be declared before its parent.
This, however, leads to the unintuitive situation where the parent
properties/methods override those of the child. Be careful if you choose
to do this.
.. _template_usage:
Templates
---------
.. versionchanged:: 1.7.0
Template usage is now deprecated. Please use Dynamic classes instead.
Syntax of templates
~~~~~~~~~~~~~~~~~~~
Using a template in Kivy requires 2 things:
#. a context to pass (it will be available as ctx inside the template).
#. a kv definition of the template.
Syntax of a template:
.. code-block:: kv
# With only one base class
[ClassName@BaseClass]:
# .. definitions ..
# With more than one base class
[ClassName@BaseClass1,BaseClass2]:
# .. definitions ..
For example, for a list, you'll need to create an entry with an image on
the left and a label on the right. You can create a template to make
that definition easier to use.
So, we'll create a template that uses 2 entries in the context: an image
filename and a title:
.. code-block:: kv
[IconItem@BoxLayout]:
Image:
source: ctx.image
Label:
text: ctx.title
Then in Python, you can instantiate the template using:
.. code-block:: python
from kivy.lang import Builder
# create a template with hello world + an image
# the context values should be passed as kwargs to the Builder.template
# function
icon1 = Builder.template('IconItem', title='Hello world',
image='myimage.png')
# create a second template with other information
ctx = {'title': 'Another hello world',
'image': 'myimage2.png'}
icon2 = Builder.template('IconItem', **ctx)
# and use icon1 and icon2 as other widget.
Template example
~~~~~~~~~~~~~~~~
Most of the time, when you are creating a screen in the kv lang, you use a lot
redefinitions. In our example, we'll create a Toolbar, based on a
BoxLayout, and put in a few :class:`~kivy.uix.image.Image` widgets that
will react to the *on_touch_down* event.:
.. code-block:: kv
<MyToolbar>:
BoxLayout:
Image:
source: 'data/text.png'
size: self.texture_size
size_hint: None, None
on_touch_down: self.collide_point(*args[1].pos) and\
root.create_text()
Image:
source: 'data/image.png'
size: self.texture_size
size_hint: None, None
on_touch_down: self.collide_point(*args[1].pos) and\
root.create_image()
Image:
source: 'data/video.png'
size: self.texture_size
size_hint: None, None
on_touch_down: self.collide_point(*args[1].pos) and\
root.create_video()
We can see that the size and size_hint attributes are exactly the same.
More than that, the callback in on_touch_down and the image are changing.
These can be the variable part of the template that we can put into a context.
Let's try to create a template for the Image:
.. code-block:: kv
[ToolbarButton@Image]:
# This is the same as before
size: self.texture_size
size_hint: None, None
# Now, we are using the ctx for the variable part of the template
source: 'data/%s.png' % ctx.image
on_touch_down: self.collide_point(*args[1].pos) and ctx.callback()
The template can be used directly in the MyToolbar rule:
.. code-block:: kv
<MyToolbar>:
BoxLayout:
ToolbarButton:
image: 'text'
callback: root.create_text
ToolbarButton:
image: 'image'
callback: root.create_image
ToolbarButton:
image: 'video'
callback: root.create_video
That's all :)
Template limitations
~~~~~~~~~~~~~~~~~~~~
When you are creating a context:
#. you cannot use references other than "root":
.. code-block:: kv
<MyRule>:
Widget:
id: mywidget
value: 'bleh'
Template:
ctxkey: mywidget.value # << fail, this references the id
# mywidget
#. not all of the dynamic parts will be understood:
.. code-block:: kv
<MyRule>:
Template:
ctxkey: 'value 1' if root.prop1 else 'value2' # << even if
# root.prop1 is a property, if it changes value, ctxkey
# will not be updated
Redefining a widget's style
---------------------------
Sometimes we would like to inherit from a widget in order to use its Python
properties without also using its .kv defined style. For example, we would
like to inherit from a Label, but we would also like to define our own
canvas instructions instead of automatically using the canvas instructions
inherited from the Label. We can achieve this by prepending a dash (-) before
the class name in the .kv style definition.
In myapp.py:
.. code-block:: python
class MyWidget(Label):
pass
and in my.kv:
.. code-block:: kv
<-MyWidget>:
canvas:
Color:
rgb: 1, 1, 1
Rectangle:
size: (32, 32)
MyWidget will now have a Color and Rectangle instruction in its canvas
without any of the instructions inherited from the Label.
Redefining a widget's property style
------------------------------------
Similar to :ref:`Redefining a widget's style`, sometimes we would like to
inherit from a widget, keep all its KV defined styles, except for the style
applied to a specific property. For example, we would
like to inherit from a :class:`~kivy.uix.button.Button`, but we would also
like to set our own `state_image`, rather than relying on the
`background_normal` and `background_down` values. We can achieve this by
prepending a dash (-) before the `state_image` property name in the .kv style
definition.
In myapp.py:
.. code-block:: python
class MyWidget(Button):
new_background = StringProperty('my_background.png')
and in my.kv:
.. code-block:: kv
<MyWidget>:
-state_image: self.new_background
MyWidget will now have a `state_image` background set only by `new_background`,
and not by any previous styles that may have set `state_image`.
.. note::
Although the previous rules are cleared, they are still applied during
widget construction, and are only removed when the new rule with the dash
is reached. This means that initially, previous rules could be used to set
the property.
Lang Directives
---------------
You can use directives to add declarative commands, such as imports or constant
definitions, to the lang files. Directives are added as comments in the
following format:
.. code-block:: kv
#:<directivename> <options>
import <package>
~~~~~~~~~~~~~~~~
.. versionadded:: 1.0.5
Syntax:
.. code-block:: kv
#:import <alias> <package>
You can import a package by writing:
.. code-block:: kv
#:import os os
<Rule>:
Button:
text: os.getcwd()
Or more complex:
.. code-block:: kv
#:import ut kivy.utils
<Rule>:
canvas:
Color:
rgba: ut.get_random_color()
.. versionadded:: 1.0.7
You can directly import classes from a module:
.. code-block:: kv
#: import Animation kivy.animation.Animation
<Rule>:
on_prop: Animation(x=.5).start(self)
set <key> <expr>
~~~~~~~~~~~~~~~~
.. versionadded:: 1.0.6
Syntax:
.. code-block:: kv
#:set <key> <expr>
Set a key that will be available anywhere in the kv. For example:
.. code-block:: kv
#:set my_color (.4, .3, .4)
#:set my_color_hl (.5, .4, .5)
<Rule>:
state: 'normal'
canvas:
Color:
rgb: my_color if self.state == 'normal' else my_color_hl
include <file>
~~~~~~~~~~~~~~~~
.. versionadded:: 1.9.0
Syntax:
.. code-block:: kv
#:include [force] <file>
Includes an external kivy file. This allows you to split complex
widgets into their own files. If the include is forced, the file
will first be unloaded and then reloaded again. For example:
.. code-block:: kv
# Test.kv
#:include mycomponent.kv
#:include force mybutton.kv
<Rule>:
state: 'normal'
MyButton:
MyComponent:
.. code-block:: kv
# mycomponent.kv
#:include mybutton.kv
<MyComponent>:
MyButton:
.. code-block:: kv
# mybutton.kv
<MyButton>:
canvas:
Color:
rgb: (1.0, 0.0, 0.0)
Rectangle:
pos: self.pos
size: (self.size[0]/4, self.size[1]/4)
'''
import os
__all__ = ('Observable', 'Builder', 'BuilderBase', 'BuilderException', 'Parser',
'ParserException')
import codecs
import re
import sys
import traceback
import types
from re import sub, findall
from os import environ
from os.path import join
from copy import copy
from types import CodeType
from functools import partial
from collections import OrderedDict, defaultdict
from kivy.factory import Factory
from kivy.logger import Logger
from kivy.utils import QueryDict
from kivy.cache import Cache
from kivy import kivy_data_dir, require
from kivy.compat import PY2, iteritems, iterkeys
from kivy.context import register_context
from kivy.resources import resource_find
import kivy.metrics as Metrics
from kivy._event import Observable, EventDispatcher
trace = Logger.trace
global_idmap = {}
# late import
Instruction = None
# register cache for creating new classtype (template)
Cache.register('kv.lang')
# all previously included files
__KV_INCLUDES__ = []
# precompile regexp expression
lang_str = re.compile('([\'"][^\'"]*[\'"])')
lang_key = re.compile('([a-zA-Z_]+)')
lang_keyvalue = re.compile('([a-zA-Z_][a-zA-Z0-9_.]*\.[a-zA-Z0-9_.]+)')
lang_tr = re.compile('(_\()')
# class types to check with isinstance
if PY2:
_cls_type = (type, types.ClassType)
else:
_cls_type = (type, )
# all the widget handlers, used to correctly unbind all the callbacks when
# the widget is deleted
_handlers = defaultdict(partial(defaultdict, list))
class ProxyApp(object):
# proxy app object
# taken from http://code.activestate.com/recipes/496741-object-proxying/
__slots__ = ['_obj']
def __init__(self):
object.__init__(self)
object.__setattr__(self, '_obj', None)
def _ensure_app(self):
app = object.__getattribute__(self, '_obj')
if app is None:
from kivy.app import App
app = App.get_running_app()
object.__setattr__(self, '_obj', app)
# Clear cached application instance, when it stops
app.bind(on_stop=lambda instance:
object.__setattr__(self, '_obj', None))
return app
def __getattribute__(self, name):
object.__getattribute__(self, '_ensure_app')()
return getattr(object.__getattribute__(self, '_obj'), name)
def __delattr__(self, name):
object.__getattribute__(self, '_ensure_app')()
delattr(object.__getattribute__(self, '_obj'), name)
def __setattr__(self, name, value):
object.__getattribute__(self, '_ensure_app')()
setattr(object.__getattribute__(self, '_obj'), name, value)
def __bool__(self):
object.__getattribute__(self, '_ensure_app')()
return bool(object.__getattribute__(self, '_obj'))
def __str__(self):
object.__getattribute__(self, '_ensure_app')()
return str(object.__getattribute__(self, '_obj'))
def __repr__(self):
object.__getattribute__(self, '_ensure_app')()
return repr(object.__getattribute__(self, '_obj'))
global_idmap['app'] = ProxyApp()
global_idmap['pt'] = Metrics.pt
global_idmap['inch'] = Metrics.inch
global_idmap['cm'] = Metrics.cm
global_idmap['mm'] = Metrics.mm
global_idmap['dp'] = Metrics.dp
global_idmap['sp'] = Metrics.sp
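# Illustrative sketch (hypothetical rule): everything placed in global_idmap
# is visible in every kv expression, e.g.:
#
#     Label:
#         text: app.name        # resolved lazily through the ProxyApp above
#         font_size: sp(16)     # Metrics helper exposed as `sp`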
# delayed calls are canvas expressions triggered during a loop. It is a one
# directional linked list of args to call call_fn with. Each element is a list
# whose last element points to the next list of args to execute when
# Builder.sync is called.
_delayed_start = None
class ParserException(Exception):
'''Exception raised when something wrong happened in a kv file.
'''
def __init__(self, context, line, message, cause=None):
self.filename = context.filename or '<inline>'
self.line = line
sourcecode = context.sourcecode
sc_start = max(0, line - 2)
sc_stop = min(len(sourcecode), line + 3)
sc = ['...']
for x in range(sc_start, sc_stop):
if x == line:
sc += ['>> %4d:%s' % (line + 1, sourcecode[line][1])]
else:
sc += [' %4d:%s' % (x + 1, sourcecode[x][1])]
sc += ['...']
sc = '\n'.join(sc)
message = 'Parser: File "%s", line %d:\n%s\n%s' % (
self.filename, self.line + 1, sc, message)
if cause:
message += '\n' + ''.join(traceback.format_tb(cause))
super(ParserException, self).__init__(message)
class BuilderException(ParserException):
'''Exception raised when the Builder failed to apply a rule on a widget.
'''
pass
class ParserRuleProperty(object):
'''Represent a property inside a rule.
'''
__slots__ = ('ctx', 'line', 'name', 'value', 'co_value',
'watched_keys', 'mode', 'count', 'ignore_prev')
def __init__(self, ctx, line, name, value, ignore_prev=False):
super(ParserRuleProperty, self).__init__()
#: Associated parser
self.ctx = ctx
#: Line of the rule
self.line = line
#: Name of the property
self.name = name
#: Value of the property
self.value = value
#: Compiled value
self.co_value = None
#: Compilation mode
self.mode = None
#: Watched keys
self.watched_keys = None
#: Stats
self.count = 0
#: whether previous rules targeting name should be cleared
self.ignore_prev = ignore_prev
def precompile(self):
name = self.name
value = self.value
# first, remove all the string from the value
tmp = sub(lang_str, '', self.value)
# detecting how to handle the value according to the key name
mode = self.mode
if self.mode is None:
self.mode = mode = 'exec' if name[:3] == 'on_' else 'eval'
if mode == 'eval':
# if we don't detect any string/key in it, we can eval and give the
# result
if re.search(lang_key, tmp) is None:
self.co_value = eval(value)
return
# ok, we can compile.
value = '\n' * self.line + value
self.co_value = compile(value, self.ctx.filename or '<string>', mode)
# for exec mode, we don't need to watch any keys.
if mode == 'exec':
return
# now, detect obj.prop
# first, remove all the string from the value
tmp = sub(lang_str, '', value)
idx = tmp.find('#')
if idx != -1:
tmp = tmp[:idx]
# detect key.value inside value, and split them
wk = list(set(findall(lang_keyvalue, tmp)))
if len(wk):
self.watched_keys = [x.split('.') for x in wk]
if findall(lang_tr, tmp):
if self.watched_keys:
self.watched_keys += [['_']]
else:
self.watched_keys = [['_']]
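# Illustrative sketch of the precompile results above: for a rule line
#     text: self.parent.title
# mode becomes 'eval' and watched_keys == [['self', 'parent', 'title']];
# for
#     on_press: print('pressed')
# mode becomes 'exec' and no keys are watched.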
def __repr__(self):
return '<ParserRuleProperty name=%r filename=%s:%d ' \
'value=%r watched_keys=%r>' % (
self.name, self.ctx.filename, self.line + 1,
self.value, self.watched_keys)
class ParserRule(object):
'''Represents a rule, in terms of the Kivy internal language.
'''
__slots__ = ('ctx', 'line', 'name', 'children', 'id', 'properties',
'canvas_before', 'canvas_root', 'canvas_after',
'handlers', 'level', 'cache_marked', 'avoid_previous_rules')
def __init__(self, ctx, line, name, level):
super(ParserRule, self).__init__()
#: Level of the rule in the kv
self.level = level
#: Associated parser
self.ctx = ctx
#: Line of the rule
self.line = line
#: Name of the rule
self.name = name
#: List of children to create
self.children = []
#: Id given to the rule
self.id = None
#: Properties associated to the rule
self.properties = OrderedDict()
#: Canvas normal
self.canvas_root = None
#: Canvas before
self.canvas_before = None
#: Canvas after
self.canvas_after = None
#: Handlers associated to the rule
self.handlers = []
#: Properties cache list: mark which class have already been checked
self.cache_marked = []
#: Indicate if any previous rules should be avoided.
self.avoid_previous_rules = False
if level == 0:
self._detect_selectors()
else:
self._forbid_selectors()
def precompile(self):
for x in self.properties.values():
x.precompile()
for x in self.handlers:
x.precompile()
for x in self.children:
x.precompile()
if self.canvas_before:
self.canvas_before.precompile()
if self.canvas_root:
self.canvas_root.precompile()
if self.canvas_after:
self.canvas_after.precompile()
def create_missing(self, widget):
# check first if the widget class has already been processed by this rule
cls = widget.__class__
if cls in self.cache_marked:
return
self.cache_marked.append(cls)
for name in self.properties:
if hasattr(widget, name):
continue
value = self.properties[name].co_value
if type(value) is CodeType:
value = None
widget.create_property(name, value)
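# Sketch (hypothetical rule): for `<MyWidget>:` declaring `custom_size: 5`,
# a MyWidget instance lacking `custom_size` gets that property created here
# holding the constant 5; values that compiled to code objects (expressions)
# start out as None until first evaluated.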
def _forbid_selectors(self):
c = self.name[0]
if c == '<' or c == '[':
raise ParserException(
self.ctx, self.line,
'Selectors rules are allowed only at the first level')
def _detect_selectors(self):
c = self.name[0]
if c == '<':
self._build_rule()
elif c == '[':
self._build_template()
else:
if self.ctx.root is not None:
raise ParserException(
self.ctx, self.line,
'Only one root object is allowed by .kv')
self.ctx.root = self
def _build_rule(self):
name = self.name
if __debug__:
trace('Builder: build rule for %s' % name)
if name[0] != '<' or name[-1] != '>':
raise ParserException(self.ctx, self.line,
'Invalid rule (must be inside <>)')
# if the very first name start with a -, avoid previous rules
name = name[1:-1]
if name[:1] == '-':
self.avoid_previous_rules = True
name = name[1:]
rules = name.split(',')
for rule in rules:
crule = None
if not len(rule):
raise ParserException(self.ctx, self.line,
'Empty rule detected')
if '@' in rule:
# new class creation ?
# ensure the name is correctly written
rule, baseclasses = rule.split('@', 1)
if not re.match(lang_key, rule):
raise ParserException(self.ctx, self.line,
'Invalid dynamic class name')
# save the name in the dynamic classes dict.
self.ctx.dynamic_classes[rule] = baseclasses
crule = ParserSelectorName(rule)
else:
# classical selectors.
if rule[0] == '.':
crule = ParserSelectorClass(rule[1:])
elif rule[0] == '#':
crule = ParserSelectorId(rule[1:])
else:
crule = ParserSelectorName(rule)
self.ctx.rules.append((crule, self))
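# Selector forms recognized above (illustrative):
#     <Button>:        -> ParserSelectorName, matches class name and bases
#     <.night>:        -> ParserSelectorClass, matches entries of widget.cls
#     <#sidebar>:      -> ParserSelectorId, matches widget.id
#     <MyBtn@Button>:  -> dynamic class, saved in ctx.dynamic_classes
#     <-Button>:       -> sets avoid_previous_rules for this rule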
def _build_template(self):
name = self.name
if __debug__:
trace('Builder: build template for %s' % name)
if name[0] != '[' or name[-1] != ']':
raise ParserException(self.ctx, self.line,
'Invalid template (must be inside [])')
item_content = name[1:-1]
if '@' not in item_content:
raise ParserException(self.ctx, self.line,
'Invalid template name (missing @)')
template_name, template_root_cls = item_content.split('@')
self.ctx.templates.append((template_name, template_root_cls, self))
def __repr__(self):
return '<ParserRule name=%r>' % (self.name, )
class Parser(object):
'''Create a Parser object to parse a Kivy language file or Kivy content.
'''
PROP_ALLOWED = ('canvas.before', 'canvas.after')
CLASS_RANGE = list(range(ord('A'), ord('Z') + 1))
PROP_RANGE = (
list(range(ord('A'), ord('Z') + 1)) +
list(range(ord('a'), ord('z') + 1)) +
list(range(ord('0'), ord('9') + 1)) + [ord('_')])
__slots__ = ('rules', 'templates', 'root', 'sourcecode',
'directives', 'filename', 'dynamic_classes')
def __init__(self, **kwargs):
super(Parser, self).__init__()
self.rules = []
self.templates = []
self.root = None
self.sourcecode = []
self.directives = []
self.dynamic_classes = {}
self.filename = kwargs.get('filename', None)
content = kwargs.get('content', None)
if content is None:
raise ValueError('No content passed')
self.parse(content)
def execute_directives(self):
global __KV_INCLUDES__
for ln, cmd in self.directives:
cmd = cmd.strip()
if __debug__:
trace('Parser: got directive <%s>' % cmd)
if cmd[:5] == 'kivy ':
version = cmd[5:].strip()
if len(version.split('.')) == 2:
version += '.0'
require(version)
elif cmd[:4] == 'set ':
try:
name, value = cmd[4:].strip().split(' ', 1)
except:
Logger.exception('')
raise ParserException(self, ln, 'Invalid directive syntax')
try:
value = eval(value)
except:
Logger.exception('')
raise ParserException(self, ln, 'Invalid value')
global_idmap[name] = value
elif cmd[:8] == 'include ':
ref = cmd[8:].strip()
force_load = False
if ref[:6] == 'force ':
ref = ref[6:].strip()
force_load = True
if ref[-3:] != '.kv':
Logger.warn('WARNING: {0} does not have a valid Kivy '
'Language extension (.kv)'.format(ref))
break
if ref in __KV_INCLUDES__:
if not os.path.isfile(ref):
raise ParserException(self, ln,
'Invalid or unknown file: {0}'.format(ref))
if not force_load:
Logger.warn('WARNING: {0} has already been included!'
.format(ref))
break
else:
Logger.debug('Reloading {0} because include was forced.'
.format(ref))
Builder.unload_file(ref)
Builder.load_file(ref)
continue
Logger.debug('Including file: {0}'.format(ref))
__KV_INCLUDES__.append(ref)
Builder.load_file(ref)
elif cmd[:7] == 'import ':
package = cmd[7:].strip()
l = package.split(' ')
if len(l) != 2:
raise ParserException(self, ln, 'Invalid import syntax')
alias, package = l
try:
if package not in sys.modules:
try:
mod = __import__(package)
except ImportError:
mod = __import__('.'.join(package.split('.')[:-1]))
# resolve the whole thing
for part in package.split('.')[1:]:
mod = getattr(mod, part)
else:
mod = sys.modules[package]
global_idmap[alias] = mod
except ImportError:
Logger.exception('')
raise ParserException(self, ln,
'Unable to import package %r' %
package)
else:
raise ParserException(self, ln, 'Unknown directive')
def parse(self, content):
'''Parse the contents of a Parser file and return a list
of root objects.
'''
# Read and parse the lines of the file
lines = content.splitlines()
if not lines:
return
num_lines = len(lines)
lines = list(zip(list(range(num_lines)), lines))
self.sourcecode = lines[:]
if __debug__:
trace('Parser: parsing %d lines' % num_lines)
# Strip all comments
self.strip_comments(lines)
# Execute directives
self.execute_directives()
# Get object from the first level
objects, remaining_lines = self.parse_level(0, lines)
# Precompile rules tree
for rule in objects:
rule.precompile()
# After parsing, there should be no remaining lines
# or there's an error we did not catch earlier.
if remaining_lines:
ln, content = remaining_lines[0]
raise ParserException(self, ln, 'Invalid data (not parsed)')
def strip_comments(self, lines):
'''Remove all comments from all lines in-place.
Comments need to be on a single line and not at the end of a line,
i.e. a comment line's first non-whitespace character must be a #.
'''
# extract directives
for ln, line in lines[:]:
stripped = line.strip()
if stripped[:2] == '#:':
self.directives.append((ln, stripped[2:]))
if stripped[:1] == '#':
lines.remove((ln, line))
if not stripped:
lines.remove((ln, line))
def parse_level(self, level, lines, spaces=0):
'''Parse the current level (level * spaces) indentation.
'''
indent = spaces * level if spaces > 0 else 0
objects = []
current_object = None
current_property = None
current_propobject = None
i = 0
while i < len(lines):
line = lines[i]
ln, content = line
# Get the number of space
tmp = content.lstrip(' \t')
# Replace any tab with 4 spaces
tmp = content[:len(content) - len(tmp)]
tmp = tmp.replace('\t', ' ')
# first indent designates the indentation
if spaces == 0:
spaces = len(tmp)
count = len(tmp)
if spaces > 0 and count % spaces != 0:
raise ParserException(self, ln,
'Invalid indentation, '
'must be a multiple of '
'%s spaces' % spaces)
content = content.strip()
rlevel = count // spaces if spaces > 0 else 0
# Level finished
if count < indent:
return objects, lines[i - 1:]
# Current level, create an object
elif count == indent:
x = content.split(':', 1)
if not len(x[0]):
raise ParserException(self, ln, 'Identifier missing')
if (len(x) == 2 and len(x[1]) and
not x[1].lstrip().startswith('#')):
raise ParserException(self, ln,
'Invalid data after declaration')
name = x[0]
# if it's not a root rule, then there are some restrictions:
# the name must be valid, with no dots or other special characters
if count != 0:
if not all(ord(z) in Parser.PROP_RANGE for z in name):
raise ParserException(self, ln, 'Invalid class name')
current_object = ParserRule(self, ln, x[0], rlevel)
current_property = None
objects.append(current_object)
# Next level, is it a property or an object ?
elif count == indent + spaces:
x = content.split(':', 1)
if not len(x[0]):
raise ParserException(self, ln, 'Identifier missing')
# It's a class, add to the current object as a children
current_property = None
name = x[0]
ignore_prev = name[0] == '-'
if ignore_prev:
name = name[1:]
if ord(name[0]) in Parser.CLASS_RANGE:
if ignore_prev:
raise ParserException(
self, ln, 'clear previous, `-`, not allowed here')
_objects, _lines = self.parse_level(
level + 1, lines[i:], spaces)
current_object.children = _objects
lines = _lines
i = 0
# It's a property
else:
if name not in Parser.PROP_ALLOWED:
if not all(ord(z) in Parser.PROP_RANGE for z in name):
raise ParserException(self, ln,
'Invalid property name')
if len(x) == 1:
raise ParserException(self, ln, 'Syntax error')
value = x[1].strip()
if name == 'id':
if len(value) <= 0:
raise ParserException(self, ln, 'Empty id')
if value in ('self', 'root'):
raise ParserException(
self, ln,
'Invalid id, cannot be "self" or "root"')
current_object.id = value
elif len(value):
rule = ParserRuleProperty(
self, ln, name, value, ignore_prev)
if name[:3] == 'on_':
current_object.handlers.append(rule)
else:
ignore_prev = False
current_object.properties[name] = rule
else:
current_property = name
current_propobject = None
if ignore_prev: # it wasn't consumed
raise ParserException(
self, ln, 'clear previous, `-`, not allowed here')
# Two more levels?
elif count == indent + 2 * spaces:
if current_property in (
'canvas', 'canvas.after', 'canvas.before'):
_objects, _lines = self.parse_level(
level + 2, lines[i:], spaces)
rl = ParserRule(self, ln, current_property, rlevel)
rl.children = _objects
if current_property == 'canvas':
current_object.canvas_root = rl
elif current_property == 'canvas.before':
current_object.canvas_before = rl
else:
current_object.canvas_after = rl
current_property = None
lines = _lines
i = 0
else:
if current_propobject is None:
current_propobject = ParserRuleProperty(
self, ln, current_property, content)
if current_property[:3] == 'on_':
current_object.handlers.append(current_propobject)
else:
current_object.properties[current_property] = \
current_propobject
else:
current_propobject.value += '\n' + content
# Too much indentation, invalid
else:
raise ParserException(self, ln,
'Invalid indentation (too many levels)')
# Check the next line
i += 1
return objects, []
def get_proxy(widget):
try:
return widget.proxy_ref
except AttributeError:
return widget
def custom_callback(__kvlang__, idmap, *largs, **kwargs):
idmap['args'] = largs
exec(__kvlang__.co_value, idmap)
def call_fn(args, instance, v):
element, key, value, rule, idmap = args
if __debug__:
trace('Builder: call_fn %s, key=%s, value=%r, %r' % (
element, key, value, rule.value))
rule.count += 1
e_value = eval(value, idmap)
if __debug__:
trace('Builder: call_fn => value=%r' % (e_value, ))
setattr(element, key, e_value)
def delayed_call_fn(args, instance, v):
# it's already on the list
if args[-1] is not None:
return
global _delayed_start
if _delayed_start is None:
_delayed_start = args
args[-1] = StopIteration
else:
args[-1] = _delayed_start
_delayed_start = args
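# Sketch of the resulting structure: each scheduled `args` list is pushed on
# a singly linked chain (newest first) that Builder.sync() later walks:
#     _delayed_start -> args_N -> ... -> args_1 -> StopIteration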
def update_intermediates(base, keys, bound, s, fn, args, instance, value):
''' Function that is called when an intermediate property is updated
and `rebind` of that property is True. In that case, we unbind
all bound funcs that were bound to attrs of the old value of the
property and rebind to the new value of the property.
For example, if the rule is `self.a.b.c.d`, then when b is changed, we
unbind from `b`, `c` and `d`, if they were bound before (they were not
None and `rebind` of the respective properties was True) and we rebind
to the new values of the attrs `b`, `c``, `d` that are not None and
`rebind` is True.
:Parameters:
`base`
A (proxied) ref to the base widget, `self` in the example
above.
`keys`
A list of the names of the attrs of `base` being watched. In
the example above it'd be `['a', 'b', 'c', 'd']`.
`bound`
A list of 4-tuples, each tuple being (widget, attr, callback, uid)
representing callback functions bound to the attribute `attr`
of `widget`. `uid` is returned by `fbind` when binding.
The callback may be None, in which case the attr
was not bound, but is there to be able to walk the attr tree.
E.g. in the example above, if `b` was not an eventdispatcher,
`(_b_ref_, 'c', None, None)` would be added to the list so we can
get to `c` and `d`, which may be eventdispatchers and their attrs.
`s`
The index in `keys` of the attr that needs to be
updated. All the keys from `s` onward will be
rebound, since the `s` key was changed. In bound, the
corresponding index is `s - 1`. If `s` is None, we start from
1 (first attr).
`fn`
The function called with `args` when the final, watched attr
changes (the bound callback).
'''
# first remove all the old bound functions from `s` and down.
for f, k, fun, uid in bound[s:]:
if fun is None:
continue
try:
f.unbind_uid(k, uid)
except ReferenceError:
pass
del bound[s:]
# find the first attr from which we need to start rebinding.
f = getattr(*bound[-1][:2])
if f is None:
fn(args, None, None)
return
s += 1
append = bound.append
# bind all attrs, except last to update_intermediates
for val in keys[s:-1]:
# if we need to dynamically rebind, bind to update_intermediates;
# otherwise just add the attr to the list
if isinstance(f, (EventDispatcher, Observable)):
prop = f.property(val, True)
if prop is not None and getattr(prop, 'rebind', False):
# fbind should not dispatch, otherwise
# update_intermediates might be called in the middle
# here messing things up
uid = f.fbind(
val, update_intermediates, base, keys, bound, s, fn, args)
append([f.proxy_ref, val, update_intermediates, uid])
else:
append([f.proxy_ref, val, None, None])
else:
append([getattr(f, 'proxy_ref', f), val, None, None])
f = getattr(f, val, None)
if f is None:
break
s += 1
# for the last attr we bind directly to the setting function,
# because that attr sets the value of the rule.
if isinstance(f, (EventDispatcher, Observable)):
uid = f.fbind(keys[-1], fn, args)
if uid:
append([f.proxy_ref, keys[-1], fn, uid])
# when we rebind we have to update the
# rule with the most recent value, otherwise, the value might be wrong
# and wouldn't be updated since we might not have tracked it before.
# This only happens for a callback when rebind was True for the prop.
fn(args, None, None)
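# Illustrative kv rule relying on the rebinding above (assumes `item` and
# `owner` are ObjectProperty instances declared with rebind=True):
#     Label:
#         text: root.item.owner.name
# When root.item is reassigned, the stale bindings on the old owner chain are
# unbound here and new ones are created on the fresh objects.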
def create_handler(iself, element, key, value, rule, idmap, delayed=False):
idmap = copy(idmap)
idmap.update(global_idmap)
idmap['self'] = iself.proxy_ref
handler_append = _handlers[iself.uid][key].append
# we need a hash for when delayed, so we don't execute duplicate canvas
# callbacks from the same handler during a sync op
if delayed:
fn = delayed_call_fn
args = [element, key, value, rule, idmap, None] # see _delayed_start
else:
fn = call_fn
args = (element, key, value, rule, idmap)
# bind every key.value
if rule.watched_keys is not None:
for keys in rule.watched_keys:
base = idmap.get(keys[0])
if base is None:
continue
f = base = getattr(base, 'proxy_ref', base)
bound = []
was_bound = False
append = bound.append
# bind all attrs, except last to update_intermediates
k = 1
for val in keys[1:-1]:
# if we need to dynamically rebind, bind to
# update_intermediates; otherwise just add the attr to the list
if isinstance(f, (EventDispatcher, Observable)):
prop = f.property(val, True)
if prop is not None and getattr(prop, 'rebind', False):
# fbind should not dispatch, otherwise
# update_intermediates might be called in the middle
# here messing things up
uid = f.fbind(
val, update_intermediates, base, keys, bound, k,
fn, args)
append([f.proxy_ref, val, update_intermediates, uid])
was_bound = True
else:
append([f.proxy_ref, val, None, None])
elif not isinstance(f, _cls_type):
append([getattr(f, 'proxy_ref', f), val, None, None])
else:
append([f, val, None, None])
f = getattr(f, val, None)
if f is None:
break
k += 1
# for the last attr we bind directly to the setting
# function, because that attr sets the value of the rule.
if isinstance(f, (EventDispatcher, Observable)):
uid = f.fbind(keys[-1], fn, args) # f is not None
if uid:
append([f.proxy_ref, keys[-1], fn, uid])
was_bound = True
if was_bound:
handler_append(bound)
try:
return eval(value, idmap)
except Exception as e:
tb = sys.exc_info()[2]
raise BuilderException(rule.ctx, rule.line,
'{}: {}'.format(e.__class__.__name__, e),
cause=tb)
class ParserSelector(object):
def __init__(self, key):
self.key = key.lower()
def match(self, widget):
raise NotImplementedError()
def __repr__(self):
return '<%s key=%s>' % (self.__class__.__name__, self.key)
class ParserSelectorId(ParserSelector):
def match(self, widget):
if widget.id:
return widget.id.lower() == self.key
class ParserSelectorClass(ParserSelector):
def match(self, widget):
return self.key in widget.cls
class ParserSelectorName(ParserSelector):
parents = {}
def get_bases(self, cls):
for base in cls.__bases__:
if base.__name__ == 'object':
break
yield base
if base.__name__ == 'Widget':
break
for cbase in self.get_bases(base):
yield cbase
def match(self, widget):
parents = ParserSelectorName.parents
cls = widget.__class__
if cls not in parents:
classes = [x.__name__.lower() for x in
[cls] + list(self.get_bases(cls))]
parents[cls] = classes
return self.key in parents[cls]
class BuilderBase(object):
'''The Builder is responsible for creating a :class:`Parser` for parsing a
kv file, merging the results into its internal rules, templates, etc.
By default, :class:`Builder` is a global Kivy instance used by widgets;
you can use it to load other kv files in addition to the default ones.
'''
_match_cache = {}
def __init__(self):
super(BuilderBase, self).__init__()
self.files = []
self.dynamic_classes = {}
self.templates = {}
self.rules = []
self.rulectx = {}
def load_file(self, filename, **kwargs):
'''Insert a file into the language builder and return the root widget
(if defined) of the kv file.
:parameters:
`rulesonly`: bool, defaults to False
If True, the Builder will raise an exception if you have a root
widget inside the definition.
'''
filename = resource_find(filename) or filename
if __debug__:
trace('Builder: load file %s' % filename)
with open(filename, 'r') as fd:
kwargs['filename'] = filename
data = fd.read()
# remove bom ?
if PY2:
if data.startswith((codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)):
raise ValueError('Unsupported UTF16 for kv files.')
if data.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
raise ValueError('Unsupported UTF32 for kv files.')
if data.startswith(codecs.BOM_UTF8):
data = data[len(codecs.BOM_UTF8):]
return self.load_string(data, **kwargs)
def unload_file(self, filename):
'''Unload all rules associated with a previously imported file.
.. versionadded:: 1.0.8
.. warning::
This will not remove rules or templates already applied/used on
current widgets. It will only affect the next widget creation or
template invocation.
'''
# remove rules and templates
self.rules = [x for x in self.rules if x[1].ctx.filename != filename]
self._clear_matchcache()
templates = {}
for x, y in self.templates.items():
if y[2] != filename:
templates[x] = y
self.templates = templates
if filename in self.files:
self.files.remove(filename)
# unregister all the dynamic classes
Factory.unregister_from_filename(filename)
def load_string(self, string, **kwargs):
'''Insert a string into the Language Builder and return the root widget
(if defined) of the kv string.
:Parameters:
`rulesonly`: bool, defaults to False
If True, the Builder will raise an exception if you have a root
widget inside the definition.
'''
kwargs.setdefault('rulesonly', False)
self._current_filename = fn = kwargs.get('filename', None)
# put a warning if a file is loaded multiple times
if fn in self.files:
Logger.warning(
'Lang: The file {} is loaded multiple times, '
'you might have unwanted behaviors.'.format(fn))
try:
# parse the string
parser = Parser(content=string, filename=fn)
# merge rules with our rules
self.rules.extend(parser.rules)
self._clear_matchcache()
# add the template found by the parser into ours
for name, cls, template in parser.templates:
self.templates[name] = (cls, template, fn)
Factory.register(name,
cls=partial(self.template, name),
is_template=True, warn=True)
# register all the dynamic classes
for name, baseclasses in iteritems(parser.dynamic_classes):
Factory.register(name, baseclasses=baseclasses, filename=fn,
warn=True)
# create root object if it exists
if kwargs['rulesonly'] and parser.root:
filename = kwargs.get('filename', '<string>')
raise Exception('The file <%s> also contains non-rules '
'directives' % filename)
# save the loaded files only if there is a root without
# template/dynamic classes
if fn and (parser.templates or
parser.dynamic_classes or parser.rules):
self.files.append(fn)
if parser.root:
widget = Factory.get(parser.root.name)()
self._apply_rule(widget, parser.root, parser.root)
return widget
finally:
self._current_filename = None
def template(self, *args, **ctx):
'''Create a specialized template using a specific context.
.. versionadded:: 1.0.5
With templates, you can construct custom widgets from a kv lang
definition by giving them a context. Check :ref:`Template usage
<template_usage>`.
'''
# Prevent naming clash with whatever the user might be putting into the
# ctx as key.
name = args[0]
if name not in self.templates:
raise Exception('Unknown <%s> template name' % name)
baseclasses, rule, fn = self.templates[name]
key = '%s|%s' % (name, baseclasses)
cls = Cache.get('kv.lang', key)
if cls is None:
rootwidgets = []
for basecls in baseclasses.split('+'):
rootwidgets.append(Factory.get(basecls))
cls = type(name, tuple(rootwidgets), {})
Cache.append('kv.lang', key, cls)
widget = cls()
# in previous versions, ``ctx`` was passed as is as ``template_ctx``,
# preventing widgets in it from being collected by the GC. This was
# especially relevant to AccordionItem's title_template.
proxy_ctx = {k: get_proxy(v) for k, v in ctx.items()}
self._apply_rule(widget, rule, rule, template_ctx=proxy_ctx)
return widget
def apply(self, widget):
'''Search all the rules that match the widget and apply them.
'''
rules = self.match(widget)
if __debug__:
trace('Builder: Found %d rules for %s' % (len(rules), widget))
if not rules:
return
for rule in rules:
self._apply_rule(widget, rule, rule)
def _clear_matchcache(self):
BuilderBase._match_cache = {}
def _apply_rule(self, widget, rule, rootrule, template_ctx=None):
# widget: the current instantiated widget
# rule: the current rule
# rootrule: the current root rule (for children of a rule)
# will collect reference to all the id in children
assert(rule not in self.rulectx)
self.rulectx[rule] = rctx = {
'ids': {'root': widget.proxy_ref},
'set': [], 'hdl': []}
# extract the context of the rootrule (not rule!)
assert(rootrule in self.rulectx)
rctx = self.rulectx[rootrule]
# if a template context is passed, put it as "ctx"
if template_ctx is not None:
rctx['ids']['ctx'] = QueryDict(template_ctx)
# if we got an id, put it in the root rule for a later global usage
if rule.id:
# use only the first word as `id`; discard the rest.
rule.id = rule.id.split('#', 1)[0].strip()
rctx['ids'][rule.id] = widget.proxy_ref
# set id name as an attribute on the root widget so one can simply
# access root_widget.id_name in python code
_ids = dict(rctx['ids'])
_root = _ids.pop('root')
_new_ids = _root.ids
for _key in iterkeys(_ids):
if _ids[_key] == _root:
# skip on self
continue
_new_ids[_key] = _ids[_key]
_root.ids = _new_ids
# first, ensure that the widget has all the properties used in
# the rule; if not, they will be created as ObjectProperty.
rule.create_missing(widget)
# build the widget canvas
if rule.canvas_before:
with widget.canvas.before:
self._build_canvas(widget.canvas.before, widget,
rule.canvas_before, rootrule)
if rule.canvas_root:
with widget.canvas:
self._build_canvas(widget.canvas, widget,
rule.canvas_root, rootrule)
if rule.canvas_after:
with widget.canvas.after:
self._build_canvas(widget.canvas.after, widget,
rule.canvas_after, rootrule)
# create children tree
Factory_get = Factory.get
Factory_is_template = Factory.is_template
for crule in rule.children:
cname = crule.name
if cname in ('canvas', 'canvas.before', 'canvas.after'):
raise ParserException(
crule.ctx, crule.line,
'Canvas instructions added in kv must '
'be declared before child widgets.')
# depending on whether the child rule is a template or not, we
# take a different approach
cls = Factory_get(cname)
if Factory_is_template(cname):
# we got a template, so extract all the properties and
# handlers, and push them in a "ctx" dictionary.
ctx = {}
idmap = copy(global_idmap)
idmap.update({'root': rctx['ids']['root']})
if 'ctx' in rctx['ids']:
idmap.update({'ctx': rctx['ids']['ctx']})
try:
for prule in crule.properties.values():
value = prule.co_value
if type(value) is CodeType:
value = eval(value, idmap)
ctx[prule.name] = value
for prule in crule.handlers:
value = eval(prule.value, idmap)
ctx[prule.name] = value
except Exception as e:
tb = sys.exc_info()[2]
raise BuilderException(
prule.ctx, prule.line,
'{}: {}'.format(e.__class__.__name__, e), cause=tb)
# create the template with an explicit ctx
child = cls(**ctx)
widget.add_widget(child)
# reference it on our root rule context
if crule.id:
rctx['ids'][crule.id] = child
else:
# we got a "normal" rule, construct it manually
# we can't construct it without __no_builder=True, because the
# previous implementation was doing the add_widget() before
# apply(), and so, we could use "self.parent".
child = cls(__no_builder=True)
widget.add_widget(child)
self.apply(child)
self._apply_rule(child, crule, rootrule)
# append the properties and handlers to our final resolution task
if rule.properties:
rctx['set'].append((widget.proxy_ref,
list(rule.properties.values())))
for key, crule in rule.properties.items():
# clear previously applied rules if asked
if crule.ignore_prev:
Builder.unbind_property(widget, key)
if rule.handlers:
rctx['hdl'].append((widget.proxy_ref, rule.handlers))
# if we are applying a rule other than the root one, then it's done
# for us!
if rootrule is not rule:
del self.rulectx[rule]
return
# normally, we can apply a list of properties with a proper context
try:
rule = None
for widget_set, rules in reversed(rctx['set']):
for rule in rules:
assert(isinstance(rule, ParserRuleProperty))
key = rule.name
value = rule.co_value
if type(value) is CodeType:
value = create_handler(widget_set, widget_set, key,
value, rule, rctx['ids'])
setattr(widget_set, key, value)
except Exception as e:
if rule is not None:
tb = sys.exc_info()[2]
raise BuilderException(rule.ctx, rule.line,
'{}: {}'.format(e.__class__.__name__,
e), cause=tb)
raise e
# build handlers
try:
crule = None
for widget_set, rules in rctx['hdl']:
for crule in rules:
assert(isinstance(crule, ParserRuleProperty))
assert(crule.name.startswith('on_'))
key = crule.name
if not widget_set.is_event_type(key):
key = key[3:]
idmap = copy(global_idmap)
idmap.update(rctx['ids'])
idmap['self'] = widget_set.proxy_ref
if not widget_set.fbind(key, custom_callback, crule,
idmap):
raise AttributeError(key)
# hack for on_parent
if crule.name == 'on_parent':
Factory.Widget.parent.dispatch(widget_set.__self__)
except Exception as e:
if crule is not None:
tb = sys.exc_info()[2]
raise BuilderException(
crule.ctx, crule.line,
'{}: {}'.format(e.__class__.__name__, e), cause=tb)
raise e
# rule finished, forget it
del self.rulectx[rootrule]
def match(self, widget):
'''Return a list of :class:`ParserRule` objects matching the widget.
'''
cache = BuilderBase._match_cache
k = (widget.__class__, widget.id, tuple(widget.cls))
if k in cache:
return cache[k]
rules = []
for selector, rule in self.rules:
if selector.match(widget):
if rule.avoid_previous_rules:
del rules[:]
rules.append(rule)
cache[k] = rules
return rules
def sync(self):
'''Execute all the waiting operations, such as the execution of all the
expressions related to the canvas.
.. versionadded:: 1.7.0
'''
global _delayed_start
next_args = _delayed_start
if next_args is None:
return
while next_args is not StopIteration:
# is this try/except still needed? yes, in case widget died in this
# frame after the call was scheduled
try:
call_fn(next_args[:-1], None, None)
except ReferenceError:
pass
args = next_args
next_args = args[-1]
args[-1] = None
_delayed_start = None
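# Usage sketch: after changing properties that feed delayed canvas
# expressions, the pending updates can be flushed immediately with:
#     Builder.sync()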
def unbind_widget(self, uid):
'''Unbind all the handlers created by the KV rules of the
widget. The :attr:`kivy.uix.widget.Widget.uid` is passed here
instead of the widget itself, because Builder is using it in the
widget destructor.
This effectively clears all the KV rules associated with this widget.
For example::
>>> w = Builder.load_string(\'''
... Widget:
... height: self.width / 2. if self.disabled else self.width
... x: self.y + 50
... \''')
>>> w.size
[100, 100]
>>> w.pos
[50, 0]
>>> w.width = 500
>>> w.size
[500, 500]
>>> Builder.unbind_widget(w.uid)
>>> w.width = 222
>>> w.y = 500
>>> w.size
[222, 500]
>>> w.pos
[50, 500]
.. versionadded:: 1.7.2
'''
if uid not in _handlers:
return
for prop_callbacks in _handlers[uid].values():
for callbacks in prop_callbacks:
for f, k, fn, bound_uid in callbacks:
if fn is None: # it's not a kivy prop.
continue
try:
f.unbind_uid(k, bound_uid)
except ReferenceError:
# proxy widget is already gone, that's cool :)
pass
del _handlers[uid]
def unbind_property(self, widget, name):
'''Unbind the handlers created by all the rules of the widget that set
the name.
This effectively clears all the rules of widget that take the form::
name: rule
For example::
>>> w = Builder.load_string(\'''
... Widget:
... height: self.width / 2. if self.disabled else self.width
... x: self.y + 50
... \''')
>>> w.size
[100, 100]
>>> w.pos
[50, 0]
>>> w.width = 500
>>> w.size
[500, 500]
>>> Builder.unbind_property(w, 'height')
>>> w.width = 222
>>> w.size
[222, 500]
>>> w.y = 500
>>> w.pos
[550, 500]
.. versionadded:: 1.9.1
'''
uid = widget.uid
if uid not in _handlers:
return
prop_handlers = _handlers[uid]
if name not in prop_handlers:
return
for callbacks in prop_handlers[name]:
for f, k, fn, bound_uid in callbacks:
if fn is None: # it's not a kivy prop.
continue
try:
f.unbind_uid(k, bound_uid)
except ReferenceError:
# proxy widget is already gone, that's cool :)
pass
del prop_handlers[name]
if not prop_handlers:
del _handlers[uid]
def _build_canvas(self, canvas, widget, rule, rootrule):
global Instruction
if Instruction is None:
Instruction = Factory.get('Instruction')
idmap = copy(self.rulectx[rootrule]['ids'])
for crule in rule.children:
name = crule.name
if name == 'Clear':
canvas.clear()
continue
instr = Factory.get(name)()
if not isinstance(instr, Instruction):
raise BuilderException(
crule.ctx, crule.line,
'You can add only graphics Instruction in canvas.')
try:
for prule in crule.properties.values():
key = prule.name
value = prule.co_value
if type(value) is CodeType:
value = create_handler(
widget, instr.proxy_ref,
key, value, prule, idmap, True)
setattr(instr, key, value)
except Exception as e:
tb = sys.exc_info()[2]
raise BuilderException(
prule.ctx, prule.line,
'{}: {}'.format(e.__class__.__name__, e), cause=tb)
#: Main instance of a :class:`BuilderBase`.
Builder = register_context('Builder', BuilderBase)
Builder.load_file(join(kivy_data_dir, 'style.kv'), rulesonly=True)
if 'KIVY_PROFILE_LANG' in environ:
import atexit
import cgi
def match_rule(fn, index, rule):
if rule.ctx.filename != fn:
return
for prop, prp in iteritems(rule.properties):
if prp.line != index:
continue
yield prp
for child in rule.children:
for r in match_rule(fn, index, child):
yield r
if rule.canvas_root:
for r in match_rule(fn, index, rule.canvas_root):
yield r
if rule.canvas_before:
for r in match_rule(fn, index, rule.canvas_before):
yield r
if rule.canvas_after:
for r in match_rule(fn, index, rule.canvas_after):
yield r
def dump_builder_stats():
html = [
'<!doctype html>'
'<html><body>',
'<style type="text/css">\n',
'pre { margin: 0; }\n',
'</style>']
files = set([x[1].ctx.filename for x in Builder.rules])
for fn in files:
lines = open(fn).readlines()
html += ['<h2>', fn, '</h2>', '<table>']
count = 0
for index, line in enumerate(lines):
line = line.rstrip()
line = cgi.escape(line)
matched_prp = []
for psn, rule in Builder.rules:
matched_prp += list(match_rule(fn, index, rule))
count = sum(set([x.count for x in matched_prp]))
color = (255, 155, 155) if count else (255, 255, 255)
html += ['<tr style="background-color: rgb{}">'.format(color),
'<td>', str(index + 1), '</td>',
'<td>', str(count), '</td>',
'<td><pre>', line, '</pre></td>',
'</tr>']
html += ['</table>']
html += ['</body></html>']
with open('builder_stats.html', 'w') as fd:
fd.write(''.join(html))
print('Profiling written at builder_stats.html')
atexit.register(dump_builder_stats)
| mit |
ostinelli/pyopenspime | lib/pyopenspime/xmpp/roster.py | 203 | 9163 | ## roster.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: roster.py,v 1.20 2005/07/13 13:22:52 snakeru Exp $
"""
Simple roster implementation. Can be used for different tasks such as
mass-renaming of contacts.
"""
from protocol import *
from client import PlugIn
class Roster(PlugIn):
""" Defines a plenty of methods that will allow you to manage roster.
Also automatically track presences from remote JIDs taking into
account that every JID can have multiple resources connected. Does not
currently support 'error' presences.
You can also use mapping interface for access to the internal representation of
contacts in roster.
"""
def __init__(self):
""" Init internal variables. """
PlugIn.__init__(self)
self.DBG_LINE='roster'
self._data = {}
self.set=None
self._exported_methods=[self.getRoster]
def plugin(self,owner,request=1):
""" Register presence and subscription trackers in the owner's dispatcher.
Also request roster from server if the 'request' argument is set.
Used internally."""
self._owner.RegisterHandler('iq',self.RosterIqHandler,'result',NS_ROSTER)
self._owner.RegisterHandler('iq',self.RosterIqHandler,'set',NS_ROSTER)
self._owner.RegisterHandler('presence',self.PresenceHandler)
if request: self.Request()
def Request(self,force=0):
""" Request roster from server if it were not yet requested
(or if the 'force' argument is set). """
if self.set is None: self.set=0
elif not force: return
self._owner.send(Iq('get',NS_ROSTER))
self.DEBUG('Roster requested from server','start')
def getRoster(self):
""" Requests roster from server if neccessary and returns self."""
if not self.set: self.Request()
while not self.set: self._owner.Process(10)
return self
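# Usage sketch (assumes a connected client owning this plugin; names are
# illustrative):
#     roster = client.getRoster()
#     for jid in roster.getItems():
#         print jid, roster.getName(jid), roster.getSubscription(jid)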
def RosterIqHandler(self,dis,stanza):
""" Subscription tracker. Used internally for setting items state in
internal roster representation. """
for item in stanza.getTag('query').getTags('item'):
jid=item.getAttr('jid')
if item.getAttr('subscription')=='remove':
if self._data.has_key(jid): del self._data[jid]
raise NodeProcessed # a MUST
self.DEBUG('Setting roster item %s...'%jid,'ok')
if not self._data.has_key(jid): self._data[jid]={}
self._data[jid]['name']=item.getAttr('name')
self._data[jid]['ask']=item.getAttr('ask')
self._data[jid]['subscription']=item.getAttr('subscription')
self._data[jid]['groups']=[]
if not self._data[jid].has_key('resources'): self._data[jid]['resources']={}
for group in item.getTags('group'): self._data[jid]['groups'].append(group.getData())
self._data[self._owner.User+'@'+self._owner.Server]={'resources':{},'name':None,'ask':None,'subscription':None,'groups':None,}
self.set=1
raise NodeProcessed # a MUST. Otherwise you'll get back an <iq type='error'/>
def PresenceHandler(self,dis,pres):
""" Presence tracker. Used internally for setting items' resources state in
internal roster representation. """
jid=JID(pres.getFrom())
if not self._data.has_key(jid.getStripped()): self._data[jid.getStripped()]={'name':None,'ask':None,'subscription':'none','groups':['Not in roster'],'resources':{}}
item=self._data[jid.getStripped()]
typ=pres.getType()
if not typ:
self.DEBUG('Setting roster item %s for resource %s...'%(jid.getStripped(),jid.getResource()),'ok')
item['resources'][jid.getResource()]=res={'show':None,'status':None,'priority':'0','timestamp':None}
if pres.getTag('show'): res['show']=pres.getShow()
if pres.getTag('status'): res['status']=pres.getStatus()
if pres.getTag('priority'): res['priority']=pres.getPriority()
if not pres.getTimestamp(): pres.setTimestamp()
res['timestamp']=pres.getTimestamp()
elif typ=='unavailable' and item['resources'].has_key(jid.getResource()): del item['resources'][jid.getResource()]
# Need to handle type='error' also
def _getItemData(self,jid,dataname):
""" Return specific jid's representation in internal format. Used internally. """
jid=jid[:(jid+'/').find('/')]
return self._data[jid][dataname]
def _getResourceData(self,jid,dataname):
""" Return specific jid's resource representation in internal format. Used internally. """
if jid.find('/')+1:
jid,resource=jid.split('/',1)
if self._data[jid]['resources'].has_key(resource): return self._data[jid]['resources'][resource][dataname]
elif self._data[jid]['resources'].keys():
lastpri=-129
for r in self._data[jid]['resources'].keys():
if int(self._data[jid]['resources'][r]['priority'])>lastpri: resource,lastpri=r,int(self._data[jid]['resources'][r]['priority'])
return self._data[jid]['resources'][resource][dataname]
def delItem(self,jid):
""" Delete contact 'jid' from roster."""
self._owner.send(Iq('set',NS_ROSTER,payload=[Node('item',{'jid':jid,'subscription':'remove'})]))
def getAsk(self,jid):
""" Returns 'ask' value of contact 'jid'."""
return self._getItemData(jid,'ask')
def getGroups(self,jid):
""" Returns groups list that contact 'jid' belongs to."""
return self._getItemData(jid,'groups')
def getName(self,jid):
""" Returns name of contact 'jid'."""
return self._getItemData(jid,'name')
def getPriority(self,jid):
""" Returns priority of contact 'jid'. 'jid' should be a full (not bare) JID."""
return self._getResourceData(jid,'priority')
def getRawRoster(self):
""" Returns roster representation in internal format. """
return self._data
def getRawItem(self,jid):
""" Returns roster item 'jid' representation in internal format. """
return self._data[jid[:(jid+'/').find('/')]]
def getShow(self, jid):
""" Returns 'show' value of contact 'jid'. 'jid' should be a full (not bare) JID."""
return self._getResourceData(jid,'show')
def getStatus(self, jid):
""" Returns 'status' value of contact 'jid'. 'jid' should be a full (not bare) JID."""
return self._getResourceData(jid,'status')
def getSubscription(self,jid):
""" Returns 'subscription' value of contact 'jid'."""
return self._getItemData(jid,'subscription')
def getResources(self,jid):
""" Returns list of connected resources of contact 'jid'."""
return self._data[jid[:(jid+'/').find('/')]]['resources'].keys()
def setItem(self,jid,name=None,groups=[]):
""" Creates/renames contact 'jid' and sets the groups list that it now belongs to."""
iq=Iq('set',NS_ROSTER)
query=iq.getTag('query')
attrs={'jid':jid}
if name: attrs['name']=name
item=query.setTag('item',attrs)
for group in groups: item.addChild(node=Node('group',payload=[group]))
self._owner.send(iq)
def getItems(self):
""" Return list of all [bare] JIDs that the roster is currently tracks."""
return self._data.keys()
def keys(self):
""" Same as getItems. Provided for the sake of dictionary interface."""
return self._data.keys()
def __getitem__(self,item):
""" Get the contact in the internal format. Raises KeyError if JID 'item' is not in roster."""
return self._data[item]
def getItem(self,item):
""" Get the contact in the internal format (or None if JID 'item' is not in roster)."""
if self._data.has_key(item): return self._data[item]
def Subscribe(self,jid):
""" Send subscription request to JID 'jid'."""
self._owner.send(Presence(jid,'subscribe'))
def Unsubscribe(self,jid):
""" Ask for removing our subscription for JID 'jid'."""
self._owner.send(Presence(jid,'unsubscribe'))
def Authorize(self,jid):
""" Authorise JID 'jid'. Works only if these JID requested auth previously. """
self._owner.send(Presence(jid,'subscribed'))
def Unauthorize(self,jid):
""" Unauthorise JID 'jid'. Use for declining authorisation request
or for removing existing authorization. """
self._owner.send(Presence(jid,'unsubscribed'))
| gpl-3.0 |
thobrla/apitools | apitools/base/py/credentials_lib_test.py | 11 | 4797 | #
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
import unittest2
from apitools.base.py import credentials_lib
from apitools.base.py import util
class CredentialsLibTest(unittest2.TestCase):
def _GetServiceCreds(self, service_account_name=None, scopes=None):
kwargs = {}
if service_account_name is not None:
kwargs['service_account_name'] = service_account_name
service_account_name = service_account_name or 'default'
def MockMetadataCalls(request_url):
default_scopes = scopes or ['scope1']
if request_url.endswith('scopes'):
return six.StringIO(''.join(default_scopes))
elif request_url.endswith('service-accounts'):
return six.StringIO(service_account_name)
elif request_url.endswith(
'/service-accounts/%s/token' % service_account_name):
return six.StringIO('{"access_token": "token"}')
self.fail('Unexpected HTTP request to %s' % request_url)
with mock.patch.object(credentials_lib, '_GceMetadataRequest',
side_effect=MockMetadataCalls,
autospec=True) as opener_mock:
with mock.patch.object(util, 'DetectGce',
autospec=True) as mock_detect:
mock_detect.return_value = True
credentials = credentials_lib.GceAssertionCredentials(
scopes, **kwargs)
self.assertIsNone(credentials._refresh(None))
self.assertEqual(3, opener_mock.call_count)
return credentials
def testGceServiceAccounts(self):
scopes = ['scope1']
self._GetServiceCreds()
self._GetServiceCreds(scopes=scopes)
self._GetServiceCreds(service_account_name='my_service_account',
scopes=scopes)
def testGetServiceAccount(self):
# We'd also like to test the metadata calls, which requires
# having some knowledge about how HTTP calls are made (so that
# we can mock them). It's unfortunate, but there's no way
# around it.
creds = self._GetServiceCreds()
opener = mock.MagicMock()
opener.open = mock.MagicMock()
opener.open.return_value = six.StringIO('default/\nanother')
with mock.patch.object(six.moves.urllib.request, 'build_opener',
return_value=opener,
autospec=True) as build_opener:
creds.GetServiceAccount('default')
self.assertEqual(1, build_opener.call_count)
self.assertEqual(1, opener.open.call_count)
req = opener.open.call_args[0][0]
self.assertTrue(req.get_full_url().startswith(
'http://metadata.google.internal/'))
# The urllib module does weird things with header case.
self.assertEqual('Google', req.get_header('Metadata-flavor'))
class TestGetRunFlowFlags(unittest2.TestCase):
def setUp(self):
self._flags_actual = credentials_lib.FLAGS
def tearDown(self):
credentials_lib.FLAGS = self._flags_actual
def test_with_gflags(self):
HOST = 'myhostname'
PORT = '144169'
class MockFlags(object):
auth_host_name = HOST
auth_host_port = PORT
auth_local_webserver = False
credentials_lib.FLAGS = MockFlags
flags = credentials_lib._GetRunFlowFlags([
'--auth_host_name=%s' % HOST,
'--auth_host_port=%s' % PORT,
'--noauth_local_webserver',
])
self.assertEqual(flags.auth_host_name, HOST)
self.assertEqual(flags.auth_host_port, PORT)
self.assertEqual(flags.logging_level, 'ERROR')
self.assertEqual(flags.noauth_local_webserver, True)
def test_without_gflags(self):
credentials_lib.FLAGS = None
flags = credentials_lib._GetRunFlowFlags([])
self.assertEqual(flags.auth_host_name, 'localhost')
self.assertEqual(flags.auth_host_port, [8080, 8090])
self.assertEqual(flags.logging_level, 'ERROR')
self.assertEqual(flags.noauth_local_webserver, False)
| apache-2.0 |
oudalab/fajita | pythonAPI/flask/lib/python3.5/site-packages/flask/debughelpers.py | 318 | 6024 | # -*- coding: utf-8 -*-
"""
flask.debughelpers
~~~~~~~~~~~~~~~~~~
Various helpers to make the development experience better.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from ._compat import implements_to_string, text_type
from .app import Flask
from .blueprints import Blueprint
from .globals import _request_ctx_stack
class UnexpectedUnicodeError(AssertionError, UnicodeError):
"""Raised in places where we want some better error reporting for
unexpected unicode or binary data.
"""
@implements_to_string
class DebugFilesKeyError(KeyError, AssertionError):
"""Raised from request.files during debugging. The idea is that it can
provide a better error message than just a generic KeyError/BadRequest.
"""
def __init__(self, request, key):
form_matches = request.form.getlist(key)
buf = ['You tried to access the file "%s" in the request.files '
'dictionary but it does not exist. The mimetype for the request '
'is "%s" instead of "multipart/form-data" which means that no '
'file contents were transmitted. To fix this error you should '
'provide enctype="multipart/form-data" in your form.' %
(key, request.mimetype)]
if form_matches:
buf.append('\n\nThe browser instead transmitted some file names. '
'This was submitted: %s' % ', '.join('"%s"' % x
for x in form_matches))
self.msg = ''.join(buf)
def __str__(self):
return self.msg
class FormDataRoutingRedirect(AssertionError):
"""This exception is raised by Flask in debug mode if it detects a
redirect caused by the routing system when the request method is not
GET, HEAD or OPTIONS. Reasoning: form data will be dropped.
"""
def __init__(self, request):
exc = request.routing_exception
buf = ['A request was sent to this URL (%s) but a redirect was '
'issued automatically by the routing system to "%s".'
% (request.url, exc.new_url)]
# In case just a slash was appended we can be extra helpful
if request.base_url + '/' == exc.new_url.split('?')[0]:
buf.append(' The URL was defined with a trailing slash so '
'Flask will automatically redirect to the URL '
'with the trailing slash if it was accessed '
'without one.')
buf.append(' Make sure to directly send your %s-request to this URL '
'since we can\'t make browsers or HTTP clients redirect '
'with form data reliably or without user interaction.' %
request.method)
buf.append('\n\nNote: this exception is only raised in debug mode')
AssertionError.__init__(self, ''.join(buf).encode('utf-8'))
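# Typical resolution for the redirect above (sketch): submit the form to the
# canonical URL so no redirect happens, e.g. action="/items/" rather than
# action="/items" when the route was defined with a trailing slash.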
def attach_enctype_error_multidict(request):
"""Since Flask 0.8 we're monkeypatching the files object in case a
request is detected that does not use multipart form data but the files
object is accessed.
"""
oldcls = request.files.__class__
class newcls(oldcls):
def __getitem__(self, key):
try:
return oldcls.__getitem__(self, key)
except KeyError:
if key not in request.form:
raise
raise DebugFilesKeyError(request, key)
newcls.__name__ = oldcls.__name__
newcls.__module__ = oldcls.__module__
request.files.__class__ = newcls
def _dump_loader_info(loader):
yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__)
for key, value in sorted(loader.__dict__.items()):
if key.startswith('_'):
continue
if isinstance(value, (tuple, list)):
if not all(isinstance(x, (str, text_type)) for x in value):
continue
yield '%s:' % key
for item in value:
yield ' - %s' % item
continue
elif not isinstance(value, (str, text_type, int, float, bool)):
continue
yield '%s: %r' % (key, value)
def explain_template_loading_attempts(app, template, attempts):
"""This should help developers understand what failed"""
info = ['Locating template "%s":' % template]
total_found = 0
blueprint = None
reqctx = _request_ctx_stack.top
if reqctx is not None and reqctx.request.blueprint is not None:
blueprint = reqctx.request.blueprint
for idx, (loader, srcobj, triple) in enumerate(attempts):
if isinstance(srcobj, Flask):
src_info = 'application "%s"' % srcobj.import_name
elif isinstance(srcobj, Blueprint):
src_info = 'blueprint "%s" (%s)' % (srcobj.name,
srcobj.import_name)
else:
src_info = repr(srcobj)
info.append('% 5d: trying loader of %s' % (
idx + 1, src_info))
for line in _dump_loader_info(loader):
info.append(' %s' % line)
if triple is None:
detail = 'no match'
else:
detail = 'found (%r)' % (triple[1] or '<string>')
total_found += 1
info.append(' -> %s' % detail)
seems_fishy = False
if total_found == 0:
info.append('Error: the template could not be found.')
seems_fishy = True
elif total_found > 1:
info.append('Warning: multiple loaders returned a match for the template.')
seems_fishy = True
if blueprint is not None and seems_fishy:
info.append(' The template was looked up from an endpoint that '
'belongs to the blueprint "%s".' % blueprint)
info.append(' Maybe you did not place a template in the right folder?')
info.append(' See http://flask.pocoo.org/docs/blueprints/#templates')
app.logger.info('\n'.join(info))
| mit |
LLCoolDave/MafiaBot | MafiaBotTest.py | 1 | 5747 | __author__ = 'LLCoolDave'
# ToDo: Replace by proper unit tests, currently broken as it stands
import logging
from MafiaBot.MafiaBot import *
from sopel.tools import Identifier
log = logging.getLogger('MafiaBot')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
log.addHandler(ch)
mb = MafiaBot()
mainchannel = mb.mainchannel
deadchat = mb.deadchat
mafiachannel = mb.mafiachannels[0]
playerlist = [Identifier('PLAYERA'), Identifier('PLAYERB'), Identifier('PLAYERC'), Identifier('PLAYERD'), Identifier('PLAYERE'), Identifier('PLAYERF'), Identifier('PLAYERG')]
class botstub:
def msg(self, target, msg, max_messages=0):
log.info('BOT MSG @'+str(target)+': '+msg)
def join(self, param):
log.info('BOT JOIN: '+param)
def part(self, param):
log.info('BOT PART: '+param)
def write(self, param):
log.info('BOT WRITE: '+param[0]+param[1])
def say(self, msg, max_messages=0):
log.info('BOT SAY: '+msg)
def SendCommand(command, source, nick, param):
reply = mb.HandleCommand(command, source, nick, param, botstub())
log.info('COMMAND '+command+' by '+str(nick)+' in '+str(source)+' with parameter \''+str(param)+'\'')
if reply is not None:
log.info('RESPONSE ' + reply)
def SendPlayerCommand(command, source, nick, param):
reply = mb.HandlePlayerCommand(command, source, nick, param, botstub())
log.info('COMMAND '+command+' by '+str(nick)+' in '+str(source)+' with parameter \''+str(param)+'\'')
if reply is not None:
log.info('RESPONSE ' + reply)
def GameLoop():
mb.GameLoop(botstub())
def LogOff():
log.setLevel(50)
def LogOn():
log.setLevel(10)
def JoinAndStart():
for player in playerlist:
SendCommand('join', mainchannel, player, '')
SendCommand('setup', mainchannel, playerlist[0], 'load test')
SendCommand('setup', mainchannel, playerlist[0], 'daystart')
SendCommand('players', mainchannel, playerlist[0], '')
#test votes command
SendCommand('votes', mainchannel, playerlist[2], '')
SendCommand('start', mainchannel, playerlist[0], '')
SendCommand('votes', playerlist[3], playerlist[3], '')
def Vote(player, target='NoLynch'):
strtar = str(target)
SendCommand('vote', mainchannel, player, strtar)
def PassDay(target='NoLynch'):
for player in playerlist:
Vote(player, target)
def BreakPoint():
pass
def Main():
# all players join
LogOff()
JoinAndStart()
# get mafia
scums = [player for player in playerlist if mb.players[player].faction == MafiaPlayer.FACTION_MAFIA]
# get prostitute
prostitutes = [player for player in playerlist if isinstance(mb.players[player].role, Roles['prostitute'])]
if prostitutes:
pros = prostitutes[0]
else:
pros = None
    # get medic
medics = [player for player in playerlist if isinstance(mb.players[player].role, Roles['medic'])]
if medics:
medic = medics[0]
else:
medic = None
cops = [player for player in playerlist if isinstance(mb.players[player].role, Roles['cop'])]
if cops:
cop = cops[0]
else:
cop = None
paritycops = [player for player in playerlist if isinstance(mb.players[player].role, Roles['paritycop'])]
if paritycops:
paritycop = paritycops[0]
else:
paritycop = None
trackers = [player for player in playerlist if isinstance(mb.players[player].role, Roles['tracker'])]
if trackers:
tracker = trackers[0]
else:
tracker = None
watchers = [player for player in playerlist if isinstance(mb.players[player].role, Roles['watcher'])]
if watchers:
watcher = watchers[0]
else:
watcher = None
bulletproofs = [player for player in playerlist if isinstance(mb.players[player].role, Roles['bulletproof'])]
if bulletproofs:
bulletproof = bulletproofs[0]
else:
bulletproof = None
gunsmiths = [player for player in playerlist if isinstance(mb.players[player].role, Roles['gunsmith'])]
if gunsmiths:
gunsmith = gunsmiths[0]
else:
gunsmith = None
vigilantes = [player for player in playerlist if isinstance(mb.players[player].role, Roles['vigilante'])]
if vigilantes:
vigilante = vigilantes[0]
else:
vigilante = None
aliens = [player for player in playerlist if isinstance(mb.players[player].role, Roles['alien'])]
if aliens:
alien = aliens[0]
else:
alien = None
if scums[0] in prostitutes:
scum = scums[1]
else:
scum = scums[0]
# get setup
setup = [(str(player), mb.players[player].GetFaction(), mb.players[player].role.GetRoleName()) for player in playerlist]
LogOn()
log.debug('This game\'s setup is: '+str(setup))
i = 0
while mb.active:
# lynch player i
PassDay(playerlist[i])
LogOff()
SendPlayerCommand('pass', gunsmith, gunsmith, bulletproof)
SendPlayerCommand('pass', pros, pros, cop)
SendPlayerCommand('pass', medic, medic, playerlist[0])
SendPlayerCommand('pass', cop, cop, playerlist[0])
SendPlayerCommand('pass', tracker, tracker, cop)
SendPlayerCommand('pass', watcher, watcher, cop)
SendPlayerCommand('pass', vigilante, vigilante, cop)
SendPlayerCommand('check', paritycop, paritycop, playerlist[6-i])
SendCommand('nokill', mafiachannel, scum, playerlist[0])
SendCommand('nokill', mafiachannel, pros, playerlist[0])
LogOn()
GameLoop()
i += 1
if __name__ == "__main__":
Main()
| mit |
noam09/deluge-telegramer | telegramer/include/telegram/inline/inlinequeryresultcachedsticker.py | 2 | 2684 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InlineQueryResultCachedSticker."""
from telegram import InlineQueryResult
class InlineQueryResultCachedSticker(InlineQueryResult):
"""
Represents a link to a sticker stored on the Telegram servers. By default, this sticker will
be sent by the user. Alternatively, you can use :attr:`input_message_content` to send a
message with the specified content instead of the sticker.
Attributes:
type (:obj:`str`): 'sticker`.
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
sticker_file_id (:obj:`str`): A valid file identifier of the sticker.
reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the
message to be sent instead of the sticker.
Args:
id (:obj:`str`):
sticker_file_id (:obj:`str`):
reply_markup (:class:`telegram.InlineKeyboardMarkup`, optional): Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`, optional): Content of the
message to be sent instead of the sticker.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self,
id,
sticker_file_id,
reply_markup=None,
input_message_content=None,
**kwargs):
# Required
super(InlineQueryResultCachedSticker, self).__init__('sticker', id)
self.sticker_file_id = sticker_file_id
# Optionals
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
| gpl-3.0 |
thaumos/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ssh_host_key.py | 24 | 9501 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib uses python logging; you can get its output if the following is set
# in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ssh_host_key
short_description: SSH proxy host public keys in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall_ssh feature and host_key category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
            - FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
firewall_ssh_host_key:
description:
- SSH proxy host public keys.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
hostname:
description:
- Hostname of the SSH server.
ip:
description:
- IP address of the SSH server.
name:
description:
- SSH public key name.
required: true
nid:
description:
- Set the nid of the ECDSA key.
choices:
- 256
- 384
- 521
port:
description:
- Port of the SSH server.
public-key:
description:
- SSH public key.
status:
description:
- Set the trust status of the public key.
choices:
- trusted
- revoked
type:
description:
- Set the type of the public key.
choices:
- RSA
- DSA
- ECDSA
- ED25519
- RSA-CA
- DSA-CA
- ECDSA-CA
- ED25519-CA
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: SSH proxy host public keys.
fortios_firewall_ssh_host_key:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_ssh_host_key:
state: "present"
hostname: "myhostname"
ip: "<your_own_value>"
name: "default_name_5"
nid: "256"
port: "7"
public-key: "<your_own_value>"
status: "trusted"
type: "RSA"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_ssh_host_key_data(json):
option_list = ['hostname', 'ip', 'name',
'nid', 'port', 'public-key',
'status', 'type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def firewall_ssh_host_key(data, fos):
vdom = data['vdom']
firewall_ssh_host_key_data = data['firewall_ssh_host_key']
filtered_data = filter_firewall_ssh_host_key_data(firewall_ssh_host_key_data)
if firewall_ssh_host_key_data['state'] == "present":
return fos.set('firewall.ssh',
'host-key',
data=filtered_data,
vdom=vdom)
elif firewall_ssh_host_key_data['state'] == "absent":
return fos.delete('firewall.ssh',
'host-key',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall_ssh(data, fos):
login(data)
methodlist = ['firewall_ssh_host_key']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"firewall_ssh_host_key": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"hostname": {"required": False, "type": "str"},
"ip": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"nid": {"required": False, "type": "str",
"choices": ["256", "384", "521"]},
"port": {"required": False, "type": "int"},
"public-key": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["trusted", "revoked"]},
"type": {"required": False, "type": "str",
"choices": ["RSA", "DSA", "ECDSA",
"ED25519", "RSA-CA", "DSA-CA",
"ECDSA-CA", "ED25519-CA"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall_ssh(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
lucastheis/c2s | scripts/c2s-evaluate.py | 1 | 7723 | #!/usr/bin/env python
"""
Evaluates firing rate predictions in terms of correlations or Poisson likelihoods.
Examples:
c2s evaluate data.preprocessed.pck predictions.pck
"""
import os
import sys
from argparse import ArgumentParser
from scipy.io import savemat
from pickle import load
from numpy import mean, min, hstack, asarray, average, unique, ones
from c2s import evaluate, load_data
from c2s.experiment import Experiment
from c2s.utils import convert
def print_traces(result, fps):
"""
Prints the result for each trace and averages over traces.
"""
for k, r in enumerate(result):
print '{0:>5} {1:>6.1f} {2:>8.3f}'.format(k, fps[-1][k], r)
print '-------------------------'
print '{0:>5} {1:>6.1f} {2:>8.3f}'.format('Avg.', mean(fps[-1]), mean(result))
print
def print_weighted_average(result, data, downsampling):
"""
Prints the result for each cell by calculating a weighted average of all traces of a cell.
The overall average of cells is also weighted by the recording time of the cell.
"""
	if 'cell_num' not in data[0]:
cell_results = result
cell_nums = range(len(cell_results))
cell_fps = asarray([entry['fps'] / downsampling for entry in data])
cell_weights = [entry['calcium'].size / float(entry['fps']) for entry in data]
number_of_traces = ones(len(cell_results))
else:
# the following code can be written more efficiently,
# but it's not necessary given the small number of traces and cells
cell_nums = unique([entry['cell_num'] for entry in data])
cell_results = []
number_of_traces = []
cell_fps = []
cell_weights = []
for i in cell_nums:
traces_results = []
traces_fps = []
traces_weights = []
# find the results and weights for all traces belonging to cell i
for k, entry in enumerate(data):
if entry['cell_num'] == i:
traces_results.append(result[k])
traces_fps.append(entry['fps'] / downsampling)
traces_weights.append(entry['calcium'].size / float(entry['fps']))
cell_results.append(average(traces_results, weights=traces_weights))
cell_fps.append(average(traces_fps, weights=traces_weights))
cell_weights.append(sum(traces_weights))
number_of_traces.append(len(traces_results))
cell_results = asarray(cell_results)
number_of_traces = asarray(number_of_traces)
cell_fps = asarray(cell_fps)
weighted_average = average(cell_results, weights=cell_weights)
weighted_average_fps = average(cell_fps, weights=cell_weights)
for k, r in enumerate(cell_results):
print '{0:>5} {1:>7} {2:>6.1f} {3:>8.3f}'.format(cell_nums[k], number_of_traces[k], cell_fps[k], r)
print '-----------------------------'
print '{0:>5} {1:>7} {2:>6.1f} {3:>8.3f}'.format('Avg.', '', weighted_average_fps, weighted_average)
print
def main(argv):
parser = ArgumentParser(argv[0], description=__doc__)
parser.add_argument('dataset', type=str)
parser.add_argument('predictions', type=str, nargs='?')
parser.add_argument('--downsampling', '-s', type=int, default=[1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50], nargs='+')
parser.add_argument('--optimize', '-z', type=int, default=1,
help='Whether or not to optimize point-wise nonlinearity when evaluating likelihood.')
parser.add_argument('--regularization', '-r', type=float, default=5e-8,
help='Controls smoothness of optimized nonlinearity (default: 5e-8).')
parser.add_argument('--method', '-m', type=str, default='corr', choices=['corr', 'auc', 'info'])
	parser.add_argument('--weighted-average', '-w', type=int, default=0,
		help='Whether or not to weight traces by their duration.')
parser.add_argument('--output', '-o', type=str, default='')
parser.add_argument('--verbosity', '-v', type=int, default=1)
args, _ = parser.parse_known_args(argv[1:])
experiment = Experiment()
data = load_data(args.dataset)
if not args.predictions:
# use raw calcium signal for prediction
		calcium_min = min(hstack([entry['calcium'] for entry in data]))
for entry in data:
entry['predictions'] = entry['calcium'] - calcium_min + 1e-5
else:
predictions = load_data(args.predictions)
try:
if len(predictions) != len(data):
raise ValueError()
for entry1, entry2 in zip(data, predictions):
if entry1['calcium'].size != entry2['predictions'].size:
raise ValueError()
entry1['predictions'] = entry2['predictions']
except ValueError:
print 'These predictions seem to be for a different dataset.'
return 1
fps = []
loglik = []
correlations = []
auc = []
entropy = []
functions = []
for ds in args.downsampling:
if args.verbosity > 0:
if args.weighted_average:
if args.method.lower().startswith('c'):
print '{0:>5} {1:>7} {2:>7} {3}'.format('Cell', '#Traces', 'FPS ', 'Correlation')
elif args.method.lower().startswith('a'):
print '{0:>5} {1:>7} {2:>7} {3}'.format('Cell', '#Traces', 'FPS ', 'AUC')
else:
print '{0:>5} {1:>7} {2:>7} {3}'.format('Cell', '#Traces', 'FPS ', 'Information gain')
else:
if args.method.lower().startswith('c'):
print '{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'Correlation')
elif args.method.lower().startswith('a'):
print '{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'AUC')
else:
print '{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'Information gain')
fps.append([])
for entry in data:
fps[-1].append(entry['fps'] / ds)
if args.method.lower().startswith('c'):
# compute correlations
R = evaluate(data, method=args.method,
optimize=args.optimize,
downsampling=ds,
verbosity=args.verbosity)
correlations.append(R)
if args.verbosity > 0:
if args.weighted_average:
print_weighted_average(R, data, ds)
else:
print_traces(R, fps)
elif args.method.lower().startswith('a'):
# compute correlations
A = evaluate(data, method=args.method,
optimize=args.optimize,
downsampling=ds,
verbosity=args.verbosity)
auc.append(A)
if args.verbosity > 0:
if args.weighted_average:
print_weighted_average(A, data, ds)
else:
print_traces(A, fps)
else:
# compute log-likelihoods
L, H, f = evaluate(data, method='loglik',
optimize=args.optimize,
downsampling=ds,
verbosity=args.verbosity,
return_all=True,
regularize=args.regularization)
loglik.append(L)
entropy.append(H)
functions.append((f.x, f.y))
if args.verbosity > 0:
if args.weighted_average:
print_weighted_average(H + L, data, ds)
else:
print_traces(H + L, fps)
if args.output.lower().endswith('.mat'):
if args.method.lower().startswith('c'):
savemat(args.output, convert({'fps': asarray(fps), 'correlations': asarray(correlations)}))
elif args.method.lower().startswith('a'):
savemat(args.output, convert({'fps': asarray(fps), 'auc': asarray(auc)}))
else:
savemat(args.output, convert({
'fps': asarray(fps),
'loglik': asarray(loglik),
'entropy': asarray(entropy),
'info': asarray(loglik) + asarray(entropy)}))
elif args.output:
if os.path.isdir(args.output):
filepath = os.path.join(args.output, args.method + '.{0}.{1}.xpck')
else:
filepath = args.output
experiment['args'] = args
experiment['fps'] = asarray(fps)
if args.method.lower().startswith('c'):
experiment['correlations'] = asarray(correlations)
elif args.method.lower().startswith('a'):
experiment['auc'] = asarray(auc)
else:
experiment['loglik'] = asarray(loglik)
experiment['entropy'] = asarray(entropy)
experiment['info'] = asarray(loglik) + asarray(entropy)
experiment['f'] = functions
experiment.save(filepath, overwrite=True)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
tonisbones/thisiship | utils.py | 1 | 1827 | import json
import sys
import os
import facebook
import datetime
import logging
template_dir = "templates"
log_dir = "logs"
log_level = logging.INFO
default_event_priority = "9"
jsondump_dir = "jsondump"
pages_filename = "pages.json"
"""
The next 3 methods are duplicates and need to be abstracted out (see the sketch after get_promo_banner below).
enum?
constants?
"""
def get_header():
#get document header from template
doc_head = ""
file_path = os.path.join(template_dir, "doc_head.html")
with open(file_path, 'r') as head:
doc_head = head.read()
head.close()
return doc_head
def get_footer():
#get document footer from template
doc_foot = ""
file_path = os.path.join(template_dir, "doc_foot.html")
with open(file_path, 'r') as foot:
doc_foot = foot.read()
foot.close()
return doc_foot
def get_promo_banner():
    #get promo banner from template
promo = ""
file_path = os.path.join(template_dir, "promo_banner.html")
with open(file_path, 'r') as promo_file:
promo = promo_file.read()
promo_file.close()
return promo
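# A possible abstraction for the three duplicated loaders above (illustrative
# sketch only; read_template is a hypothetical helper, not part of this module):
#
#   def read_template(name):
#       with open(os.path.join(template_dir, name), 'r') as f:
#           return f.read()
#
#   get_header()       -> read_template("doc_head.html")
#   get_footer()       -> read_template("doc_foot.html")
#   get_promo_banner() -> read_template("promo_banner.html")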
def get_pages():
pages = {}
with open(pages_filename, 'r') as pages_file:
pages = json.load(pages_file)
pages_file.close()
return pages
def update_pages(new_pages):
with open(pages_filename, 'w') as pages_file:
json.dump(new_pages, pages_file)
pages_file.close()
def get_facebook_graph():
f = open('access_token.txt', 'r')
access_token = f.read()
f.close()
graph = facebook.GraphAPI(access_token=access_token)
return graph
def get_logfile():
date_today = datetime.date.today().isoformat()
logfile_name = os.path.join(log_dir, date_today) + ".log"
return logfile_name
def log_intro(parent_script):
logging.info("{}:{}:".format(datetime.datetime.now().isoformat(),parent_script))
| gpl-3.0 |
talha131/pelican-plugins | tag_cloud/tag_cloud.py | 15 | 2646 | '''
tag_cloud
===================================
This plugin generates a tag cloud from available tags
'''
from __future__ import unicode_literals
from collections import defaultdict
from operator import itemgetter
import logging
import math
import random
from pelican import signals
logger = logging.getLogger(__name__)
def set_default_settings(settings):
settings.setdefault('TAG_CLOUD_STEPS', 4)
settings.setdefault('TAG_CLOUD_MAX_ITEMS', 100)
settings.setdefault('TAG_CLOUD_SORTING', 'random')
settings.setdefault('TAG_CLOUD_BADGE', False)
def init_default_config(pelican):
from pelican.settings import DEFAULT_CONFIG
set_default_settings(DEFAULT_CONFIG)
if(pelican):
set_default_settings(pelican.settings)
def generate_tag_cloud(generator):
tag_cloud = defaultdict(int)
for article in generator.articles:
for tag in getattr(article, 'tags', []):
tag_cloud[tag] += 1
tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
tag_cloud = tag_cloud[:generator.settings.get('TAG_CLOUD_MAX_ITEMS')]
tags = list(map(itemgetter(1), tag_cloud))
if tags:
max_count = tags[0]
min_count = tags[-1]
steps = generator.settings.get('TAG_CLOUD_STEPS')
# calculate word sizes
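        # Worked example (illustrative): with TAG_CLOUD_STEPS = 4, min_count = 1
        # and max_count = 16, a tag used 16 times maps to size 1, a tag used 4
        # times to size 2, and a tag used once to size 4 -- lower numbers mean
        # more frequent tags, scaled logarithmically between the extremes.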
def generate_tag(tag, count):
tag = (
tag,
int(math.floor(steps - (steps - 1) * math.log(count - min_count + 1)
/ (math.log(max_count - min_count + 1) or 1)))
)
if generator.settings.get('TAG_CLOUD_BADGE'):
tag += (count,)
return tag
tag_cloud = [
generate_tag(tag, count)
for tag, count in tag_cloud
]
sorting = generator.settings.get('TAG_CLOUD_SORTING')
if sorting == 'alphabetically':
tag_cloud.sort(key=lambda elem: elem[0].name)
elif sorting == 'alphabetically-rev':
tag_cloud.sort(key=lambda elem: elem[0].name, reverse=True)
elif sorting == 'size':
tag_cloud.sort(key=lambda elem: elem[1])
elif sorting == 'size-rev':
tag_cloud.sort(key=lambda elem: elem[1], reverse=True)
elif sorting == 'random':
random.shuffle(tag_cloud)
else:
logger.warning("setting for TAG_CLOUD_SORTING not recognized: %s, "
"falling back to 'random'", sorting)
random.shuffle(tag_cloud)
# make available in context
generator.tag_cloud = tag_cloud
generator._update_context(['tag_cloud'])
def register():
signals.initialized.connect(init_default_config)
signals.article_generator_finalized.connect(generate_tag_cloud)
| agpl-3.0 |
mttr/django | django/core/management/utils.py | 405 | 2590 | from __future__ import unicode_literals
import os
import sys
from subprocess import PIPE, Popen
from django.utils import six
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_text
from .base import CommandError
def popen_wrapper(args, os_err_exc_type=CommandError, universal_newlines=True):
"""
Friendly wrapper around Popen.
Returns stdout output, stderr output and OS status code.
"""
try:
p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE,
close_fds=os.name != 'nt', universal_newlines=universal_newlines)
except OSError as e:
strerror = force_text(e.strerror, DEFAULT_LOCALE_ENCODING,
strings_only=True)
six.reraise(os_err_exc_type, os_err_exc_type('Error executing %s: %s' %
(args[0], strerror)), sys.exc_info()[2])
output, errors = p.communicate()
return (
output,
force_text(errors, DEFAULT_LOCALE_ENCODING, strings_only=True),
p.returncode
)
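# A minimal usage sketch (illustrative; assumes an 'echo' binary on the PATH):
#
#   out, err, status = popen_wrapper(['echo', 'hello'])
#   # out == 'hello\n', err == '', status == 0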
def handle_extensions(extensions):
"""
Organizes multiple extensions that are separated with commas or passed by
using --extension/-e multiple times.
For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
{'.html', '.js', '.py'}
>>> handle_extensions(['.html, txt,.tpl'])
{'.html', '.tpl', '.txt'}
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set(ext_list)
def find_command(cmd, path=None, pathext=None):
if path is None:
path = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(path, six.string_types):
path = [path]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
# don't use extensions if the command ends with one of them
for ext in pathext:
if cmd.endswith(ext):
pathext = ['']
break
# check if we find the command on PATH
for p in path:
f = os.path.join(p, cmd)
if os.path.isfile(f):
return f
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
| bsd-3-clause |
alex/kombu | kombu/transport/sqlalchemy/models.py | 38 | 1961 | from __future__ import absolute_import
import datetime
from sqlalchemy import (Column, Integer, String, Text, DateTime,
Sequence, Boolean, ForeignKey, SmallInteger)
from sqlalchemy.orm import relation
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.schema import MetaData
class_registry = {}
metadata = MetaData()
ModelBase = declarative_base(metadata=metadata, class_registry=class_registry)
class Queue(object):
__table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'}
id = Column(Integer, Sequence('queue_id_sequence'), primary_key=True,
autoincrement=True)
name = Column(String(200), unique=True)
def __init__(self, name):
self.name = name
def __str__(self):
return '<Queue({self.name})>'.format(self=self)
@declared_attr
def messages(cls):
return relation('Message', backref='queue', lazy='noload')
class Message(object):
__table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'}
id = Column(Integer, Sequence('message_id_sequence'),
primary_key=True, autoincrement=True)
visible = Column(Boolean, default=True, index=True)
sent_at = Column('timestamp', DateTime, nullable=True, index=True,
onupdate=datetime.datetime.now)
payload = Column(Text, nullable=False)
version = Column(SmallInteger, nullable=False, default=1)
__mapper_args__ = {'version_id_col': version}
def __init__(self, payload, queue):
self.payload = payload
self.queue = queue
def __str__(self):
return '<Message: {0.sent_at} {0.payload} {0.queue_id}>'.format(self)
@declared_attr
def queue_id(self):
return Column(
Integer,
ForeignKey(
'%s.id' % class_registry['Queue'].__tablename__,
name='FK_kombu_message_queue'
)
)
| bsd-3-clause |
koparasy/faultinjection-gem5 | src/arch/x86/isa/insts/simd64/integer/data_transfer/move_mask.py | 91 | 2227 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PMOVMSKB_R_MMX {
limm reg, 0
movsign reg, mmxm, size=1, ext=0
};
'''
| bsd-3-clause |
Sweetgrassbuffalo/ReactionSweeGrass-v2 | .meteor/local/dev_bundle/python/Lib/lib2to3/fixes/fix_idioms.py | 327 | 4889 | """Adjust some old Python 2 idioms to their modern counterparts.
* Change some type comparisons to isinstance() calls:
type(x) == T -> isinstance(x, T)
type(x) is T -> isinstance(x, T)
type(x) != T -> not isinstance(x, T)
type(x) is not T -> not isinstance(x, T)
* Change "while 1:" into "while True:".
* Change both
v = list(EXPR)
v.sort()
foo(v)
and the more general
v = EXPR
v.sort()
foo(v)
into
v = sorted(EXPR)
foo(v)
"""
# Author: Jacques Frechet, Collin Winter
# Local imports
from .. import fixer_base
from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms
CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
TYPE = "power< 'type' trailer< '(' x=any ')' > >"
class FixIdioms(fixer_base.BaseFix):
explicit = True # The user must ask for this fixer
PATTERN = r"""
isinstance=comparison< %s %s T=any >
|
isinstance=comparison< T=any %s %s >
|
while_stmt< 'while' while='1' ':' any+ >
|
sorted=any<
any*
simple_stmt<
expr_stmt< id1=any '='
power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
>
'\n'
>
sort=
simple_stmt<
power< id2=any
trailer< '.' 'sort' > trailer< '(' ')' >
>
'\n'
>
next=any*
>
|
sorted=any<
any*
simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
sort=
simple_stmt<
power< id2=any
trailer< '.' 'sort' > trailer< '(' ')' >
>
'\n'
>
next=any*
>
""" % (TYPE, CMP, CMP, TYPE)
def match(self, node):
r = super(FixIdioms, self).match(node)
# If we've matched one of the sort/sorted subpatterns above, we
# want to reject matches where the initial assignment and the
# subsequent .sort() call involve different identifiers.
if r and "sorted" in r:
if r["id1"] == r["id2"]:
return r
return None
return r
def transform(self, node, results):
if "isinstance" in results:
return self.transform_isinstance(node, results)
elif "while" in results:
return self.transform_while(node, results)
elif "sorted" in results:
return self.transform_sort(node, results)
else:
raise RuntimeError("Invalid match")
def transform_isinstance(self, node, results):
x = results["x"].clone() # The thing inside of type()
T = results["T"].clone() # The type being compared against
x.prefix = u""
T.prefix = u" "
test = Call(Name(u"isinstance"), [x, Comma(), T])
if "n" in results:
test.prefix = u" "
test = Node(syms.not_test, [Name(u"not"), test])
test.prefix = node.prefix
return test
def transform_while(self, node, results):
one = results["while"]
one.replace(Name(u"True", prefix=one.prefix))
def transform_sort(self, node, results):
sort_stmt = results["sort"]
next_stmt = results["next"]
list_call = results.get("list")
simple_expr = results.get("expr")
if list_call:
list_call.replace(Name(u"sorted", prefix=list_call.prefix))
elif simple_expr:
new = simple_expr.clone()
new.prefix = u""
simple_expr.replace(Call(Name(u"sorted"), [new],
prefix=simple_expr.prefix))
else:
raise RuntimeError("should not have reached here")
sort_stmt.remove()
btwn = sort_stmt.prefix
# Keep any prefix lines between the sort_stmt and the list_call and
# shove them right after the sorted() call.
if u"\n" in btwn:
if next_stmt:
# The new prefix should be everything from the sort_stmt's
# prefix up to the last newline, then the old prefix after a new
# line.
prefix_lines = (btwn.rpartition(u"\n")[0], next_stmt[0].prefix)
next_stmt[0].prefix = u"\n".join(prefix_lines)
else:
assert list_call.parent
assert list_call.next_sibling is None
# Put a blank line after list_call and set its prefix.
end_line = BlankLine()
list_call.parent.append_child(end_line)
assert list_call.next_sibling is end_line
# The new prefix should be everything up to the first new line
# of sort_stmt's prefix.
end_line.prefix = btwn.rpartition(u"\n")[0]
| gpl-3.0 |
StackStorm/pecan | pecan/templating.py | 5 | 8272 | from .compat import escape
from .jsonify import encode
_builtin_renderers = {}
error_formatters = []
#
# JSON rendering engine
#
class JsonRenderer(object):
'''
Defines the builtin ``JSON`` renderer.
'''
def __init__(self, path, extra_vars):
pass
def render(self, template_path, namespace):
'''
Implements ``JSON`` rendering.
'''
return encode(namespace)
# TODO: add error formatter for json (pass it through json lint?)
_builtin_renderers['json'] = JsonRenderer
#
# Genshi rendering engine
#
try:
from genshi.template import (TemplateLoader,
TemplateError as gTemplateError)
class GenshiRenderer(object):
'''
Defines the builtin ``Genshi`` renderer.
'''
def __init__(self, path, extra_vars):
self.loader = TemplateLoader([path], auto_reload=True)
self.extra_vars = extra_vars
def render(self, template_path, namespace):
'''
Implements ``Genshi`` rendering.
'''
tmpl = self.loader.load(template_path)
stream = tmpl.generate(**self.extra_vars.make_ns(namespace))
return stream.render('html')
_builtin_renderers['genshi'] = GenshiRenderer
def format_genshi_error(exc_value):
'''
Implements ``Genshi`` renderer error formatting.
'''
if isinstance(exc_value, (gTemplateError)):
retval = '<h4>Genshi error %s</h4>' % escape(
exc_value.args[0],
True
)
retval += format_line_context(exc_value.filename, exc_value.lineno)
return retval
error_formatters.append(format_genshi_error)
except ImportError: # pragma no cover
pass
#
# Mako rendering engine
#
try:
from mako.lookup import TemplateLookup
from mako.exceptions import (CompileException, SyntaxException,
html_error_template)
class MakoRenderer(object):
'''
Defines the builtin ``Mako`` renderer.
'''
def __init__(self, path, extra_vars):
self.loader = TemplateLookup(
directories=[path],
output_encoding='utf-8'
)
self.extra_vars = extra_vars
def render(self, template_path, namespace):
'''
Implements ``Mako`` rendering.
'''
tmpl = self.loader.get_template(template_path)
return tmpl.render(**self.extra_vars.make_ns(namespace))
_builtin_renderers['mako'] = MakoRenderer
def format_mako_error(exc_value):
'''
Implements ``Mako`` renderer error formatting.
'''
if isinstance(exc_value, (CompileException, SyntaxException)):
return html_error_template().render(full=False, css=False)
error_formatters.append(format_mako_error)
except ImportError: # pragma no cover
pass
#
# Kajiki rendering engine
#
try:
from kajiki.loader import FileLoader
class KajikiRenderer(object):
'''
Defines the builtin ``Kajiki`` renderer.
'''
def __init__(self, path, extra_vars):
self.loader = FileLoader(path, reload=True)
self.extra_vars = extra_vars
def render(self, template_path, namespace):
'''
Implements ``Kajiki`` rendering.
'''
Template = self.loader.import_(template_path)
stream = Template(self.extra_vars.make_ns(namespace))
return stream.render()
_builtin_renderers['kajiki'] = KajikiRenderer
# TODO: add error formatter for kajiki
except ImportError: # pragma no cover
pass
#
# Jinja2 rendering engine
#
try:
from jinja2 import Environment, FileSystemLoader
from jinja2.exceptions import TemplateSyntaxError as jTemplateSyntaxError
class JinjaRenderer(object):
'''
Defines the builtin ``Jinja`` renderer.
'''
def __init__(self, path, extra_vars):
self.env = Environment(loader=FileSystemLoader(path))
self.extra_vars = extra_vars
def render(self, template_path, namespace):
'''
Implements ``Jinja`` rendering.
'''
template = self.env.get_template(template_path)
return template.render(self.extra_vars.make_ns(namespace))
_builtin_renderers['jinja'] = JinjaRenderer
def format_jinja_error(exc_value):
'''
Implements ``Jinja`` renderer error formatting.
'''
retval = '<h4>Jinja2 error in \'%s\' on line %d</h4><div>%s</div>'
if isinstance(exc_value, (jTemplateSyntaxError)):
retval = retval % (
exc_value.name,
exc_value.lineno,
exc_value.message
)
retval += format_line_context(exc_value.filename, exc_value.lineno)
return retval
error_formatters.append(format_jinja_error)
except ImportError: # pragma no cover
pass
#
# format helper function
#
def format_line_context(filename, lineno, context=10):
'''
    Formats the line context for error rendering.
    :param filename: the location of the file within which the error occurred
:param lineno: the offending line number
:param context: number of lines of code to display before and after the
offending line.
'''
lines = open(filename).readlines()
lineno = lineno - 1 # files are indexed by 1 not 0
if lineno > 0:
start_lineno = max(lineno - context, 0)
end_lineno = lineno + context
lines = [escape(l, True) for l in lines[start_lineno:end_lineno]]
i = lineno - start_lineno
lines[i] = '<strong>%s</strong>' % lines[i]
else:
lines = [escape(l, True) for l in lines[:context]]
msg = '<pre style="background-color:#ccc;padding:2em;">%s</pre>'
return msg % ''.join(lines)
#
# Extra Vars Rendering
#
class ExtraNamespace(object):
'''
Extra variables for the template namespace to pass to the renderer as named
parameters.
:param extras: dictionary of extra parameters. Defaults to an empty dict.
'''
def __init__(self, extras={}):
self.namespace = dict(extras)
def update(self, d):
'''
Updates the extra variable dictionary for the namespace.
'''
self.namespace.update(d)
def make_ns(self, ns):
'''
Returns the `lazily` created template namespace.
'''
if self.namespace:
val = {}
val.update(self.namespace)
val.update(ns)
return val
else:
return ns
#
# Rendering Factory
#
class RendererFactory(object):
'''
Manufactures known Renderer objects.
:param custom_renderers: custom-defined renderers to manufacture
:param extra_vars: extra vars for the template namespace
'''
def __init__(self, custom_renderers={}, extra_vars={}):
self._renderers = {}
self._renderer_classes = dict(_builtin_renderers)
self.add_renderers(custom_renderers)
self.extra_vars = ExtraNamespace(extra_vars)
def add_renderers(self, custom_dict):
'''
Adds a custom renderer.
:param custom_dict: a dictionary of custom renderers to add
'''
self._renderer_classes.update(custom_dict)
def available(self, name):
'''
Returns true if queried renderer class is available.
:param name: renderer name
'''
return name in self._renderer_classes
def get(self, name, template_path):
'''
Returns the renderer object.
:param name: name of the requested renderer
:param template_path: path to the template
'''
if name not in self._renderers:
cls = self._renderer_classes.get(name)
if cls is None:
return None
else:
self._renderers[name] = cls(template_path, self.extra_vars)
return self._renderers[name]
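# Usage sketch (illustrative, not part of pecan itself): the built-in JSON
# renderer ignores the template path and simply encodes the namespace.
#
#   factory = RendererFactory(extra_vars={'site': 'demo'})
#   renderer = factory.get('json', 'templates')
#   body = renderer.render(None, {'answer': 42})  # -> '{"answer": 42}'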
| bsd-3-clause |
Parrot-Developers/ardupilot | Tools/autotest/param_metadata/htmlemit.py | 148 | 2754 | #!/usr/bin/env python
import re
from param import *
from emit import Emit
import cgi
# Emit docs in a form acceptable to the APM wordpress docs site
class HtmlEmit(Emit):
def __init__(self):
html_fname = 'Parameters.html'
self.f = open(html_fname, mode='w')
self.preamble = '''<!-- Dynamically generated list of documented parameters
This page was generated using Tools/autotest/param_metadata/param_parse.py
DO NOT EDIT
-->
<h3 style="text-align: center">Complete Parameter List</h3>
<hr />
<p>This is a complete list of the parameters which can be set via the MAVLink protocol in the EEPROM of your APM to control vehicle behaviour. This list is automatically generated from the latest ardupilot source code, and so may contain parameters which are not yet in the stable released versions of the code.</p>
<!-- add auto-generated table of contents with "Table of Contents Plus" plugin -->
[toc exclude="Complete Parameter List"]
'''
self.t = ''
def escape(self, s):
s = s.replace(' ', '-')
s = s.replace(':', '-')
s = s.replace('(', '')
s = s.replace(')', '')
return s
def close(self):
self.f.write(self.preamble)
self.f.write(self.t)
self.f.close()
def start_libraries(self):
pass
def emit(self, g, f):
tag = '%s Parameters' % g.name
t = '\n\n<h1>%s</h1>\n' % tag
for param in g.params:
if not hasattr(param, 'DisplayName') or not hasattr(param, 'Description'):
continue
d = param.__dict__
tag = '%s (%s)' % (param.DisplayName, param.name)
t += '\n\n<h2>%s</h2>' % tag
if d.get('User',None) == 'Advanced':
t += '<em>Note: This parameter is for advanced users</em><br>'
t += "\n\n<p>%s</p>\n" % cgi.escape(param.Description)
t += "<ul>\n"
for field in param.__dict__.keys():
if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields:
if field == 'Values' and Emit.prog_values_field.match(param.__dict__[field]):
values = (param.__dict__[field]).split(',')
t += "<table><th>Value</th><th>Meaning</th>\n"
for value in values:
v = value.split(':')
t += "<tr><td>%s</td><td>%s</td></tr>\n" % (v[0], v[1])
t += "</table>\n"
else:
t += "<li>%s: %s</li>\n" % (field, cgi.escape(param.__dict__[field]))
t += "</ul>\n"
self.t += t
| gpl-3.0 |
rolapp/ZattooBox | pytz/tests/test_lazy.py | 88 | 9772 | from operator import *
import os.path
import sys
import unittest
import warnings
if __name__ == '__main__':
    # Only munge path if invoked as a script. Testrunners should have set up
    # the paths already
sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, os.pardir)))
from pytz.lazy import LazyList, LazySet
class LazyListTestCase(unittest.TestCase):
initial_data = [3,2,1]
def setUp(self):
self.base = [3, 2, 1]
self.lesser = [2, 1, 0]
self.greater = [4, 3, 2]
self.lazy = LazyList(iter(list(self.base)))
def test_unary_ops(self):
unary_ops = [str, repr, len, bool, not_]
try:
unary_ops.append(unicode)
except NameError:
pass # unicode no longer exists in Python 3.
for op in unary_ops:
self.assertEqual(
op(self.lazy),
op(self.base), str(op))
def test_binary_ops(self):
binary_ops = [eq, ge, gt, le, lt, ne, add, concat]
try:
binary_ops.append(cmp)
except NameError:
pass # cmp no longer exists in Python 3.
for op in binary_ops:
self.assertEqual(
op(self.lazy, self.lazy),
op(self.base, self.base), str(op))
for other in [self.base, self.lesser, self.greater]:
self.assertEqual(
op(self.lazy, other),
op(self.base, other), '%s %s' % (op, other))
self.assertEqual(
op(other, self.lazy),
op(other, self.base), '%s %s' % (op, other))
# Multiplication
self.assertEqual(self.lazy * 3, self.base * 3)
self.assertEqual(3 * self.lazy, 3 * self.base)
# Contains
self.assertTrue(2 in self.lazy)
self.assertFalse(42 in self.lazy)
def test_iadd(self):
self.lazy += [1]
self.base += [1]
self.assertEqual(self.lazy, self.base)
def test_bool(self):
self.assertTrue(bool(self.lazy))
self.assertFalse(bool(LazyList()))
self.assertFalse(bool(LazyList(iter([]))))
def test_hash(self):
self.assertRaises(TypeError, hash, self.lazy)
def test_isinstance(self):
self.assertTrue(isinstance(self.lazy, list))
self.assertFalse(isinstance(self.lazy, tuple))
def test_callable(self):
try:
callable
except NameError:
return # No longer exists with Python 3.
self.assertFalse(callable(self.lazy))
def test_append(self):
self.base.append('extra')
self.lazy.append('extra')
self.assertEqual(self.lazy, self.base)
def test_count(self):
self.assertEqual(self.lazy.count(2), 1)
def test_index(self):
self.assertEqual(self.lazy.index(2), 1)
def test_extend(self):
self.base.extend([6, 7])
self.lazy.extend([6, 7])
self.assertEqual(self.lazy, self.base)
def test_insert(self):
self.base.insert(0, 'ping')
self.lazy.insert(0, 'ping')
self.assertEqual(self.lazy, self.base)
def test_pop(self):
self.assertEqual(self.lazy.pop(), self.base.pop())
self.assertEqual(self.lazy, self.base)
def test_remove(self):
self.base.remove(2)
self.lazy.remove(2)
self.assertEqual(self.lazy, self.base)
def test_reverse(self):
self.base.reverse()
self.lazy.reverse()
self.assertEqual(self.lazy, self.base)
def test_reversed(self):
self.assertEqual(list(reversed(self.lazy)), list(reversed(self.base)))
def test_sort(self):
self.base.sort()
self.assertNotEqual(self.lazy, self.base, 'Test data already sorted')
self.lazy.sort()
self.assertEqual(self.lazy, self.base)
def test_sorted(self):
self.assertEqual(sorted(self.lazy), sorted(self.base))
def test_getitem(self):
for idx in range(-len(self.base), len(self.base)):
self.assertEqual(self.lazy[idx], self.base[idx])
def test_setitem(self):
for idx in range(-len(self.base), len(self.base)):
self.base[idx] = idx + 1000
self.assertNotEqual(self.lazy, self.base)
self.lazy[idx] = idx + 1000
self.assertEqual(self.lazy, self.base)
def test_delitem(self):
del self.base[0]
self.assertNotEqual(self.lazy, self.base)
del self.lazy[0]
self.assertEqual(self.lazy, self.base)
del self.base[-2]
self.assertNotEqual(self.lazy, self.base)
del self.lazy[-2]
self.assertEqual(self.lazy, self.base)
def test_iter(self):
self.assertEqual(list(iter(self.lazy)), list(iter(self.base)))
def test_getslice(self):
for i in range(-len(self.base), len(self.base)):
for j in range(-len(self.base), len(self.base)):
for step in [-1, 1]:
self.assertEqual(self.lazy[i:j:step], self.base[i:j:step])
def test_setslice(self):
for i in range(-len(self.base), len(self.base)):
for j in range(-len(self.base), len(self.base)):
for step in [-1, 1]:
replacement = range(0, len(self.base[i:j:step]))
self.base[i:j:step] = replacement
self.lazy[i:j:step] = replacement
self.assertEqual(self.lazy, self.base)
def test_delslice(self):
del self.base[0:1]
del self.lazy[0:1]
self.assertEqual(self.lazy, self.base)
del self.base[-1:1:-1]
del self.lazy[-1:1:-1]
self.assertEqual(self.lazy, self.base)
class LazySetTestCase(unittest.TestCase):
initial_data = set([3,2,1])
def setUp(self):
self.base = set([3, 2, 1])
self.lazy = LazySet(iter(set(self.base)))
def test_unary_ops(self):
# These ops just need to work.
unary_ops = [str, repr]
try:
unary_ops.append(unicode)
except NameError:
pass # unicode no longer exists in Python 3.
for op in unary_ops:
op(self.lazy) # These ops just need to work.
# These ops should return identical values as a real set.
unary_ops = [len, bool, not_]
for op in unary_ops:
self.assertEqual(
op(self.lazy),
op(self.base), '%s(lazy) == %r' % (op, op(self.lazy)))
def test_binary_ops(self):
binary_ops = [eq, ge, gt, le, lt, ne, sub, and_, or_, xor]
try:
binary_ops.append(cmp)
except NameError:
pass # cmp no longer exists in Python 3.
for op in binary_ops:
self.assertEqual(
op(self.lazy, self.lazy),
op(self.base, self.base), str(op))
self.assertEqual(
op(self.lazy, self.base),
op(self.base, self.base), str(op))
self.assertEqual(
op(self.base, self.lazy),
op(self.base, self.base), str(op))
# Contains
self.assertTrue(2 in self.lazy)
self.assertFalse(42 in self.lazy)
def test_iops(self):
try:
iops = [isub, iand, ior, ixor]
except NameError:
return # Don't exist in older Python versions.
for op in iops:
# Mutating operators, so make fresh copies.
lazy = LazySet(self.base)
base = self.base.copy()
op(lazy, set([1]))
op(base, set([1]))
self.assertEqual(lazy, base, str(op))
def test_bool(self):
self.assertTrue(bool(self.lazy))
self.assertFalse(bool(LazySet()))
self.assertFalse(bool(LazySet(iter([]))))
def test_hash(self):
self.assertRaises(TypeError, hash, self.lazy)
def test_isinstance(self):
self.assertTrue(isinstance(self.lazy, set))
def test_callable(self):
try:
callable
except NameError:
return # No longer exists with Python 3.
self.assertFalse(callable(self.lazy))
def test_add(self):
self.base.add('extra')
self.lazy.add('extra')
self.assertEqual(self.lazy, self.base)
def test_copy(self):
self.assertEqual(self.lazy.copy(), self.base)
def test_method_ops(self):
ops = [
'difference', 'intersection', 'isdisjoint',
'issubset', 'issuperset', 'symmetric_difference', 'union',
'difference_update', 'intersection_update',
'symmetric_difference_update', 'update']
for op in ops:
if not hasattr(set, op):
continue # Not in this version of Python.
# Make a copy, as some of the ops are mutating.
lazy = LazySet(set(self.base))
base = set(self.base)
self.assertEqual(
getattr(self.lazy, op)(set([1])),
getattr(self.base, op)(set([1])), op)
self.assertEqual(self.lazy, self.base, op)
def test_discard(self):
self.base.discard(1)
self.assertNotEqual(self.lazy, self.base)
self.lazy.discard(1)
self.assertEqual(self.lazy, self.base)
def test_pop(self):
self.assertEqual(self.lazy.pop(), self.base.pop())
self.assertEqual(self.lazy, self.base)
def test_remove(self):
self.base.remove(2)
self.lazy.remove(2)
self.assertEqual(self.lazy, self.base)
def test_clear(self):
self.lazy.clear()
self.assertEqual(self.lazy, set())
if __name__ == '__main__':
warnings.simplefilter("error") # Warnings should be fatal in tests.
unittest.main()
| bsd-2-clause |
jhjguxin/blogserver | lib/python2.7/site-packages/PIL/GbrImagePlugin.py | 40 | 1628 | #
# The Python Imaging Library
# $Id$
#
# load a GIMP brush file
#
# History:
# 96-03-14 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
import Image, ImageFile
def i32(c):
return ord(c[3]) + (ord(c[2])<<8) + (ord(c[1])<<16) + (ord(c[0])<<24L)
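# i32 decodes a big-endian unsigned 32-bit integer, e.g.
# i32("\x00\x00\x00\x14") == 20 -- the minimum header size accepted below.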
def _accept(prefix):
return i32(prefix) >= 20 and i32(prefix[4:8]) == 1
##
# Image plugin for the GIMP brush format.
class GbrImageFile(ImageFile.ImageFile):
format = "GBR"
format_description = "GIMP brush file"
def _open(self):
header_size = i32(self.fp.read(4))
version = i32(self.fp.read(4))
if header_size < 20 or version != 1:
raise SyntaxError, "not a GIMP brush"
width = i32(self.fp.read(4))
height = i32(self.fp.read(4))
bytes = i32(self.fp.read(4))
if width <= 0 or height <= 0 or bytes != 1:
raise SyntaxError, "not a GIMP brush"
comment = self.fp.read(header_size - 20)[:-1]
self.mode = "L"
self.size = width, height
self.info["comment"] = comment
# Since the brush is so small, we read the data immediately
self.data = self.fp.read(width * height)
def load(self):
if not self.data:
return
# create an image out of the brush data block
self.im = Image.core.new(self.mode, self.size)
self.im.fromstring(self.data)
self.data = ""
#
# registry
Image.register_open("GBR", GbrImageFile, _accept)
Image.register_extension("GBR", ".gbr")
| mit |
kyro38/TagData | osmAPI.py | 1 | 11061 | #!/usr/bin/python
"""
Basically what this script does:
* First it retrieves from the Overpass API every line of the TAG network (id 3300434)
* Each line has 2 to 6 directions (also known as multiple branches).
* Each direction has a sorted list of stations. Some of them are shared (main trunk).
* By comparing the directions, they are merged into 2.
* Returns a list of lines, each with its OSM id, line name and the list of stations for each direction.
"""
import sys
import console
import xml.dom.minidom
import itertools
from pprint import pprint
import json
import getopt
from bs4 import BeautifulSoup
import OsmTagStations as ots
import localPageCache
try:
opts, args = getopt.getopt(sys.argv[1:], "v", ["print"])
except getopt.GetoptError:
print('getopt error in osmAPI.py')
sys.exit(2)
verbose = False
for opt, arg in opts:
if opt == '-v':
verbose = True
def main():
parseOsmTAGRelation()
'''
OSM has a list of lines for the TAG network.
Data can be retrieved using the Overpass API.
'''
def parseOsmTAGRelation():
networkId = '3921495'
url = "http://overpass-api.de/api/interpreter?data=relation%28"+networkId+"%29%3Brel%28r%29%3Bout%20body%3B%0A"
s = localPageCache.getPage(url)
    # Parsing the Overpass API result for the TAG network relation
soup = BeautifulSoup(s)
lineRelations = soup.findAll("relation")
# Progress bar is a cool feature.
(termWidth, height) = console.getTerminalSize()
#total = len(lineRelations)
#index = 0
lines = list()
for aLine in lineRelations : # 48 elements
# index = index+1
# percentage = index / total
# sys.stdout.write("\r")
# for i in range(int(termWidth*percentage)):
# sys.stdout.write("-")
# sys.stdout.flush()
        myLine = OsmLine(aLine)  # each object parses the data related to its line
lines.append(myLine)
jsonOutput = json.dumps(lines, indent=4, sort_keys=True, cls=OsmLineEncoder)
if verbose:
print(jsonOutput)
return jsonOutput
'''
This class represents the data parsed from OSM for a particular line.
It has:
* The line name (1, 2, 3, A, B etc.)
* The stations for each direction (lines have 2 directions)
* The ID of the line on OSM
'''
class OsmLine:
    def __init__(self, node):
self.name = node.find(k="name")['v']
self.relationId = int(node["id"])
directions = node.findAll("member")
self.directions = list()
        self.stationsSensA = list()  # station IDs for a given "sens" (direction)
        self.stationsSensB = list()  # station IDs for a given "sens" (direction)
self.terminusA = list()
self.terminusB = list()
for aDirection in directions:
self.directions.append(OsmDirection(aDirection["ref"]))
index = 0
        while not self.dispatchDirections(index):  # while it cannot dispatch at this index, try the next station
index = index+1;
def __repr__(self):
return "OsmLine()"
def __str__(self):
        return str(self.relationId) + " - line: " + self.name + " - " + str(len(self.directions)) + " directions"
# Splitting the directions in 2 categories
# Station at Index must be a station shared by each subDirection
# Returns false if it could not disptach the station with the index
def dispatchDirections(self, index):
# Using the first direction for the base of the comparison.
baseStations = self.directions[0].stations()
# First direction is sensA by default
if index < len(baseStations):
aId = baseStations[index]
else:
print (" ", len(baseStations), self.name)
quit()
# Related stations are all the stations at location (== same name)
relatedStations = ots.relatedStations(aId)
# Search for a station present in every direction
for aDirection in self.directions[1:]: # Since index 0 is the base for this, skipping it every time.
            if ots.isSoloStation(aId):
                if aId not in aDirection.stations():
                    return False
            else:
                if not set.intersection(set(aDirection.stations()), set(relatedStations)):
                    return False
        # Skipping the station if it's present multiple times on a track (e.g. a dead-end loop in the middle of the line)
if ots.hasDuplicateName(baseStations, index):
return False
        # Skipping when the previous and the next stations share the same name (occurs in a dead-end loop like above)
if index-1 >= 0 and index+1 < len(self.directions[0].stations()): # bounds checking
if ots.stationName(baseStations[index-1]) == ots.stationName(baseStations[index+1]):
return False
        # At this point we have the station we need,
        # now comparing the next or the previous station
        nextStationId = baseStations[index+1]
        if index > 0:
previousStationId = baseStations[index-1]
# Lists for where the station will be added
sensA = [self.directions[0]] # Already adding stations of the first direction
sensB = list()
self.terminusA.append(self.directions[0][-1])
# Actually dispatching the directions
for aDirection in self.directions[1:]: # skipping index 0
# Index of the sharedStation for this direction
# The intersection should return only one item.
# If not there is a problem in selecting the station
if ots.isSoloStation(aId):
sharedStation = [aId]
else:
sharedStation = set.intersection(set(aDirection.stations()), set(relatedStations))
if len(sharedStation) == 1:
# Index of the station for this direction
stationIndex = aDirection.stations().index(sharedStation.pop())
                # The next Station is the same as for the 1st sub-direction
if stationIndex < len(aDirection.stations())-1 and ots.isSameStation(nextStationId, aDirection[stationIndex+1]):
sensA.append(aDirection)
self.terminusA.append(aDirection[-1])
                # The previous Station is the same as for the 1st sub-direction
elif index > 0 and ots.isSameStation(previousStationId, aDirection[stationIndex-1]):
sensA.append(aDirection)
self.terminusA.append(aDirection[-1])
                # Every other case: it's the opposite direction of the 1st sub-direction
else:
self.terminusB.append(aDirection[-1])
sensB.append(aDirection)
            else:
                print("ERROR: expected exactly one shared station, got", len(sharedStation), "for line", self.name)
mergedDirectionA = list(itertools.chain.from_iterable(sensA))
mergedDirectionB = list(itertools.chain.from_iterable(sensB))
        # Removing partial terminus, only keeping branch terminus & trunk terminus
        # Iterating over a copy since entries are removed from the list while looping
        for aTerminus in self.terminusA[:]:
            if mergedDirectionA.count(aTerminus) > 1:
                self.terminusA.remove(aTerminus)
                mergedDirectionA.remove(aTerminus)
        # Same pruning for sens B, again iterating over a copy
        for aTerminus in self.terminusB[:]:
            if mergedDirectionB.count(aTerminus) > 1:
                self.terminusB.remove(aTerminus)
                mergedDirectionB.remove(aTerminus)
        # Making a big list of the stations for each direction. Always with unique values and ordered.
        # Order matters for the first direction as it will be used to compare with Mobitrans
self.stationsSensA[:] = unique(itertools.chain.from_iterable(sensA))
self.stationsSensB[:] = unique(itertools.chain.from_iterable(sensB))
return True
def testStationSum(self, directionSet):
resultSet = set()
for aDirection in directionSet:
url = "http://api.openstreetmap.org/api/0.6/relation/"+str(aDirection.id)
s = localPageCache.getPage(url)
soup = BeautifulSoup(s)
orderedStations = soup.findAll(member_role_stop)
for aStation in orderedStations:
resultSet.add(int(aStation["ref"]))
#print([x.id for x in directionSet])
return len(resultSet)
class OsmLineEncoder(json.JSONEncoder):
'''
    JSONEncoder for the OsmLine class (and included elements).
'''
def default(self, obj):
if isinstance(obj, OsmLine):
aDict = dict()
aDict["name"] = obj.name
aDict["OsmId"] = obj.relationId
aDict["sensA"] = obj.stationsSensA
aDict["sensB"] = obj.stationsSensB
aDict["terminusA"] = obj.terminusA
aDict["terminusB"] = obj.terminusB
return aDict
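# A minimal usage sketch for the encoder above (the variable name is an
# assumption):
#   json.dumps(aLine, cls=OsmLineEncoder)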
class OsmDirection(object):
'''
    Every line has at least 2 sub-directions, sometimes more.
    This is the representation of one of them.
'''
def __init__(self, id):
self.id = int(id) # OSM reference for the direction
self.__stations = list() # ordered list of stations for the direction
# This class is iterable
def __iter__(self):
return iter(self.__stations)
# Also possible to directly access the stations
def __getitem__(self, key):
return self.__stations[key]
def stations(self):
if len(self.__stations) == 0:
self.fetchStations()
return self.__stations
# Fetch the ordered list of stations
def fetchStations(self):
        # Overpass doesn't provide an ordered detailed list, so this uses the base OSM API.
url = "http://api.openstreetmap.org/api/0.6/relation/" + str(self.id)
#f = urlopen(url)
#s = f.read()
s = localPageCache.getPage(url)
soup = BeautifulSoup(s)
orderedStations = soup.findAll(member_role_stop)
for aStation in orderedStations:
# Only storing the OSM node ID of the station
self.__stations.append(int(aStation["ref"]))
if not ots.hasStation(int(aStation["ref"])):
print("Error : ", int(aStation["ref"]), "not present")
def member_role_stop(tag):
'''
    Function called by BeautifulSoup to find XML member nodes where role="stop"
'''
return tag.name == "member" and tag['role'] == "stop"
def unique(seq):
"""
    Yield the unique elements of seq, preserving the order of the input sequence.
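
    Example (illustrative):
        >>> list(unique([3, 1, 3, 2, 1]))
        [3, 1, 2]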
"""
seen = set()
for item in seq:
if item not in seen:
seen.add(item)
yield item
def printXML(xmlStr):
xml2 = xml.dom.minidom.parseString(xmlStr)
print(xml2.toprettyxml())
if __name__ == '__main__':
main()
| apache-2.0 |
amit0701/rally | rally/verification/tempest/compare2html.py | 8 | 1213 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Output verification comparison results in html."""
from rally.ui import utils as ui_utils
__description__ = "List differences between two verification runs"
__title__ = "Verification Comparison"
__version__ = "0.1"
def create_report(results):
template_kw = {
"heading": {
"title": __title__,
"description": __description__,
"parameters": [("Difference Count", len(results))]
},
"generator": "compare2html %s" % __version__,
"results": results
}
template = ui_utils.get_template("verification/compare.mako")
output = template.render(**template_kw)
return output.encode("utf8")
| apache-2.0 |
schleichdi2/OpenNfr_E2_Gui-6.0 | lib/python/Plugins/SystemPlugins/NetworkWizard/NetworkWizard.py | 42 | 13887 | from boxbranding import getMachineBrand, getMachineName, getBoxType
from os import system
from enigma import eTimer
from Screens.WizardLanguage import WizardLanguage
from Screens.Rc import Rc
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Components.Pixmap import Pixmap
from Components.Sources.Boolean import Boolean
from Components.Network import iNetwork
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
class NetworkWizard(WizardLanguage, Rc):
skin = """
<screen position="0,0" size="720,576" title="Welcome..." flags="wfNoBorder" >
<widget name="text" position="153,40" size="340,300" font="Regular;22" />
<widget source="list" render="Listbox" position="53,340" size="440,180" scrollbarMode="showOnDemand" >
<convert type="StringList" />
</widget>
<widget name="config" position="53,340" zPosition="1" size="440,180" transparent="1" scrollbarMode="showOnDemand" />
<ePixmap pixmap="buttons/button_red.png" position="40,225" zPosition="0" size="15,16" transparent="1" alphatest="on" />
<widget name="languagetext" position="55,225" size="95,30" font="Regular;18" />
<widget name="wizard" pixmap="/wizard.png" position="40,50" zPosition="10" size="110,174" alphatest="on" />
<widget name="rc" pixmaps="rc.png,rcold.png" position="500,50" zPosition="10" size="154,500" alphatest="on" />
<widget name="arrowdown" pixmap="arrowdown.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget name="arrowdown2" pixmap="arrowdown.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget name="arrowup" pixmap="arrowup.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget name="arrowup2" pixmap="arrowup.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget source="VKeyIcon" render="Pixmap" pixmap="buttons/key_text.png" position="40,260" zPosition="0" size="35,25" transparent="1" alphatest="on" >
<convert type="ConditionalShowHide" />
</widget>
<widget name="HelpWindow" pixmap="buttons/key_text.png" position="125,170" zPosition="1" size="1,1" transparent="1" alphatest="on" />
</screen>"""
def __init__(self, session, interface = None):
self.xmlfile = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkWizard/networkwizard.xml")
WizardLanguage.__init__(self, session, showSteps = False, showStepSlider = False)
Rc.__init__(self)
Screen.setTitle(self, _("NetworkWizard"))
self.session = session
self["wizard"] = Pixmap()
self["HelpWindow"] = Pixmap()
self["HelpWindow"].hide()
self["VKeyIcon"] = Boolean(False)
self.InstalledInterfaceCount = None
self.Adapterlist = None
self.InterfaceState = None
self.isInterfaceUp = None
self.WlanPluginInstalled = False
self.ap = None
self.w = None
if interface is not None:
self.selectedInterface = interface
else:
self.selectedInterface = None
self.NextStep = None
self.resetRef = None
self.checkRef = None
self.AdapterRef = None
self.APList = None
self.newAPlist = None
self.oldlist = None
self.originalInterfaceState = {}
self.originalInterfaceStateChanged = False
self.Text = None
self.rescanTimer = eTimer()
self.rescanTimer.callback.append(self.rescanTimerFired)
self.getInstalledInterfaceCount()
self.isWlanPluginInstalled()
def exitWizardQuestion(self, ret = False):
if ret:
self.markDone()
self.close()
def markDone(self):
self.stopScan()
del self.rescanTimer
self.checkOldInterfaceState()
self.exit()
pass
def back(self):
self.stopScan()
self.ap = None
WizardLanguage.back(self)
def stopScan(self):
self.rescanTimer.stop()
if self.w is not None:
from Plugins.SystemPlugins.WirelessLan.Wlan import iWlan
iWlan.stopGetNetworkList()
self.w = None
def getInstalledInterfaceCount(self):
self.originalInterfaceState = {}
self.Adapterlist = iNetwork.getAdapterList()
self.InstalledInterfaceCount = len(self.Adapterlist)
if self.Adapterlist is not None:
if self.InstalledInterfaceCount == 1 and self.selectedInterface is None:
self.selectedInterface = self.Adapterlist[0]
for interface in iNetwork.getAdapterList():
self.originalInterfaceState[interface] = {}
self.originalInterfaceState[interface]["up"] = iNetwork.getAdapterAttribute(interface, 'up')
def selectInterface(self):
self.InterfaceState = None
if self.selectedInterface is None:
if self.InstalledInterfaceCount <= 1:
if not iNetwork.isWirelessInterface(self.selectedInterface):
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
self.checkInterface(self.selectedInterface)
else:
self.NextStep = 'selectinterface'
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
if not iNetwork.isWirelessInterface(self.selectedInterface):
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
self.checkInterface(self.selectedInterface)
def checkOldInterfaceState(self):
		# Take the interface down again if it was originally down and the config is unchanged.
if self.originalInterfaceStateChanged is False:
for interface in self.originalInterfaceState.keys():
if interface == self.selectedInterface:
if self.originalInterfaceState[interface]["up"] is False:
if iNetwork.checkforInterface(interface) is True:
system("ifconfig " + interface + " down")
def listInterfaces(self):
self.checkOldInterfaceState()
list = [(iNetwork.getFriendlyAdapterName(x),x) for x in iNetwork.getAdapterList()]
list.append((_("Exit network wizard"), "end"))
return list
def InterfaceSelectionMade(self, index):
self.selectedInterface = index
self.InterfaceSelect(index)
def InterfaceSelect(self, index):
if index == 'end':
self.NextStep = 'end'
elif index == 'eth0':
self.NextStep = 'nwconfig'
elif index == 'eth1' and getBoxType() == "et10000":
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
def InterfaceSelectionMoved(self):
self.InterfaceSelect(self.selection)
def checkInterface(self,iface):
self.stopScan()
if self.Adapterlist is None:
self.Adapterlist = iNetwork.getAdapterList()
		if self.NextStep != 'end':
if len(self.Adapterlist) == 0:
#Reset Network to defaults if network broken
iNetwork.resetNetworkConfig('lan', self.resetNetworkConfigCB)
self.resetRef = self.session.openWithCallback(self.resetNetworkConfigFinished, MessageBox, _("Please wait while we prepare your network interfaces..."), type = MessageBox.TYPE_INFO, enable_input = False)
if iface in iNetwork.getInstalledAdapters():
if iface in iNetwork.configuredNetworkAdapters and len(iNetwork.configuredNetworkAdapters) == 1:
if iNetwork.getAdapterAttribute(iface, 'up') is True:
self.isInterfaceUp = True
else:
self.isInterfaceUp = False
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
self.isInterfaceUp = iNetwork.checkforInterface(iface)
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
self.resetNetworkConfigFinished(False)
def resetNetworkConfigFinished(self,data):
if data is True:
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
def resetNetworkConfigCB(self,callback,iface):
if callback is not None:
if callback is True:
iNetwork.getInterfaces(self.getInterfacesFinished)
def getInterfacesFinished(self, data):
if data is True:
if iNetwork.getAdapterAttribute(self.selectedInterface, 'up') is True:
self.isInterfaceUp = True
else:
self.isInterfaceUp = False
self.resetRef.close(True)
else:
print "we should never come here!"
def AdapterSetupEnd(self, iface):
self.originalInterfaceStateChanged = True
if iNetwork.getAdapterAttribute(iface, "dhcp") is True:
iNetwork.checkNetworkState(self.AdapterSetupEndFinished)
self.AdapterRef = self.session.openWithCallback(self.AdapterSetupEndCB, MessageBox, _("Please wait while we test your network..."), type = MessageBox.TYPE_INFO, enable_input = False)
else:
self.currStep = self.getStepWithID("confdns")
self.afterAsyncCode()
def AdapterSetupEndCB(self,data):
if data is True:
if iNetwork.isWirelessInterface(self.selectedInterface):
if self.WlanPluginInstalled:
from Plugins.SystemPlugins.WirelessLan.Wlan import iStatus
iStatus.getDataForInterface(self.selectedInterface,self.checkWlanStateCB)
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
def AdapterSetupEndFinished(self,data):
if data <= 2:
self.InterfaceState = True
else:
self.InterfaceState = False
self.AdapterRef.close(True)
def checkWlanStateCB(self,data,status):
if data is not None:
if data is True:
if status is not None:
text1 = _("Your %s %s is now ready to be used.\n\nYour internet connection is working now.\n\n") % (getMachineBrand(), getMachineName())
text2 = _('Accesspoint:') + "\t" + str(status[self.selectedInterface]["accesspoint"]) + "\n"
text3 = _('SSID:') + "\t" + str(status[self.selectedInterface]["essid"]) + "\n"
text4 = _('Link quality:') + "\t" + str(status[self.selectedInterface]["quality"])+ "\n"
text5 = _('Signal strength:') + "\t" + str(status[self.selectedInterface]["signal"]) + "\n"
text6 = _('Bitrate:') + "\t" + str(status[self.selectedInterface]["bitrate"]) + "\n"
text7 = _('Encryption:') + " " + str(status[self.selectedInterface]["encryption"]) + "\n"
text8 = _("Please press OK to continue.")
infotext = text1 + text2 + text3 + text4 + text5 + text7 +"\n" + text8
self.currStep = self.getStepWithID("checkWlanstatusend")
self.Text = infotext
if str(status[self.selectedInterface]["accesspoint"]) == "Not-Associated":
self.InterfaceState = False
self.afterAsyncCode()
def checkNetwork(self):
iNetwork.checkNetworkState(self.checkNetworkStateCB)
self.checkRef = self.session.openWithCallback(self.checkNetworkCB, MessageBox, _("Please wait while we test your network..."), type = MessageBox.TYPE_INFO, enable_input = False)
def checkNetworkCB(self,data):
if data is True:
if iNetwork.isWirelessInterface(self.selectedInterface):
if self.WlanPluginInstalled:
from Plugins.SystemPlugins.WirelessLan.Wlan import iStatus
iStatus.getDataForInterface(self.selectedInterface,self.checkWlanStateCB)
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
def checkNetworkStateCB(self,data):
if data <= 2:
self.InterfaceState = True
else:
self.InterfaceState = False
self.checkRef.close(True)
def rescanTimerFired(self):
self.rescanTimer.stop()
self.updateAPList()
def updateAPList(self):
self.oldlist = self.APList
self.newAPlist = []
newList = []
newListIndex = None
currentListEntry = None
newList = self.listAccessPoints()
for oldentry in self.oldlist:
if oldentry not in newList:
newList.append(oldentry)
for newentry in newList:
self.newAPlist.append(newentry)
if len(self.newAPlist):
if self.wizard[self.currStep].has_key("dynamiclist"):
currentListEntry = self["list"].getCurrent()
if currentListEntry is not None:
idx = 0
for entry in self.newAPlist:
if entry == currentListEntry:
newListIndex = idx
idx +=1
self.wizard[self.currStep]["evaluatedlist"] = self.newAPlist
self['list'].setList(self.newAPlist)
if newListIndex is not None:
self["list"].setIndex(newListIndex)
self["list"].updateList(self.newAPlist)
def listAccessPoints(self):
self.APList = []
if self.WlanPluginInstalled is False:
self.APList.append( ( _("No networks found"), None ) )
else:
from Plugins.SystemPlugins.WirelessLan.Wlan import iWlan
iWlan.setInterface(self.selectedInterface)
self.w = iWlan.getInterface()
aps = iWlan.getNetworkList()
if aps is not None:
print "[NetworkWizard.py] got Accespoints!"
tmplist = []
complist = []
for ap in aps:
a = aps[ap]
if a['active']:
tmplist.append( (a['bssid'], a['essid']) )
complist.append( (a['bssid'], a['essid']) )
for entry in tmplist:
if entry[1] == "":
for compentry in complist:
if compentry[0] == entry[0]:
complist.remove(compentry)
for entry in complist:
self.APList.append( (entry[1], entry[1]) )
if not len(aps):
self.APList.append( ( _("No networks found"), None ) )
self.rescanTimer.start(4000)
return self.APList
def AccessPointsSelectionMoved(self):
self.ap = self.selection
self.NextStep = 'wlanconfig'
def checkWlanSelection(self):
self.stopScan()
self.currStep = self.getStepWithID(self.NextStep)
def isWlanPluginInstalled(self):
try:
from Plugins.SystemPlugins.WirelessLan.Wlan import iWlan
except ImportError:
self.WlanPluginInstalled = False
else:
self.WlanPluginInstalled = True
def listChoices(self):
self.stopScan()
list = []
if self.WlanPluginInstalled:
list.append((_("Configure your wireless LAN again"), "scanwlan"))
list.append((_("Configure your internal LAN"), "nwconfig"))
list.append((_("Exit network wizard"), "end"))
return list
def ChoicesSelectionMade(self, index):
self.ChoicesSelect(index)
def ChoicesSelect(self, index):
if index == 'end':
self.NextStep = 'end'
elif index == 'nwconfig':
self.selectedInterface = "eth0"
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
def ChoicesSelectionMoved(self):
pass
| gpl-2.0 |
sanjayankur31/hamster | wafadmin/TaskGen.py | 7 | 10634 | #!/usr/bin/env python2
# encoding: utf-8
import sys
if sys.hexversion < 0x020400f0: from sets import Set as set
import os,traceback,copy
import Build,Task,Utils,Logs,Options
from Logs import debug,error,warn
from Constants import*
typos={'sources':'source','targets':'target','include':'includes','define':'defines','importpath':'importpaths','install_var':'install_path','install_subdir':'install_path','inst_var':'install_path','inst_dir':'install_path','feature':'features',}
class register_obj(type):
def __init__(cls,name,bases,dict):
super(register_obj,cls).__init__(name,bases,dict)
name=cls.__name__
suffix='_taskgen'
if name.endswith(suffix):
task_gen.classes[name.replace(suffix,'')]=cls
class task_gen(object):
__metaclass__=register_obj
mappings={}
mapped={}
prec=Utils.DefaultDict(list)
traits=Utils.DefaultDict(set)
classes={}
def __init__(self,*kw,**kwargs):
self.prec=Utils.DefaultDict(list)
self.source=''
self.target=''
self.meths=[]
self.mappings={}
self.features=list(kw)
self.tasks=[]
self.default_chmod=O644
self.default_install_path=None
self.allnodes=[]
self.bld=kwargs.get('bld',Build.bld)
self.env=self.bld.env.copy()
self.path=self.bld.path
self.name=''
self.idx=self.bld.idx[self.path.id]=self.bld.idx.get(self.path.id,0)+1
for key,val in kwargs.iteritems():
setattr(self,key,val)
self.bld.task_manager.add_task_gen(self)
self.bld.all_task_gen.append(self)
def __str__(self):
return("<task_gen '%s' of type %s defined in %s>"%(self.name or self.target,self.__class__.__name__,str(self.path)))
def __setattr__(self,name,attr):
real=typos.get(name,name)
if real!=name:
warn('typo %s -> %s'%(name,real))
if Logs.verbose>0:
traceback.print_stack()
object.__setattr__(self,real,attr)
def to_list(self,value):
if isinstance(value,str):return value.split()
else:return value
def apply(self):
keys=set(self.meths)
self.features=Utils.to_list(self.features)
for x in self.features+['*']:
st=task_gen.traits[x]
if not st:
warn('feature %r does not exist - bind at least one method to it'%x)
keys.update(st)
prec={}
prec_tbl=self.prec or task_gen.prec
for x in prec_tbl:
if x in keys:
prec[x]=prec_tbl[x]
tmp=[]
for a in keys:
for x in prec.values():
if a in x:break
else:
tmp.append(a)
out=[]
while tmp:
e=tmp.pop()
if e in keys:out.append(e)
try:
nlst=prec[e]
except KeyError:
pass
else:
del prec[e]
for x in nlst:
for y in prec:
if x in prec[y]:
break
else:
tmp.append(x)
if prec:raise Utils.WafError("graph has a cycle %s"%str(prec))
out.reverse()
self.meths=out
debug('task_gen: posting %s %d',self,id(self))
for x in out:
try:
v=getattr(self,x)
except AttributeError:
raise Utils.WafError("tried to retrieve %s which is not a valid method"%x)
debug('task_gen: -> %s (%d)',x,id(self))
v()
def post(self):
if not self.name:
if isinstance(self.target,list):
self.name=' '.join(self.target)
else:
self.name=self.target
if getattr(self,'posted',None):
return
self.apply()
self.posted=True
debug('task_gen: posted %s',self.name)
def get_hook(self,ext):
try:return self.mappings[ext]
except KeyError:
try:return task_gen.mappings[ext]
except KeyError:return None
def create_task(self,name,src=None,tgt=None,env=None):
env=env or self.env
task=Task.TaskBase.classes[name](env.copy(),generator=self)
if src:
task.set_inputs(src)
if tgt:
task.set_outputs(tgt)
self.tasks.append(task)
return task
def name_to_obj(self,name):
return self.bld.name_to_obj(name,self.env)
def find_sources_in_dirs(self,dirnames,excludes=[],exts=[]):
err_msg="'%s' attribute must be a list"
if not isinstance(excludes,list):
raise Utils.WscriptError(err_msg%'excludes')
if not isinstance(exts,list):
raise Utils.WscriptError(err_msg%'exts')
lst=[]
dirnames=self.to_list(dirnames)
ext_lst=exts or list(self.mappings.keys())+list(task_gen.mappings.keys())
for name in dirnames:
anode=self.path.find_dir(name)
if not anode or not anode.is_child_of(self.bld.srcnode):
raise Utils.WscriptError("Unable to use '%s' - either because it's not a relative path"", or it's not child of '%s'."%(name,self.bld.srcnode))
self.bld.rescan(anode)
for name in self.bld.cache_dir_contents[anode.id]:
if name.startswith('.'):
continue
(base,ext)=os.path.splitext(name)
if ext in ext_lst and not name in lst and not name in excludes:
lst.append((anode.relpath_gen(self.path)or'.')+os.path.sep+name)
lst.sort()
self.source=self.to_list(self.source)
if not self.source:self.source=lst
else:self.source+=lst
def clone(self,env):
newobj=task_gen(bld=self.bld)
for x in self.__dict__:
if x in['env','bld']:
continue
elif x in["path","features"]:
setattr(newobj,x,getattr(self,x))
else:
setattr(newobj,x,copy.copy(getattr(self,x)))
newobj.__class__=self.__class__
if isinstance(env,str):
newobj.env=self.bld.all_envs[env].copy()
else:
newobj.env=env.copy()
return newobj
def get_inst_path(self):
return getattr(self,'_install_path',getattr(self,'default_install_path',''))
def set_inst_path(self,val):
self._install_path=val
install_path=property(get_inst_path,set_inst_path)
def get_chmod(self):
return getattr(self,'_chmod',getattr(self,'default_chmod',O644))
def set_chmod(self,val):
self._chmod=val
chmod=property(get_chmod,set_chmod)
def declare_extension(var,func):
try:
for x in Utils.to_list(var):
task_gen.mappings[x]=func
except:
raise Utils.WscriptError('declare_extension takes either a list or a string %r'%var)
task_gen.mapped[func.__name__]=func
def declare_order(*k):
assert(len(k)>1)
n=len(k)-1
for i in xrange(n):
f1=k[i]
f2=k[i+1]
if not f1 in task_gen.prec[f2]:
task_gen.prec[f2].append(f1)
def declare_chain(name='',action='',ext_in='',ext_out='',reentrant=True,color='BLUE',install=0,before=[],after=[],decider=None,rule=None,scan=None):
action=action or rule
if isinstance(action,str):
act=Task.simple_task_type(name,action,color=color)
else:
act=Task.task_type_from_func(name,action,color=color)
act.ext_in=tuple(Utils.to_list(ext_in))
act.ext_out=tuple(Utils.to_list(ext_out))
act.before=Utils.to_list(before)
act.after=Utils.to_list(after)
act.scan=scan
def x_file(self,node):
if decider:
ext=decider(self,node)
else:
ext=ext_out
if isinstance(ext,str):
out_source=node.change_ext(ext)
if reentrant:
self.allnodes.append(out_source)
elif isinstance(ext,list):
out_source=[node.change_ext(x)for x in ext]
if reentrant:
for i in xrange((reentrant is True)and len(out_source)or reentrant):
self.allnodes.append(out_source[i])
else:
raise Utils.WafError("do not know how to process %s"%str(ext))
tsk=self.create_task(name,node,out_source)
if node.__class__.bld.is_install:
tsk.install=install
declare_extension(act.ext_in,x_file)
return x_file
def bind_feature(name,methods):
lst=Utils.to_list(methods)
task_gen.traits[name].update(lst)
def taskgen(func):
setattr(task_gen,func.__name__,func)
return func
def feature(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for name in k:
task_gen.traits[name].update([func.__name__])
return func
return deco
def before(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for fun_name in k:
if not func.__name__ in task_gen.prec[fun_name]:
task_gen.prec[fun_name].append(func.__name__)
return func
return deco
def after(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for fun_name in k:
if not fun_name in task_gen.prec[func.__name__]:
task_gen.prec[func.__name__].append(fun_name)
return func
return deco
def extension(var):
def deco(func):
setattr(task_gen,func.__name__,func)
try:
for x in Utils.to_list(var):
task_gen.mappings[x]=func
except:
raise Utils.WafError('extension takes either a list or a string %r'%var)
task_gen.mapped[func.__name__]=func
return func
return deco
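# Illustrative usage of the extension decorator above (a sketch; it assumes a
# 'coco' task type was declared elsewhere):
#   @extension('.coco')
#   def process_coco(self, node):
#       tsk = self.create_task('coco', node, node.change_ext('.cpp'))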
def apply_core(self):
find_resource=self.path.find_resource
for filename in self.to_list(self.source):
x=self.get_hook(filename)
if x:
x(self,filename)
else:
node=find_resource(filename)
if not node:raise Utils.WafError("source not found: '%s' in '%s'"%(filename,str(self.path)))
self.allnodes.append(node)
for node in self.allnodes:
x=self.get_hook(node.suffix())
if not x:
raise Utils.WafError("Cannot guess how to process %s (got mappings %r in %r) -> try conf.check_tool(..)?"%(str(node),self.__class__.mappings.keys(),self.__class__))
x(self,node)
feature('*')(apply_core)
def exec_rule(self):
if not getattr(self,'rule',None):
return
try:
self.meths.remove('apply_core')
except ValueError:
pass
func=self.rule
vars2=[]
if isinstance(func,str):
(func,vars2)=Task.compile_fun('',self.rule,shell=getattr(self,'shell',True))
func.code=self.rule
name=getattr(self,'name',None)or self.target or self.rule
if not isinstance(name,str):
name=str(self.idx)
cls=Task.task_type_from_func(name,func,getattr(self,'vars',vars2))
tsk=self.create_task(name)
dep_vars=getattr(self,'dep_vars',['ruledeps'])
if dep_vars:
tsk.dep_vars=dep_vars
if isinstance(self.rule,str):
tsk.env.ruledeps=self.rule
else:
tsk.env.ruledeps=Utils.h_fun(self.rule)
if getattr(self,'target',None):
cls.quiet=True
tsk.outputs=[self.path.find_or_declare(x)for x in self.to_list(self.target)]
if getattr(self,'source',None):
cls.quiet=True
tsk.inputs=[]
for x in self.to_list(self.source):
y=self.path.find_resource(x)
if not y:
raise Utils.WafError('input file %r could not be found (%r)'%(x,self.path.abspath()))
tsk.inputs.append(y)
if self.allnodes:
tsk.inputs.extend(self.allnodes)
if getattr(self,'scan',None):
cls.scan=self.scan
if getattr(self,'install_path',None):
tsk.install_path=self.install_path
if getattr(self,'cwd',None):
tsk.cwd=self.cwd
if getattr(self,'on_results',None):
Task.update_outputs(cls)
if getattr(self,'always',None):
Task.always_run(cls)
for x in['after','before','ext_in','ext_out']:
setattr(cls,x,getattr(self,x,[]))
feature('*')(exec_rule)
before('apply_core')(exec_rule)
def sequence_order(self):
if self.meths and self.meths[-1]!='sequence_order':
self.meths.append('sequence_order')
return
if getattr(self,'seq_start',None):
return
if getattr(self.bld,'prev',None):
self.bld.prev.post()
for x in self.bld.prev.tasks:
for y in self.tasks:
y.set_run_after(x)
self.bld.prev=self
feature('seq')(sequence_order)
| gpl-3.0 |
yrobla/nova | nova/api/openstack/compute/versions.py | 14 | 7798 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack.compute.views import versions as views_versions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.openstack.common import timeutils
LINKS = {
'v2.0': {
'pdf': 'http://docs.openstack.org/'
'api/openstack-compute/2/os-compute-devguide-2.pdf',
'wadl': 'http://docs.openstack.org/'
'api/openstack-compute/2/wadl/os-compute-2.wadl'
},
}
VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "application/pdf",
"href": LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": LINKS['v2.0']['wadl'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml;version=2",
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2",
}
],
}
}
class MediaTypesTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return 'media-types' in datum
def make_version(elem):
elem.set('id')
elem.set('status')
elem.set('updated')
mts = MediaTypesTemplateElement('media-types')
elem.append(mts)
mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types')
mt.set('base')
mt.set('type')
xmlutil.make_links(elem, 'links')
version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class VersionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('version', selector='version')
make_version(root)
return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
class VersionsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('versions')
elem = xmlutil.SubTemplateElement(root, 'version', selector='versions')
make_version(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
class ChoicesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('choices')
elem = xmlutil.SubTemplateElement(root, 'version', selector='choices')
make_version(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
class AtomSerializer(wsgi.XMLDictSerializer):
NSMAP = {None: xmlutil.XMLNS_ATOM}
def __init__(self, metadata=None, xmlns=None):
self.metadata = metadata or {}
if not xmlns:
self.xmlns = wsgi.XMLNS_ATOM
else:
self.xmlns = xmlns
def _get_most_recent_update(self, versions):
recent = None
for version in versions:
updated = timeutils.parse_strtime(version['updated'],
'%Y-%m-%dT%H:%M:%SZ')
if not recent:
recent = updated
elif updated > recent:
recent = updated
return recent.strftime('%Y-%m-%dT%H:%M:%SZ')
def _get_base_url(self, link_href):
# Make sure no trailing /
link_href = link_href.rstrip('/')
return link_href.rsplit('/', 1)[0] + '/'
def _create_feed(self, versions, feed_title, feed_id):
feed = etree.Element('feed', nsmap=self.NSMAP)
title = etree.SubElement(feed, 'title')
title.set('type', 'text')
title.text = feed_title
# Set this updated to the most recently updated version
recent = self._get_most_recent_update(versions)
etree.SubElement(feed, 'updated').text = recent
etree.SubElement(feed, 'id').text = feed_id
link = etree.SubElement(feed, 'link')
link.set('rel', 'self')
link.set('href', feed_id)
author = etree.SubElement(feed, 'author')
etree.SubElement(author, 'name').text = 'Rackspace'
etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/'
for version in versions:
feed.append(self._create_version_entry(version))
return feed
def _create_version_entry(self, version):
entry = etree.Element('entry')
etree.SubElement(entry, 'id').text = version['links'][0]['href']
title = etree.SubElement(entry, 'title')
title.set('type', 'text')
title.text = 'Version %s' % version['id']
etree.SubElement(entry, 'updated').text = version['updated']
for link in version['links']:
link_elem = etree.SubElement(entry, 'link')
link_elem.set('rel', link['rel'])
link_elem.set('href', link['href'])
if 'type' in link:
link_elem.set('type', link['type'])
content = etree.SubElement(entry, 'content')
content.set('type', 'text')
content.text = 'Version %s %s (%s)' % (version['id'],
version['status'],
version['updated'])
return entry
class VersionsAtomSerializer(AtomSerializer):
def default(self, data):
versions = data['versions']
feed_id = self._get_base_url(versions[0]['links'][0]['href'])
feed = self._create_feed(versions, 'Available API Versions', feed_id)
return self._to_xml(feed)
class VersionAtomSerializer(AtomSerializer):
def default(self, data):
version = data['version']
feed_id = version['links'][0]['href']
feed = self._create_feed([version], 'About This Version', feed_id)
return self._to_xml(feed)
class Versions(wsgi.Resource):
def __init__(self):
super(Versions, self).__init__(None)
@wsgi.serializers(xml=VersionsTemplate,
atom=VersionsAtomSerializer)
def index(self, req):
"""Return all versions."""
builder = views_versions.get_view_builder(req)
return builder.build_versions(VERSIONS)
@wsgi.serializers(xml=ChoicesTemplate)
@wsgi.response(300)
def multi(self, req):
"""Return multiple choices."""
builder = views_versions.get_view_builder(req)
return builder.build_choices(VERSIONS, req)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
args = {}
if request_environment['PATH_INFO'] == '/':
args['action'] = 'index'
else:
args['action'] = 'multi'
return args
class VersionV2(object):
@wsgi.serializers(xml=VersionTemplate,
atom=VersionAtomSerializer)
def show(self, req):
builder = views_versions.get_view_builder(req)
return builder.build_version(VERSIONS['v2.0'])
def create_resource():
return wsgi.Resource(VersionV2())
| apache-2.0 |
x2nie/odoo | addons/lunch/report/order.py | 377 | 2637 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from openerp.osv import osv
class order(report_sxw.rml_parse):
def get_lines(self, user,objects):
lines=[]
for obj in objects:
if user.id==obj.user_id.id:
lines.append(obj)
return lines
def get_total(self, user,objects):
lines=[]
for obj in objects:
if user.id==obj.user_id.id:
lines.append(obj)
total=0.0
for line in lines:
total+=line.price
self.net_total+=total
return total
def get_nettotal(self):
return self.net_total
def get_users(self, objects):
users=[]
for obj in objects:
if obj.user_id not in users:
users.append(obj.user_id)
return users
def get_note(self,objects):
notes=[]
for obj in objects:
notes.append(obj.note)
return notes
def __init__(self, cr, uid, name, context):
super(order, self).__init__(cr, uid, name, context)
self.net_total=0.0
self.localcontext.update({
'time': time,
'get_lines': self.get_lines,
'get_users': self.get_users,
'get_total': self.get_total,
'get_nettotal': self.get_nettotal,
'get_note': self.get_note,
})
class report_lunchorder(osv.AbstractModel):
_name = 'report.lunch.report_lunchorder'
_inherit = 'report.abstract_report'
_template = 'lunch.report_lunchorder'
_wrapped_report_class = order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tlakshman26/cinder-bug-fix-volume-conversion-full | cinder/tests/unit/scheduler/test_capacity_weigher.py | 17 | 14495 | # Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Capacity Weigher.
"""
import mock
from oslo_config import cfg
from cinder import context
from cinder.openstack.common.scheduler import weights
from cinder.scheduler.weights import capacity
from cinder import test
from cinder.tests.unit.scheduler import fakes
from cinder.volume import utils
CONF = cfg.CONF
class CapacityWeigherTestCase(test.TestCase):
def setUp(self):
super(CapacityWeigherTestCase, self).setUp()
self.host_manager = fakes.FakeHostManager()
self.weight_handler = weights.HostWeightHandler(
'cinder.scheduler.weights')
def _get_weighed_hosts(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {'size': 1}
return self.weight_handler.get_weighed_objects(
[capacity.CapacityWeigher],
hosts,
weight_properties)
@mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic')
def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False):
ctxt = context.get_admin_context()
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic,
disabled=disabled)
host_states = self.host_manager.get_all_host_states(ctxt)
_mock_service_get_all_by_topic.assert_called_once_with(
ctxt, CONF.volume_topic, disabled=disabled)
return host_states
# If thin_provisioning_support = False, use the following formula:
# free = free_space - math.floor(total * reserved)
# Otherwise, use the following formula:
# free = (total * host_state.max_over_subscription_ratio
# - host_state.provisioned_capacity_gb
# - math.floor(total * reserved))
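    # A minimal sketch of the two formulas above (illustrative only; this
    # helper is an assumption added for readability, not part of the weigher
    # under test):
    @staticmethod
    def _virtual_free_sketch(total, free_space, provisioned, ratio,
                             reserved, thin_provisioning):
        import math
        if thin_provisioning:
            return total * ratio - provisioned - math.floor(total * reserved)
        return free_space - math.floor(total * reserved)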
def test_default_of_spreading_first(self):
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=1024-math.floor(1024*0.1)=922
# Norm=0.837837837838
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=2048*1.5-1748-math.floor(2048*0.1)=1120
# Norm=1.0
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=256-512*0=256
# Norm=0.292383292383
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=2048*1.0-2047-math.floor(2048*0.05)=-101
# Norm=0.0
# host5: free_capacity_gb=unknown free=-1
# Norm=0.0819000819001
# so, host2 should win:
weighed_host = self._get_weighed_hosts(hostinfo_list)[0]
self.assertEqual(1.0, weighed_host.weight)
self.assertEqual('host2', utils.extract_host(weighed_host.obj.host))
def test_capacity_weight_multiplier1(self):
self.flags(capacity_weight_multiplier=-1.0)
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=-(1024-math.floor(1024*0.1))=-922
# Norm=-0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=-(256-512*0)=-256
        # Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=unknown free=-float('inf')
# Norm=-1.0
# so, host4 should win:
weighed_host = self._get_weighed_hosts(hostinfo_list)[0]
self.assertEqual(0.0, weighed_host.weight)
self.assertEqual('host4', utils.extract_host(weighed_host.obj.host))
def test_capacity_weight_multiplier2(self):
self.flags(capacity_weight_multiplier=2.0)
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))*2=1844
# Norm=1.67567567568
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
# Norm=2.0
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)*2=512
# Norm=0.584766584767
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
# Norm=0.0
# host5: free_capacity_gb=unknown free=-2
# Norm=0.1638001638
# so, host2 should win:
weighed_host = self._get_weighed_hosts(hostinfo_list)[0]
self.assertEqual(1.0 * 2, weighed_host.weight)
self.assertEqual('host2', utils.extract_host(weighed_host.obj.host))
def test_capacity_weight_no_unknown_or_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
del self.host_manager.service_states['host5']
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
        # free=-(1024-math.floor(1024*0.1))=-922
# Norm=-0.837837837838
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
        # free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-1.0
# host3: thin_provisioning_support = False
        # free_capacity_gb=512, free=-(256-512*0)=-256
# Norm=-0.292383292383
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
        # free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
# and host2 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host2', utils.extract_host(worst_host.obj.host))
def test_capacity_weight_free_unknown(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 3000,
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': None}
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
        # free=-(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
        # free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
        # free_capacity_gb=512, free=-(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
        # free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=unknown free=3000
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
def test_capacity_weight_cap_unknown(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 'unknown',
'free_capacity_gb': 3000,
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': None}
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
        # free=-(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
        # free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
        # free_capacity_gb=512, free=-(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
        # free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=3000 free=unknown
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
def test_capacity_weight_free_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 3000,
'free_capacity_gb': 'infinite',
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': None}
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
        # free=-(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
        # free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
        # free_capacity_gb=512, free=-(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
        # free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=infinite free=3000
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
def test_capacity_weight_cap_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 'infinite',
'free_capacity_gb': 3000,
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': None}
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
        # free=-(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
        # free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
        # free_capacity_gb=512, free=-(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
        # free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=3000 free=infinite
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
| apache-2.0 |
oinopion/django | django/core/mail/backends/smtp.py | 477 | 5239 | """SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address
from django.core.mail.utils import DNS_NAME
class EmailBackend(BaseEmailBackend):
"""
A wrapper that manages the SMTP network connection.
"""
def __init__(self, host=None, port=None, username=None, password=None,
use_tls=None, fail_silently=False, use_ssl=None, timeout=None,
ssl_keyfile=None, ssl_certfile=None,
**kwargs):
super(EmailBackend, self).__init__(fail_silently=fail_silently)
self.host = host or settings.EMAIL_HOST
self.port = port or settings.EMAIL_PORT
self.username = settings.EMAIL_HOST_USER if username is None else username
self.password = settings.EMAIL_HOST_PASSWORD if password is None else password
self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls
self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl
self.timeout = settings.EMAIL_TIMEOUT if timeout is None else timeout
self.ssl_keyfile = settings.EMAIL_SSL_KEYFILE if ssl_keyfile is None else ssl_keyfile
self.ssl_certfile = settings.EMAIL_SSL_CERTFILE if ssl_certfile is None else ssl_certfile
if self.use_ssl and self.use_tls:
raise ValueError(
"EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set "
"one of those settings to True.")
self.connection = None
self._lock = threading.RLock()
def open(self):
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
connection_class = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
# If local_hostname is not specified, socket.getfqdn() gets used.
# For performance, we use the cached FQDN for local_hostname.
connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params['timeout'] = self.timeout
if self.use_ssl:
connection_params.update({
'keyfile': self.ssl_keyfile,
'certfile': self.ssl_certfile,
})
try:
self.connection = connection_class(self.host, self.port, **connection_params)
# TLS/SSL are mutually exclusive, so only attempt TLS over
# non-secure connections.
if not self.use_ssl and self.use_tls:
self.connection.ehlo()
self.connection.starttls(keyfile=self.ssl_keyfile, certfile=self.ssl_certfile)
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except smtplib.SMTPException:
if not self.fail_silently:
raise
def close(self):
"""Closes the connection to the email server."""
if self.connection is None:
return
try:
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
# This happens when calling quit() on a TLS connection
# sometimes, or when the connection was already disconnected
# by the server.
self.connection.close()
except smtplib.SMTPException:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return
with self._lock:
new_conn_created = self.open()
if not self.connection:
# We failed silently on open().
# Trying to send would be pointless.
return
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
from_email = sanitize_address(email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding)
for addr in email_message.recipients()]
message = email_message.message()
try:
self.connection.sendmail(from_email, recipients, message.as_bytes(linesep='\r\n'))
except smtplib.SMTPException:
if not self.fail_silently:
raise
return False
return True
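# A minimal usage sketch (assumes the EMAIL_* settings are configured; not
# part of the backend itself):
#   from django.core import mail
#   connection = mail.get_connection(
#       'django.core.mail.backends.smtp.EmailBackend')
#   mail.send_mail('Subject', 'Body.', 'from@example.com',
#                  ['to@example.com'], connection=connection)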
| bsd-3-clause |
EdDev/vdsm | lib/vdsm/network/netinfo/bridges.py | 1 | 2859 | #
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
from __future__ import absolute_import
from functools import partial
from glob import iglob
import logging
import os
from vdsm.network.ipwrapper import Link
from .misc import visible_devs
BRIDGING_OPT = '/sys/class/net/%s/bridge/%s'
bridges = partial(visible_devs, Link.isBRIDGE)
def ports(bridge):
brif_path = os.path.join('/sys/class/net', bridge, 'brif')
if os.path.isdir(brif_path):
bridge_ports = os.listdir(brif_path)
else:
# We expect "bridge" to be a Linux bridge with interfaces. It is quite
# common that this is not the case, when the bridge is actually
# implemented by OVS (via our hook) or when the Linux bridge device is
# not yet up.
logging.warning('%s is not a Linux bridge', bridge)
bridge_ports = []
return bridge_ports
def _bridge_options(bridge, keys=None):
"""Returns a dictionary of bridge option name and value. E.g.,
{'max_age': '2000', 'gc_timer': '332'}"""
BR_KEY_BLACKLIST = ('flush',)
if keys is None:
paths = iglob(BRIDGING_OPT % (bridge, '*'))
else:
paths = (BRIDGING_OPT % (bridge, key) for key in keys)
opts = {}
for path in paths:
key = os.path.basename(path)
if key in BR_KEY_BLACKLIST:
continue
with open(path) as optFile:
opts[key] = optFile.read().rstrip()
return opts
def stp_state(bridge):
with open(BRIDGING_OPT % (bridge, 'stp_state')) as stp_file:
stp = stp_file.readline()
if stp == '1\n':
return 'on'
else:
return 'off'
def stp_booleanize(value):
if value is None:
return False
if type(value) is bool:
return value
if value.lower() in ('true', 'on', 'yes'):
return True
elif value.lower() in ('false', 'off', 'no'):
return False
else:
raise ValueError('Invalid value for bridge stp')
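# Illustrative behaviour of stp_booleanize (a sketch of the cases above):
#   stp_booleanize('on')   -> True
#   stp_booleanize('NO')   -> False
#   stp_booleanize(None)   -> False
#   stp_booleanize(True)   -> True
#   stp_booleanize('2')    raises ValueError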
def info(link):
return {'ports': ports(link.name),
'stp': stp_state(link.name),
'opts': _bridge_options(link.name)}
| gpl-2.0 |
elsonrodriguez/madhatter | cobbler/action_replicate.py | 3 | 13381 | """
Replicate from a cobbler master.
Copyright 2007-2009, Red Hat, Inc
Michael DeHaan <[email protected]>
Scott Henson <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import os.path
import xmlrpclib
import api as cobbler_api
import utils
from utils import _
from cexceptions import *
import clogger
import fnmatch
OBJ_TYPES = [ "distro", "profile", "system", "repo", "image" ]
class Replicate:
def __init__(self,config,logger=None):
"""
Constructor
"""
self.config = config
self.settings = config.settings()
self.api = config.api
self.remote = None
self.uri = None
if logger is None:
logger = clogger.Logger()
self.logger = logger
def rsync_it(self,from_path,to_path):
from_path = "%s::%s" % (self.host, from_path)
cmd = "rsync -avzH %s %s" % (from_path, to_path)
rc = utils.subprocess_call(self.logger, cmd, shell=True)
if rc !=0:
self.logger.info("rsync failed")
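    # Illustrative call (a sketch; it mirrors the usage in replicate_data
    # below):
    #   self.rsync_it("cobbler-kickstarts", "/var/lib/cobbler/kickstarts")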
# -------------------------------------------------------
def remove_objects_not_on_master(self, obj_type):
locals = utils.loh_to_hoh(self.local_data[obj_type],"uid")
remotes = utils.loh_to_hoh(self.remote_data[obj_type],"uid")
for (luid, ldata) in locals.iteritems():
if not remotes.has_key(luid):
try:
self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
except Exception, e:
utils.log_exc(self.logger)
# -------------------------------------------------------
def add_objects_not_on_local(self, obj_type):
locals = utils.loh_to_hoh(self.local_data[obj_type], "uid")
remotes = utils.loh_sort_by_key(self.remote_data[obj_type],"depth")
remotes2 = utils.loh_to_hoh(self.remote_data[obj_type],"depth")
for rdata in remotes:
            # do not add the object if it is not on the transfer list
if not self.must_include[obj_type].has_key(rdata["name"]):
continue
if not locals.has_key(rdata["uid"]):
creator = getattr(self.api, "new_%s" % obj_type)
newobj = creator()
newobj.from_datastruct(rdata)
try:
self.logger.info("adding %s %s" % (obj_type, rdata["name"]))
self.api.add_item(obj_type, newobj)
except Exception, e:
utils.log_exc(self.logger)
# -------------------------------------------------------
def replace_objects_newer_on_remote(self, otype):
locals = utils.loh_to_hoh(self.local_data[otype],"uid")
remotes = utils.loh_to_hoh(self.remote_data[otype],"uid")
for (ruid, rdata) in remotes.iteritems():
            # do not add the object if it is not on the transfer list
if not self.must_include[otype].has_key(rdata["name"]):
continue
if locals.has_key(ruid):
ldata = locals[ruid]
if ldata["mtime"] < rdata["mtime"]:
if ldata["name"] != rdata["name"]:
self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
creator = getattr(self.api, "new_%s" % otype)
newobj = creator()
newobj.from_datastruct(rdata)
try:
self.logger.info("updating %s %s" % (otype, rdata["name"]))
self.api.add_item(otype, newobj)
except Exception, e:
utils.log_exc(self.logger)
# -------------------------------------------------------
def replicate_data(self):
self.local_data = {}
self.remote_data = {}
self.logger.info("Querying Both Servers")
for what in OBJ_TYPES:
self.remote_data[what] = self.remote.get_items(what)
self.local_data[what] = self.local.get_items(what)
self.generate_include_map()
# FIXME: this should be optional as we might want to maintain local system records
# and just keep profiles/distros common
if self.prune:
self.logger.info("Removing Objects Not Stored On Master")
            # work on a copy so we do not mutate the module-level OBJ_TYPES,
            # which is iterated again further down
            obj_types = list(OBJ_TYPES)
            if len(self.system_patterns) == 0:
                obj_types.remove("system")
for what in obj_types:
self.remove_objects_not_on_master(what)
else:
self.logger.info("*NOT* Removing Objects Not Stored On Master")
if not self.omit_data:
self.logger.info("Rsyncing distros")
for distro in self.must_include["distro"].keys():
if self.must_include["distro"][distro] == 1:
distro = self.remote.get_item('distro',distro)
if distro["breed"] == 'redhat':
dest = distro["kernel"]
top = None
while top != 'images' and top != '':
dest, top = os.path.split(dest)
if not dest == os.path.sep and len(dest) > 1:
parentdir = os.path.split(dest)[0]
if not os.path.isdir(parentdir):
os.makedirs(parentdir)
self.rsync_it("distro-%s"%distro["name"], dest)
self.logger.info("Rsyncing repos")
for repo in self.must_include["repo"].keys():
if self.must_include["repo"][repo] == 1:
self.rsync_it("repo-%s"%repo, os.path.join(self.settings.webdir,"repo_mirror",repo))
self.logger.info("Rsyncing distro repo configs")
self.rsync_it("cobbler-distros/config", os.path.join(self.settings.webdir,"ks_mirror"))
self.logger.info("Rsyncing kickstart templates & snippets")
self.rsync_it("cobbler-kickstarts","/var/lib/cobbler/kickstarts")
self.rsync_it("cobbler-snippets","/var/lib/cobbler/snippets")
self.logger.info("Rsyncing triggers")
self.rsync_it("cobbler-triggers","/var/lib/cobbler/triggers")
else:
self.logger.info("*NOT* Rsyncing Data")
self.logger.info("Removing Objects Not Stored On Local")
for what in OBJ_TYPES:
self.add_objects_not_on_local(what)
self.logger.info("Updating Objects Newer On Remote")
for what in OBJ_TYPES:
self.replace_objects_newer_on_remote(what)
def link_distros(self):
for distro in self.api.distros():
self.logger.debug("Linking Distro %s" % distro.name)
utils.link_distro(self.settings, distro)
def generate_include_map(self):
self.remote_names = {}
self.remote_dict = {}
for ot in OBJ_TYPES:
self.remote_names[ot] = utils.loh_to_hoh(self.remote_data[ot],"name").keys()
self.remote_dict[ot] = utils.loh_to_hoh(self.remote_data[ot],"name")
self.logger.debug("remote names struct is %s" % self.remote_names)
self.must_include = {
"distro" : {},
"profile" : {},
"system" : {},
"image" : {},
"repo" : {}
}
# include all profiles that are matched by a pattern
for otype in OBJ_TYPES:
patvar = getattr(self, "%s_patterns" % otype)
self.logger.debug("* Finding Explicit %s Matches" % otype)
for pat in patvar:
for remote in self.remote_names[otype]:
self.logger.debug("?: seeing if %s looks like %s" % (remote,pat))
if fnmatch.fnmatch(remote, pat):
self.must_include[otype][remote] = 1
# include all profiles that systems require
# whether they are explicitly included or not
self.logger.debug("* Adding Profiles Required By Systems")
for sys in self.must_include["system"].keys():
pro = self.remote_dict["system"][sys].get("profile","")
self.logger.debug("?: requires profile: %s" % pro)
if pro != "":
self.must_include["profile"][pro] = 1
# include all profiles that subprofiles require
# whether they are explicitly included or not
# very deep nesting is possible
self.logger.debug("* Adding Profiles Required By SubProfiles")
while True:
loop_exit = True
for pro in self.must_include["profile"].keys():
parent = self.remote_dict["profile"][pro].get("parent","")
if parent != "":
if not self.must_include["profile"].has_key(parent):
self.must_include["profile"][parent] = 1
loop_exit = False
if loop_exit:
break
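        # The loop above is a simple fixed-point iteration over the profile
        # parent chain: e.g. if a system pulled in profile C whose parent is B
        # whose parent is A, the first pass adds B, the next adds A, and the
        # pass after that changes nothing, so loop_exit stays True and we stop.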
# require all distros that any profiles in the generated list requires
# whether they are explicitly included or not
self.logger.debug("* Adding Distros Required By Profiles")
for p in self.must_include["profile"].keys():
distro = self.remote_dict["profile"][p].get("distro","")
if not distro == "<<inherit>>" and not distro == "~":
self.logger.info("Adding repo %s for profile %s."%(p, distro))
self.must_include["distro"][distro] = 1
# require any repos that any profiles in the generated list requires
# whether they are explicitly included or not
self.logger.debug("* Adding Repos Required By Profiles")
for p in self.must_include["profile"].keys():
repos = self.remote_dict["profile"][p].get("repos",[])
for r in repos:
self.must_include["repo"][r] = 1
# include all images that systems require
# whether they are explicitly included or not
self.logger.debug("* Adding Images Required By Systems")
for sys in self.must_include["system"].keys():
img = self.remote_dict["system"][sys].get("image","")
self.logger.debug("?: requires profile: %s" % pro)
if img != "":
self.must_include["image"][img] = 1
# FIXME: remove debug
for ot in OBJ_TYPES:
self.logger.debug("transfer list for %s is %s" % (ot, self.must_include[ot].keys()))
# -------------------------------------------------------
def run(self, cobbler_master=None, distro_patterns=None, profile_patterns=None, system_patterns=None, repo_patterns=None, image_patterns=None, prune=False, omit_data=False):
"""
Get remote profiles and distros and sync them locally
"""
self.distro_patterns = distro_patterns.split()
self.profile_patterns = profile_patterns.split()
self.system_patterns = system_patterns.split()
self.repo_patterns = repo_patterns.split()
self.image_patterns = image_patterns.split()
self.omit_data = omit_data
self.prune = prune
self.logger.info("cobbler_master = %s" % cobbler_master)
self.logger.info("profile_patterns = %s" % self.profile_patterns)
self.logger.info("system_patterns = %s" % self.system_patterns)
self.logger.info("omit_data = %s" % self.omit_data)
if cobbler_master is not None:
self.logger.info("using CLI defined master")
self.host = cobbler_master
self.uri = 'http://%s/cobbler_api' % cobbler_master
elif len(self.settings.cobbler_master) > 0:
self.logger.info("using info from master")
self.host = self.settings.cobbler_master
self.uri = 'http://%s/cobbler_api' % self.settings.cobbler_master
else:
utils.die('No cobbler master specified, try --master.')
self.logger.info("XMLRPC endpoint: %s" % self.uri)
self.logger.debug("test ALPHA")
self.remote = xmlrpclib.Server(self.uri)
self.logger.debug("test BETA")
self.remote.ping()
self.local = xmlrpclib.Server("http://127.0.0.1/cobbler_api")
self.local.ping()
self.replicate_data()
self.link_distros()
self.logger.info("Syncing")
self.api.sync()
self.logger.info("Done")
return True
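# Illustrative invocation (the hostname and pattern are hypothetical; empty
# strings are valid "match nothing" patterns, since run() splits them):
#   Replicate(config).run(cobbler_master="master.example.com",
#                         distro_patterns="", profile_patterns="web*",
#                         system_patterns="", repo_patterns="",
#                         image_patterns="", prune=False)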
| gpl-2.0 |
Bjay1435/capstone | rootfs/usr/lib/python3.4/curses/has_key.py | 195 | 5634 |
#
# Emulation of has_key() function for platforms that don't use ncurses
#
import _curses
# Table mapping curses keys to the terminfo capability name
_capability_names = {
_curses.KEY_A1: 'ka1',
_curses.KEY_A3: 'ka3',
_curses.KEY_B2: 'kb2',
_curses.KEY_BACKSPACE: 'kbs',
_curses.KEY_BEG: 'kbeg',
_curses.KEY_BTAB: 'kcbt',
_curses.KEY_C1: 'kc1',
_curses.KEY_C3: 'kc3',
_curses.KEY_CANCEL: 'kcan',
_curses.KEY_CATAB: 'ktbc',
_curses.KEY_CLEAR: 'kclr',
_curses.KEY_CLOSE: 'kclo',
_curses.KEY_COMMAND: 'kcmd',
_curses.KEY_COPY: 'kcpy',
_curses.KEY_CREATE: 'kcrt',
_curses.KEY_CTAB: 'kctab',
_curses.KEY_DC: 'kdch1',
_curses.KEY_DL: 'kdl1',
_curses.KEY_DOWN: 'kcud1',
_curses.KEY_EIC: 'krmir',
_curses.KEY_END: 'kend',
_curses.KEY_ENTER: 'kent',
_curses.KEY_EOL: 'kel',
_curses.KEY_EOS: 'ked',
_curses.KEY_EXIT: 'kext',
_curses.KEY_F0: 'kf0',
_curses.KEY_F1: 'kf1',
_curses.KEY_F10: 'kf10',
_curses.KEY_F11: 'kf11',
_curses.KEY_F12: 'kf12',
_curses.KEY_F13: 'kf13',
_curses.KEY_F14: 'kf14',
_curses.KEY_F15: 'kf15',
_curses.KEY_F16: 'kf16',
_curses.KEY_F17: 'kf17',
_curses.KEY_F18: 'kf18',
_curses.KEY_F19: 'kf19',
_curses.KEY_F2: 'kf2',
_curses.KEY_F20: 'kf20',
_curses.KEY_F21: 'kf21',
_curses.KEY_F22: 'kf22',
_curses.KEY_F23: 'kf23',
_curses.KEY_F24: 'kf24',
_curses.KEY_F25: 'kf25',
_curses.KEY_F26: 'kf26',
_curses.KEY_F27: 'kf27',
_curses.KEY_F28: 'kf28',
_curses.KEY_F29: 'kf29',
_curses.KEY_F3: 'kf3',
_curses.KEY_F30: 'kf30',
_curses.KEY_F31: 'kf31',
_curses.KEY_F32: 'kf32',
_curses.KEY_F33: 'kf33',
_curses.KEY_F34: 'kf34',
_curses.KEY_F35: 'kf35',
_curses.KEY_F36: 'kf36',
_curses.KEY_F37: 'kf37',
_curses.KEY_F38: 'kf38',
_curses.KEY_F39: 'kf39',
_curses.KEY_F4: 'kf4',
_curses.KEY_F40: 'kf40',
_curses.KEY_F41: 'kf41',
_curses.KEY_F42: 'kf42',
_curses.KEY_F43: 'kf43',
_curses.KEY_F44: 'kf44',
_curses.KEY_F45: 'kf45',
_curses.KEY_F46: 'kf46',
_curses.KEY_F47: 'kf47',
_curses.KEY_F48: 'kf48',
_curses.KEY_F49: 'kf49',
_curses.KEY_F5: 'kf5',
_curses.KEY_F50: 'kf50',
_curses.KEY_F51: 'kf51',
_curses.KEY_F52: 'kf52',
_curses.KEY_F53: 'kf53',
_curses.KEY_F54: 'kf54',
_curses.KEY_F55: 'kf55',
_curses.KEY_F56: 'kf56',
_curses.KEY_F57: 'kf57',
_curses.KEY_F58: 'kf58',
_curses.KEY_F59: 'kf59',
_curses.KEY_F6: 'kf6',
_curses.KEY_F60: 'kf60',
_curses.KEY_F61: 'kf61',
_curses.KEY_F62: 'kf62',
_curses.KEY_F63: 'kf63',
_curses.KEY_F7: 'kf7',
_curses.KEY_F8: 'kf8',
_curses.KEY_F9: 'kf9',
_curses.KEY_FIND: 'kfnd',
_curses.KEY_HELP: 'khlp',
_curses.KEY_HOME: 'khome',
_curses.KEY_IC: 'kich1',
_curses.KEY_IL: 'kil1',
_curses.KEY_LEFT: 'kcub1',
_curses.KEY_LL: 'kll',
_curses.KEY_MARK: 'kmrk',
_curses.KEY_MESSAGE: 'kmsg',
_curses.KEY_MOVE: 'kmov',
_curses.KEY_NEXT: 'knxt',
_curses.KEY_NPAGE: 'knp',
_curses.KEY_OPEN: 'kopn',
_curses.KEY_OPTIONS: 'kopt',
_curses.KEY_PPAGE: 'kpp',
_curses.KEY_PREVIOUS: 'kprv',
_curses.KEY_PRINT: 'kprt',
_curses.KEY_REDO: 'krdo',
_curses.KEY_REFERENCE: 'kref',
_curses.KEY_REFRESH: 'krfr',
_curses.KEY_REPLACE: 'krpl',
_curses.KEY_RESTART: 'krst',
_curses.KEY_RESUME: 'kres',
_curses.KEY_RIGHT: 'kcuf1',
_curses.KEY_SAVE: 'ksav',
_curses.KEY_SBEG: 'kBEG',
_curses.KEY_SCANCEL: 'kCAN',
_curses.KEY_SCOMMAND: 'kCMD',
_curses.KEY_SCOPY: 'kCPY',
_curses.KEY_SCREATE: 'kCRT',
_curses.KEY_SDC: 'kDC',
_curses.KEY_SDL: 'kDL',
_curses.KEY_SELECT: 'kslt',
_curses.KEY_SEND: 'kEND',
_curses.KEY_SEOL: 'kEOL',
_curses.KEY_SEXIT: 'kEXT',
_curses.KEY_SF: 'kind',
_curses.KEY_SFIND: 'kFND',
_curses.KEY_SHELP: 'kHLP',
_curses.KEY_SHOME: 'kHOM',
_curses.KEY_SIC: 'kIC',
_curses.KEY_SLEFT: 'kLFT',
_curses.KEY_SMESSAGE: 'kMSG',
_curses.KEY_SMOVE: 'kMOV',
_curses.KEY_SNEXT: 'kNXT',
_curses.KEY_SOPTIONS: 'kOPT',
_curses.KEY_SPREVIOUS: 'kPRV',
_curses.KEY_SPRINT: 'kPRT',
_curses.KEY_SR: 'kri',
_curses.KEY_SREDO: 'kRDO',
_curses.KEY_SREPLACE: 'kRPL',
_curses.KEY_SRIGHT: 'kRIT',
_curses.KEY_SRSUME: 'kRES',
_curses.KEY_SSAVE: 'kSAV',
_curses.KEY_SSUSPEND: 'kSPD',
_curses.KEY_STAB: 'khts',
_curses.KEY_SUNDO: 'kUND',
_curses.KEY_SUSPEND: 'kspd',
_curses.KEY_UNDO: 'kund',
_curses.KEY_UP: 'kcuu1'
}
def has_key(ch):
if isinstance(ch, str):
ch = ord(ch)
# Figure out the correct capability name for the keycode.
capability_name = _capability_names.get(ch)
if capability_name is None:
return False
    # Check the current terminal description for that capability;
    # if present, return true, else return false.
if _curses.tigetstr( capability_name ):
return True
else:
return False
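# Minimal illustrative use (requires an initialised curses terminal, as in
# the __main__ block below):
#   has_key(_curses.KEY_LEFT) is True only when the current terminfo entry
#   defines the matching capability ('kcub1' for KEY_LEFT).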
if __name__ == '__main__':
# Compare the output of this implementation and the ncurses has_key,
# on platforms where has_key is already available
try:
L = []
_curses.initscr()
for key in _capability_names.keys():
system = _curses.has_key(key)
python = has_key(key)
if system != python:
L.append( 'Mismatch for key %s, system=%i, Python=%i'
% (_curses.keyname( key ), system, python) )
finally:
_curses.endwin()
for i in L: print(i)
| mit |
donald-pinckney/EM-Simulator | EM Sim/EM Sim/py_lib/ctypes/macholib/framework.py | 268 | 2414 | ######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
"""
Generic framework path manipulation
"""
import re
__all__ = ['framework_info']
STRICT_FRAMEWORK_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>\w+).framework/
(?:Versions/(?P<version>[^/]+)/)?
(?P=shortname)
(?:_(?P<suffix>[^_]+))?
)$
""")
def framework_info(filename):
"""
A framework name can take one of the following four forms:
Location/Name.framework/Versions/SomeVersion/Name_Suffix
Location/Name.framework/Versions/SomeVersion/Name
Location/Name.framework/Name_Suffix
Location/Name.framework/Name
returns None if not found, or a mapping equivalent to:
dict(
location='Location',
name='Name.framework/Versions/SomeVersion/Name_Suffix',
shortname='Name',
version='SomeVersion',
suffix='Suffix',
)
Note that SomeVersion and Suffix are optional and may be None
if not present
"""
is_framework = STRICT_FRAMEWORK_RE.match(filename)
if not is_framework:
return None
return is_framework.groupdict()
def test_framework_info():
def d(location=None, name=None, shortname=None, version=None, suffix=None):
return dict(
location=location,
name=name,
shortname=shortname,
version=version,
suffix=suffix
)
assert framework_info('completely/invalid') is None
assert framework_info('completely/invalid/_debug') is None
assert framework_info('P/F.framework') is None
assert framework_info('P/F.framework/_debug') is None
assert framework_info('P/F.framework/F') == d('P', 'F.framework/F', 'F')
assert framework_info('P/F.framework/F_debug') == d('P', 'F.framework/F_debug', 'F', suffix='debug')
assert framework_info('P/F.framework/Versions') is None
assert framework_info('P/F.framework/Versions/A') is None
assert framework_info('P/F.framework/Versions/A/F') == d('P', 'F.framework/Versions/A/F', 'F', 'A')
assert framework_info('P/F.framework/Versions/A/F_debug') == d('P', 'F.framework/Versions/A/F_debug', 'F', 'A', 'debug')
if __name__ == '__main__':
test_framework_info()
| apache-2.0 |
Jannis/ardour3 | libs/pbd/pbd/signals.py | 1 | 10246 | #!/usr/bin/python
#
# Copyright (C) 2009-2012 Paul Davis
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
#
# This file generates the header signals_generated.h, which
# will be put in build/libs/pbd/pbd by waf.
#
# It is probably easier to read build/libs/pbd/pbd/signals_generated.h
# than this if you want to read the code!
#
from __future__ import print_function
import sys
if len(sys.argv) < 2:
print('Syntax: %s <path>' % sys.argv[0])
sys.exit(1)
f = open(sys.argv[1], 'w')
print("/** THIS FILE IS AUTOGENERATED by signals.py: CHANGES WILL BE LOST */\n", file=f)
# Produce a comma-separated string from a list of substrings,
# giving an optional prefix to each substring
def comma_separated(n, prefix = ""):
r = ""
for i in range(0, len(n)):
if i > 0:
r += ", "
r += "%s%s" % (prefix, n[i])
return r
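# For example, comma_separated(["A1", "A2"], "typename ") returns the string
# "typename A1, typename A2" (illustrative values, as used when emitting
# template parameter lists below).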
# Generate one SignalN class definition
# @param f File to write to
# @param n Number of parameters
# @param v True to specialize the template for a void return type
def signal(f, n, v):
# The parameters in the form A1, A2, A3, ...
An = []
for i in range(0, n):
An.append("A%d" % (i + 1))
# The parameters in the form A1 a1, A2 a2, A3 a3, ...
Anan = []
for a in An:
Anan.append('%s %s' % (a, a.lower()))
# The parameters in the form a1, a2, a3, ...
an = []
for a in An:
an.append(a.lower())
# If the template is fully specialized, use of typename SomeTypedef::iterator is illegal
# in c++03 (should use just SomeTypedef::iterator) [although use of typename is ok in c++0x]
# http://stackoverflow.com/questions/6076015/typename-outside-of-template
if n == 0 and v:
typename = ""
else:
typename = "typename "
if v:
print("/** A signal with %d parameters (specialisation for a void return) */" % n, file=f)
else:
print("/** A signal with %d parameters */" % n, file=f)
if v:
print("template <%s>" % comma_separated(An, "typename "), file=f)
print("class Signal%d<%s> : public SignalBase" % (n, comma_separated(["void"] + An)), file=f)
else:
print("template <%s>" % comma_separated(["R"] + An + ["C = OptionalLastValue<R> "], "typename "), file=f)
print("class Signal%d : public SignalBase" % n, file=f)
print("{", file=f)
print("public:", file=f)
print("", file=f)
if v:
print("\ttypedef boost::function<void(%s)> slot_function_type;" % comma_separated(An), file=f)
print("\ttypedef void result_type;", file=f)
else:
print("\ttypedef boost::function<R(%s)> slot_function_type;" % comma_separated(An), file=f)
print("\ttypedef boost::optional<R> result_type;", file=f)
print("", file=f)
print("private:", file=f)
print("""
/** The slots that this signal will call on emission */
typedef std::map<boost::shared_ptr<Connection>, slot_function_type> Slots;
Slots _slots;
""", file=f)
print("public:", file=f)
print("", file=f)
print("\t~Signal%d () {" % n, file=f)
print("\t\tGlib::Threads::Mutex::Lock lm (_mutex);", file=f)
print("\t\t/* Tell our connection objects that we are going away, so they don't try to call us */", file=f)
print("\t\tfor (%sSlots::iterator i = _slots.begin(); i != _slots.end(); ++i) {" % typename, file=f)
print("\t\t\ti->first->signal_going_away ();", file=f)
print("\t\t}", file=f)
print("\t}", file=f)
print("", file=f)
if n == 0:
p = ""
q = ""
else:
p = ", %s" % comma_separated(Anan)
q = ", %s" % comma_separated(an)
print("\tstatic void compositor (%sboost::function<void(%s)> f, EventLoop* event_loop, EventLoop::InvalidationRecord* ir%s) {" % (typename, comma_separated(An), p), file=f)
print("\t\tevent_loop->call_slot (ir, boost::bind (f%s));" % q, file=f)
print("\t}", file=f)
print("""
/** Arrange for @a slot to be executed whenever this signal is emitted.
Store the connection that represents this arrangement in @a c.
NOTE: @a slot will be executed in the same thread that the signal is
emitted in.
*/
void connect_same_thread (ScopedConnection& c, const slot_function_type& slot) {
c = _connect (slot);
}
/** Arrange for @a slot to be executed whenever this signal is emitted.
Add the connection that represents this arrangement to @a clist.
NOTE: @a slot will be executed in the same thread that the signal is
emitted in.
*/
void connect_same_thread (ScopedConnectionList& clist, const slot_function_type& slot) {
clist.add_connection (_connect (slot));
}
/** Arrange for @a slot to be executed in the context of @a event_loop
whenever this signal is emitted. Add the connection that represents
this arrangement to @a clist.
If the event loop/thread in which @a slot will be executed will
outlive the lifetime of any object referenced in @a slot,
then an InvalidationRecord should be passed, allowing
any request sent to the @a event_loop and not executed
before the object is destroyed to be marked invalid.
"outliving the lifetime" doesn't have a specific, detailed meaning,
but is best illustrated by two contrasting examples:
1) the main GUI event loop/thread - this will outlive more or
less all objects in the application, and thus when arranging for
@a slot to be called in that context, an invalidation record is
highly advisable.
2) a secondary event loop/thread which will be destroyed along
with the objects that are typically referenced by @a slot.
Assuming that the event loop is stopped before the objects are
destroyed, there is no reason to pass in an invalidation record,
and MISSING_INVALIDATOR may be used.
*/
void connect (ScopedConnectionList& clist,
PBD::EventLoop::InvalidationRecord* ir,
const slot_function_type& slot,
PBD::EventLoop* event_loop) {
if (ir) {
ir->event_loop = event_loop;
}
""", file=f)
u = []
for i in range(0, n):
u.append("_%d" % (i + 1))
if n == 0:
p = ""
else:
p = ", %s" % comma_separated(u)
print("\t\tclist.add_connection (_connect (boost::bind (&compositor, slot, event_loop, ir%s)));" % p, file=f)
print("""
}
/** See notes for the ScopedConnectionList variant of this function. This
* differs in that it stores the connection to the signal in a single
* ScopedConnection rather than a ScopedConnectionList.
*/
void connect (ScopedConnection& c,
PBD::EventLoop::InvalidationRecord* ir,
const slot_function_type& slot,
PBD::EventLoop* event_loop) {
if (ir) {
ir->event_loop = event_loop;
}
""", file=f)
print("\t\tc = _connect (boost::bind (&compositor, slot, event_loop, ir%s));" % p, file=f)
print("\t}", file=f)
print("""
/** Emit this signal. This will cause all slots connected to it be executed
in the order that they were connected (cross-thread issues may alter
the precise execution time of cross-thread slots).
*/
""", file=f)
if v:
print("\tvoid operator() (%s)" % comma_separated(Anan), file=f)
else:
print("\ttypename C::result_type operator() (%s)" % comma_separated(Anan), file=f)
print("\t{", file=f)
print("\t\t/* First, take a copy of our list of slots as it is now */", file=f)
print("", file=f)
print("\t\tSlots s;", file=f)
print("\t\t{", file=f)
print("\t\t\tGlib::Threads::Mutex::Lock lm (_mutex);", file=f)
print("\t\t\ts = _slots;", file=f)
print("\t\t}", file=f)
print("", file=f)
if not v:
print("\t\tstd::list<R> r;", file=f)
print("\t\tfor (%sSlots::iterator i = s.begin(); i != s.end(); ++i) {" % typename, file=f)
print("""
/* We may have just called a slot, and this may have resulted in
disconnection of other slots from us. The list copy means that
this won't cause any problems with invalidated iterators, but we
must check to see if the slot we are about to call is still on the list.
*/
bool still_there = false;
{
Glib::Threads::Mutex::Lock lm (_mutex);
still_there = _slots.find (i->first) != _slots.end ();
}
if (still_there) {""", file=f)
if v:
print("\t\t\t\t(i->second)(%s);" % comma_separated(an), file=f)
else:
print("\t\t\t\tr.push_back ((i->second)(%s));" % comma_separated(an), file=f)
print("\t\t\t}", file=f)
print("\t\t}", file=f)
print("", file=f)
if not v:
print("\t\t/* Call our combiner to do whatever is required to the result values */", file=f)
print("\t\tC c;", file=f)
print("\t\treturn c (r.begin(), r.end());", file=f)
print("\t}", file=f)
print("""
bool empty () {
Glib::Threads::Mutex::Lock lm (_mutex);
return _slots.empty ();
}
""", file=f)
if v:
tp = comma_separated(["void"] + An)
else:
tp = comma_separated(["R"] + An + ["C"])
print("private:", file=f)
print("", file=f)
print("\tfriend class Connection;", file=f)
print("""
boost::shared_ptr<Connection> _connect (slot_function_type f)
{
boost::shared_ptr<Connection> c (new Connection (this));
Glib::Threads::Mutex::Lock lm (_mutex);
_slots[c] = f;
return c;
}""", file=f)
print("""
void disconnect (boost::shared_ptr<Connection> c)
{
Glib::Threads::Mutex::Lock lm (_mutex);
_slots.erase (c);
}
};
""", file=f)
for i in range(0, 6):
signal(f, i, False)
signal(f, i, True)
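# The loop above emits Signal0 through Signal5, each in a general and a
# void-specialised variant, into the generated header file.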
| gpl-2.0 |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Utilities/TokenStorage/__init__.py | 4 | 1173 | from temboo.Library.Utilities.TokenStorage.DeleteToken import DeleteToken, DeleteTokenInputSet, DeleteTokenResultSet, DeleteTokenChoreographyExecution
from temboo.Library.Utilities.TokenStorage.GetTokenDetails import GetTokenDetails, GetTokenDetailsInputSet, GetTokenDetailsResultSet, GetTokenDetailsChoreographyExecution
from temboo.Library.Utilities.TokenStorage.IsLocked import IsLocked, IsLockedInputSet, IsLockedResultSet, IsLockedChoreographyExecution
from temboo.Library.Utilities.TokenStorage.IsValid import IsValid, IsValidInputSet, IsValidResultSet, IsValidChoreographyExecution
from temboo.Library.Utilities.TokenStorage.LockToken import LockToken, LockTokenInputSet, LockTokenResultSet, LockTokenChoreographyExecution
from temboo.Library.Utilities.TokenStorage.RetrieveToken import RetrieveToken, RetrieveTokenInputSet, RetrieveTokenResultSet, RetrieveTokenChoreographyExecution
from temboo.Library.Utilities.TokenStorage.SetValid import SetValid, SetValidInputSet, SetValidResultSet, SetValidChoreographyExecution
from temboo.Library.Utilities.TokenStorage.StoreToken import StoreToken, StoreTokenInputSet, StoreTokenResultSet, StoreTokenChoreographyExecution
| apache-2.0 |
shenyy/lily2-gem5 | src/cpu/testers/directedtest/RubyDirectedTester.py | 17 | 2482 | # Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
from m5.SimObject import SimObject
from MemObject import MemObject
from m5.params import *
from m5.proxy import *
class DirectedGenerator(SimObject):
type = 'DirectedGenerator'
abstract = True
num_cpus = Param.Int("num of cpus")
system = Param.System(Parent.any, "System we belong to")
class SeriesRequestGenerator(DirectedGenerator):
type = 'SeriesRequestGenerator'
addr_increment_size = Param.Int(64, "address increment size")
issue_writes = Param.Bool(True, "issue writes if true, otherwise reads")
class InvalidateGenerator(DirectedGenerator):
type = 'InvalidateGenerator'
addr_increment_size = Param.Int(64, "address increment size")
class RubyDirectedTester(MemObject):
type = 'RubyDirectedTester'
cpuPort = VectorMasterPort("the cpu ports")
requests_to_complete = Param.Int("checks to complete")
generator = Param.DirectedGenerator("the request generator")
| bsd-3-clause |
mpetyx/palmdrop | venv/lib/python2.7/site-packages/cms/tests/menu_utils.py | 7 | 3062 | from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.mock import AttributeObject
from django.http import HttpResponse
from menus.templatetags.menu_tags import PageLanguageUrl
from menus.utils import (simple_language_changer, find_selected,
language_changer_decorator)
class DumbPageLanguageUrl(PageLanguageUrl):
def __init__(self): pass
class MenuUtilsTests(CMSTestCase):
def get_simple_view(self):
def myview(request):
return HttpResponse('')
return myview
def test_simple_language_changer(self):
func = self.get_simple_view()
decorated_view = simple_language_changer(func)
# check we maintain the view name
self.assertEqual(func.__name__, decorated_view.__name__)
request = self.get_request('/', 'en')
response = decorated_view(request)
self.assertEqual(response.content, '')
fake_context = {'request': request}
tag = DumbPageLanguageUrl()
output = tag.get_context(fake_context, 'en')
url = output['content']
self.assertEqual(url, '/en/')
output = tag.get_context(fake_context, 'ja')
url = output['content']
self.assertEqual(url, '/ja/')
def test_default_language_changer(self):
view = self.get_simple_view()
# check we maintain the view name
self.assertEqual(view.__name__, view.__name__)
request = self.get_request('/en/', 'en')
response = view(request)
self.assertEqual(response.content, '')
fake_context = {'request': request}
tag = DumbPageLanguageUrl()
output = tag.get_context(fake_context, 'en')
url = output['content']
self.assertEqual(url, '/en/')
output = tag.get_context(fake_context, 'ja')
url = output['content']
self.assertEqual(url, '/ja/')
def test_language_changer_decorator(self):
def lang_changer(lang):
return "/%s/dummy/" % lang
decorated_view = language_changer_decorator(lang_changer)(self.get_simple_view())
request = self.get_request('/some/path/', 'en')
response = decorated_view(request)
self.assertEqual(response.content, '')
fake_context = {'request': request}
tag = DumbPageLanguageUrl()
output = tag.get_context(fake_context, 'en')
url = output['content']
self.assertEqual(url, '/en/dummy/')
output = tag.get_context(fake_context, 'ja')
url = output['content']
self.assertEqual(url, '/ja/dummy/')
def test_find_selected(self):
subchild = AttributeObject()
firstchild = AttributeObject(ancestor=True, children=[subchild])
selectedchild = AttributeObject(selected=True)
secondchild = AttributeObject(ancestor=True, children=[selectedchild])
root = AttributeObject(ancestor=True, children=[firstchild, secondchild])
nodes = [root]
selected = find_selected(nodes)
self.assertEqual(selected, selectedchild)
| apache-2.0 |
wolftankk/livestreamer | src/livestreamer/plugins/wattv.py | 32 | 2341 | import hashlib
import re
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http
from livestreamer.stream import HDSStream
# Got the secret from the swf with rev number location
# (tv/wat/player/media/Media.as)
TOKEN_SECRET = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'
_url_re = re.compile("http(s)?://(\w+\.)?wat.tv/")
_video_id_re = re.compile("href=\"http://m.wat.tv/video/([^\"]+)", re.IGNORECASE)
class WAT(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
res = http.get(self.url)
        match = _video_id_re.search(res.text)
if not match:
return
video_id = match.group(1)
# TODO: Replace with "yield from" when dropping Python 2.
for __ in self._create_streams('web', video_id).items():
yield __
for __ in self._create_streams('webhd', video_id).items():
yield __
def _create_streams(self, type_, video_id):
url = self._generate_security_url(type_, video_id)
res = http.get(url)
return HDSStream.parse_manifest(self.session, res.text, cookies=res.cookies)
def _generate_security_url(self, type_, video_id):
token = self._generate_security_token(type_, video_id)
return ("http://www.wat.tv/get/{type_}/{video_id}?token={token}"
"&domain=www.wat.tv&refererURL=wat.tv&revision=04.00.719%0A&"
"synd=0&helios=1&context=playerWat&pub=1&country=FR"
"&sitepage=WAT%2Ftv%2Ft%2Finedit%2Ftf1%2Fparamount_pictures_"
"france&lieu=wat&playerContext=CONTEXT_WAT&getURL=1"
"&version=LNX%2014,0,0,125").format(**locals())
def _generate_security_token(self, type_, video_id):
# Get timestamp
res = http.get('http://www.wat.tv/servertime')
timestamp = int(res.text.split('|')[0])
timestamp_hex = format(timestamp, 'x').rjust(8, '0')
# Player id
player_prefix = "/{0}/{1}".format(type_, video_id)
# Create the token
data = (TOKEN_SECRET + player_prefix + timestamp_hex).encode('utf8')
token = hashlib.md5(data)
token = "{0}/{1}".format(token.hexdigest(), timestamp_hex)
return token
__plugin__ = WAT
| bsd-2-clause |
DragonMastur/FakeRealNewsDetection | Serverless Functions/requests/packages/chardet/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
| agpl-3.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/requests/requests/packages/urllib3/contrib/pyopenssl.py | 215 | 10101 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
try:
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
raise ImportError(e)
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
pass
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
"ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
"!aNULL:!MD5:!DSS"
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
'''ASN.1 implementation for subjectAltNames support'''
# There is no limit to how many SAN certificates a certificate may have,
# however this needs to have some limit so we'll set an arbitrarily high
# limit.
sizeSpec = univ.SequenceOf.sizeSpec + \
constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
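# get_subj_alt_name() returns the certificate's subjectAltName DNS entries as
# plain strings, e.g. (illustrative): ['example.com', 'www.example.com'].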
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
'''
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return b''
else:
raise
except OpenSSL.SSL.ZeroReturnError as e:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return b''
else:
raise
except OpenSSL.SSL.WantReadError:
rd, wd, ed = select.select(
[self.socket], [], [], self.socket.gettimeout())
if not rd:
raise timeout('The read operation timed out')
else:
return self.recv(*args, **kwargs)
else:
return data
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
_, wlist, _ = select.select([], [self.socket], [],
self.socket.gettimeout())
if not wlist:
raise timeout()
continue
def sendall(self, data):
while len(data):
sent = self._send_until_done(data)
data = data[sent:]
def close(self):
if self._makefile_refs < 1:
return self.connection.shutdown()
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
keyfile = keyfile or certfile # Match behaviour of the normal python ssl library
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
else:
ctx.set_default_verify_paths()
    # Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
ctx.set_options(OP_NO_COMPRESSION)
# Set list of supported ciphersuites.
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
select.select([sock], [], [])
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)
| lgpl-3.0 |
eshioji/omibot | ampel.py | 1 | 1487 | import subprocess
import threading
import time
import traceback
import common
import config
class Ampel:
def __init__(self, base_command):
self.base_command = base_command
def signal(self, order, switchto):
switch = '1' if switchto else '0'
cmd = self.base_command + ['-as', str(order), switch]
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
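    # For illustration, if base_command were ['clewarecontrol', '-d', '123']
    # (hypothetical values), signal(2, True) would run:
    #   clewarecontrol -d 123 -as 2 1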
def flash(self, duration):
def do_flash():
try:
started = time.time()
while True:
elapsed = time.time() - started
if elapsed > duration:
[self.signal(x, 0) for x in range(3)]
return
else:
[self.signal(x, 1) for x in range(3)]
time.sleep(1)
[self.signal(x, 0) for x in range(3)]
except:
tb = traceback.format_exc()
common.error(tb)
flash_thread = threading.Thread(target=do_flash)
flash_thread.start()
return flash_thread
def check_status(self):
try:
out = subprocess.check_output(self.base_command + ['-l'], stderr=subprocess.STDOUT)
return 0, out
except subprocess.CalledProcessError as e:
return e.returncode, e.output
if __name__ == '__main__':
ampel = Ampel(config.cleware_exec)
ret, out = ampel.signal(0, 1)
| apache-2.0 |
bt3gl/Python-and-Algorithms-and-Data-Structures | First_edition_2014/ebook_src/builtin_structures/permutations.py | 2 | 1171 | #!/usr/bin/env python
__author__ = "bt3"
def perm(str1):
'''
>>> perm('123')
['123', '132', '231', '213', '312', '321']
'''
if len(str1) < 2:
return str1
res = []
for i, c in enumerate(str1):
for cc in perm(str1[i+1:] + str1[:i]):
res.append(c + cc)
return res
def perm2(str1):
'''
>>> perm2('123')
['123', '132', '213', '231', '312', '321']
'''
from itertools import permutations
return [''.join(p) for p in permutations(str1)]
def ispermutation(s1, s2):
'''
>>> ispermutation('231', '123')
True
>>> ispermutation('231', '153')
False
'''
from collections import Counter
aux = Counter()
for i in s1:
aux[i] += 1
for i in s2:
aux[i] -= 1
for v in aux.values():
if v != 0:
return False
return True
def ispermutation2(s1, s2):
'''
>>> ispermutation2('231', '123')
True
>>> ispermutation2('231', '153')
False
'''
if sorted(s1) == sorted(s2):
return True
else:
return False
if __name__ == '__main__':
import doctest
doctest.testmod()
| mit |
SAM-IT-SA/odoo | openerp/addons/base/tests/test_qweb.py | 289 | 4814 | # -*- coding: utf-8 -*-
import cgi
import json
import os.path
import glob
import re
import collections
from lxml import etree
import openerp.addons.base.ir.ir_qweb
import openerp.modules
from openerp.tests import common
from openerp.addons.base.ir import ir_qweb
class TestQWebTField(common.TransactionCase):
def setUp(self):
super(TestQWebTField, self).setUp()
self.engine = self.registry('ir.qweb')
def context(self, values):
return ir_qweb.QWebContext(
self.cr, self.uid, values, context={'inherit_branding': True})
def test_trivial(self):
field = etree.Element('span', {'t-field': u'company.name'})
Companies = self.registry('res.company')
company_id = Companies.create(self.cr, self.uid, {
'name': "My Test Company"
})
result = self.engine.render_node(field, self.context({
'company': Companies.browse(self.cr, self.uid, company_id),
}))
self.assertEqual(
result,
'<span data-oe-model="res.company" data-oe-id="%d" '
'data-oe-field="name" data-oe-type="char" '
'data-oe-expression="company.name">%s</span>' % (
company_id,
"My Test Company",))
def test_i18n(self):
field = etree.Element('span', {'t-field': u'company.name'})
Companies = self.registry('res.company')
s = u"Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20% off!"
company_id = Companies.create(self.cr, self.uid, {
'name': s,
})
result = self.engine.render_node(field, self.context({
'company': Companies.browse(self.cr, self.uid, company_id),
}))
self.assertEqual(
result,
'<span data-oe-model="res.company" data-oe-id="%d" '
'data-oe-field="name" data-oe-type="char" '
'data-oe-expression="company.name">%s</span>' % (
company_id,
cgi.escape(s.encode('utf-8')),))
def test_reject_crummy_tags(self):
field = etree.Element('td', {'t-field': u'company.name'})
with self.assertRaisesRegexp(
AssertionError,
r'^RTE widgets do not work correctly'):
self.engine.render_node(field, self.context({
'company': None
}))
def test_reject_t_tag(self):
field = etree.Element('t', {'t-field': u'company.name'})
with self.assertRaisesRegexp(
AssertionError,
r'^t-field can not be used on a t element'):
self.engine.render_node(field, self.context({
'company': None
}))
class TestQWeb(common.TransactionCase):
matcher = re.compile('^qweb-test-(.*)\.xml$')
@classmethod
def get_cases(cls):
path = cls.qweb_test_file_path()
return (
cls("test_qweb_{}".format(cls.matcher.match(f).group(1)))
for f in os.listdir(path)
# js inheritance
if f != 'qweb-test-extend.xml'
if cls.matcher.match(f)
)
@classmethod
def qweb_test_file_path(cls):
path = os.path.dirname(
openerp.modules.get_module_resource(
'web', 'static', 'lib', 'qweb', 'qweb2.js'))
return path
def __getattr__(self, item):
if not item.startswith('test_qweb_'):
raise AttributeError("No {} on {}".format(item, self))
f = 'qweb-test-{}.xml'.format(item[10:])
path = self.qweb_test_file_path()
return lambda: self.run_test_file(os.path.join(path, f))
def run_test_file(self, path):
context = openerp.addons.base.ir.ir_qweb.QWebContext(self.cr, self.uid, {})
qweb = self.env['ir.qweb']
doc = etree.parse(path).getroot()
qweb.load_document(doc, None, context)
for template in context.templates:
if template.startswith('_'): continue
param = doc.find('params[@id="{}"]'.format(template))
# OrderedDict to ensure JSON mappings are iterated in source order
# so output is predictable & repeatable
params = {} if param is None else json.loads(param.text, object_pairs_hook=collections.OrderedDict)
ctx = context.copy()
ctx.update(params)
result = doc.find('result[@id="{}"]'.format(template)).text
self.assertEqual(
qweb.render(template, qwebcontext=ctx).strip(),
(result or u'').strip().encode('utf-8'),
template
)
def load_tests(loader, suite, _):
# can't override TestQWeb.__dir__ because dir() called on *class* not
# instance
suite.addTests(TestQWeb.get_cases())
return suite
| agpl-3.0 |
ohagendorf/project_generator | project_generator/settings.py | 4 | 2317 | # Copyright 2014-2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os.path import normpath, join, pardir, sep
class ProjectSettings:
PROJECT_ROOT = os.environ.get('PROJECT_GENERATOR_ROOT') or join(pardir, pardir)
DEFAULT_TOOL = os.environ.get('PROJECT_GENERATOR_DEFAULT_TOOL') or 'uvision'
DEFAULT_EXPORT_LOCATION_FORMAT = join('generated_projects', '{tool}_{project_name}')
DEFAULT_ROOT = os.getcwd()
def __init__(self):
""" This are default enviroment settings for build tools. To override,
define them in the projects.yaml file. """
self.paths = {}
self.templates = {}
self.paths['uvision'] = os.environ.get('UV4') or join('C:', sep,
'Keil', 'UV4', 'UV4.exe')
self.paths['iar'] = os.environ.get('IARBUILD') or join(
'C:', sep, 'Program Files (x86)',
'IAR Systems', 'Embedded Workbench 7.0',
'common', 'bin')
self.paths['gcc'] = os.environ.get('ARM_GCC_PATH') or ''
self.export_location_format = self.DEFAULT_EXPORT_LOCATION_FORMAT
self.root = os.getcwd()
def update(self, settings):
if settings:
if 'tools' in settings:
for k, v in settings['tools'].items():
if k in self.paths:
if 'path' in v.keys():
self.paths[k] = v['path'][0]
if 'template' in v.keys():
self.templates[k] = v['template']
if 'export_dir' in settings:
self.export_location_format = normpath(settings['export_dir'][0])
if 'root' in settings:
self.root = normpath(settings['root'][0])
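    # A hypothetical `settings` mapping accepted by update() (mirroring the
    # keys read above; the concrete paths are illustrative only):
    #   {'tools': {'uvision': {'path': ['C:/Keil/UV4/UV4.exe']}},
    #    'export_dir': ['generated_projects/{tool}_{project_name}'],
    #    'root': ['.']}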
def get_env_settings(self, env_set):
return self.paths[env_set]
| apache-2.0 |
XiaodunServerGroup/xiaodun-platform | lms/djangoapps/wechat/tests/test_tabs.py | 13 | 14836 | from django.test import TestCase
from mock import MagicMock
from mock import patch
import courseware.tabs as tabs
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from .helpers import LoginEnrollmentTestCase
FAKE_REQUEST = None
def tab_constructor(active_page, course, user, tab={'name': 'same'}, generator=tabs._progress):
return generator(tab, user, course, active_page, FAKE_REQUEST)
class ProgressTestCase(TestCase):
def setUp(self):
self.user = MagicMock()
self.anonymous_user = MagicMock()
self.course = MagicMock()
self.user.is_authenticated.return_value = True
self.anonymous_user.is_authenticated.return_value = False
self.course.id = 'edX/toy/2012_Fall'
self.tab = {'name': 'same'}
self.progress_page = 'progress'
self.stagnation_page = 'stagnation'
def test_progress(self):
self.assertEqual(tab_constructor(self.stagnation_page, self.course, self.anonymous_user), [])
self.assertEqual(tab_constructor(self.progress_page, self.course, self.user)[0].name, 'same')
tab_list = tab_constructor(self.progress_page, self.course, self.user)
expected_link = reverse('progress', args=[self.course.id])
self.assertEqual(tab_list[0].link, expected_link)
self.assertEqual(tab_constructor(self.stagnation_page, self.course, self.user)[0].is_active, False)
self.assertEqual(tab_constructor(self.progress_page, self.course, self.user)[0].is_active, True)
class WikiTestCase(TestCase):
def setUp(self):
self.user = MagicMock()
self.course = MagicMock()
self.course.id = 'edX/toy/2012_Fall'
self.tab = {'name': 'same'}
self.wiki_page = 'wiki'
self.miki_page = 'miki'
@override_settings(WIKI_ENABLED=True)
def test_wiki_enabled(self):
tab_list = tab_constructor(self.wiki_page, self.course, self.user, generator=tabs._wiki)
self.assertEqual(tab_list[0].name, 'same')
tab_list = tab_constructor(self.wiki_page, self.course, self.user, generator=tabs._wiki)
expected_link = reverse('course_wiki', args=[self.course.id])
self.assertEqual(tab_list[0].link, expected_link)
tab_list = tab_constructor(self.wiki_page, self.course, self.user, generator=tabs._wiki)
self.assertEqual(tab_list[0].is_active, True)
tab_list = tab_constructor(self.miki_page, self.course, self.user, generator=tabs._wiki)
self.assertEqual(tab_list[0].is_active, False)
@override_settings(WIKI_ENABLED=False)
def test_wiki_enabled_false(self):
tab_list = tab_constructor(self.wiki_page, self.course, self.user, generator=tabs._wiki)
self.assertEqual(tab_list, [])
class ExternalLinkTestCase(TestCase):
def setUp(self):
self.user = MagicMock()
self.course = MagicMock()
self.tabby = {'name': 'same', 'link': 'blink'}
self.no_page = None
self.true = True
def test_external_link(self):
tab_list = tab_constructor(
self.no_page, self.course, self.user, tab=self.tabby, generator=tabs._external_link
)
self.assertEqual(tab_list[0].name, 'same')
tab_list = tab_constructor(
self.no_page, self.course, self.user, tab=self.tabby, generator=tabs._external_link
)
self.assertEqual(tab_list[0].link, 'blink')
tab_list = tab_constructor(
self.no_page, self.course, self.user, tab=self.tabby, generator=tabs._external_link
)
self.assertEqual(tab_list[0].is_active, False)
tab_list = tab_constructor(
self.true, self.course, self.user, tab=self.tabby, generator=tabs._external_link
)
self.assertEqual(tab_list[0].is_active, False)
class StaticTabTestCase(TestCase):
def setUp(self):
self.user = MagicMock()
self.course = MagicMock()
self.tabby = {'name': 'same', 'url_slug': 'schmug'}
self.course.id = 'edX/toy/2012_Fall'
self.schmug = 'static_tab_schmug'
self.schlug = 'static_tab_schlug'
def test_static_tab(self):
tab_list = tab_constructor(
self.schmug, self.course, self.user, tab=self.tabby, generator=tabs._static_tab
)
self.assertEqual(tab_list[0].name, 'same')
tab_list = tab_constructor(
self.schmug, self.course, self.user, tab=self.tabby, generator=tabs._static_tab
)
        expected_link = reverse('static_tab', args=[self.course.id, self.tabby['url_slug']])
self.assertEqual(tab_list[0].link, expected_link)
tab_list = tab_constructor(
self.schmug, self.course, self.user, tab=self.tabby, generator=tabs._static_tab
)
self.assertEqual(tab_list[0].is_active, True)
tab_list = tab_constructor(
self.schlug, self.course, self.user, tab=self.tabby, generator=tabs._static_tab
)
self.assertEqual(tab_list[0].is_active, False)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StaticTabDateTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
def setUp(self):
self.course = CourseFactory.create()
self.page = ItemFactory.create(
category="static_tab", parent_location=self.course.location,
data="OOGIE BLOOGIE", display_name="new_tab"
)
# The following XML course is closed; we're testing that
# static tabs still appear when the course is already closed
self.xml_data = "static 463139"
self.xml_url = "8e4cce2b4aaf4ba28b1220804619e41f"
self.xml_course_id = 'edX/detached_pages/2014'
def test_logged_in(self):
self.setup_user()
url = reverse('static_tab', args=[self.course.id, 'new_tab'])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
def test_anonymous_user(self):
url = reverse('static_tab', args=[self.course.id, 'new_tab'])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('static_tab', args=[self.xml_course_id, self.xml_url])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('static_tab', args=[self.xml_course_id, self.xml_url])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
class TextbooksTestCase(TestCase):
def setUp(self):
self.user = MagicMock()
self.anonymous_user = MagicMock()
self.course = MagicMock()
self.tab = MagicMock()
A = MagicMock()
T = MagicMock()
A.title = 'Algebra'
T.title = 'Topology'
self.course.textbooks = [A, T]
self.user.is_authenticated.return_value = True
self.anonymous_user.is_authenticated.return_value = False
self.course.id = 'edX/toy/2012_Fall'
self.textbook_0 = 'textbook/0'
self.textbook_1 = 'textbook/1'
self.prohibited_page = 'you_shouldnt_be_seein_this'
@override_settings(FEATURES={'ENABLE_TEXTBOOK': True})
def test_textbooks1(self):
tab_list = tab_constructor(
self.textbook_0, self.course, self.user, tab=self.tab, generator=tabs._textbooks
)
self.assertEqual(tab_list[0].name, 'Algebra')
tab_list = tab_constructor(
self.textbook_0, self.course, self.user, tab=self.tab, generator=tabs._textbooks
)
expected_link = reverse('book', args=[self.course.id, 0])
self.assertEqual(tab_list[0].link, expected_link)
tab_list = tab_constructor(
self.textbook_0, self.course, self.user, tab=self.tab, generator=tabs._textbooks
)
self.assertEqual(tab_list[0].is_active, True)
tab_list = tab_constructor(
self.prohibited_page, self.course, self.user, tab=self.tab, generator=tabs._textbooks
)
self.assertEqual(tab_list[0].is_active, False)
tab_list = tab_constructor(
self.textbook_1, self.course, self.user, tab=self.tab, generator=tabs._textbooks
)
self.assertEqual(tab_list[1].name, 'Topology')
tab_list = tab_constructor(
self.textbook_1, self.course, self.user, tab=self.tab, generator=tabs._textbooks
)
expected_link = reverse('book', args=[self.course.id, 1])
self.assertEqual(tab_list[1].link, expected_link)
tab_list = tab_constructor(
self.textbook_1, self.course, self.user, tab=self.tab, generator=tabs._textbooks
)
self.assertEqual(tab_list[1].is_active, True)
tab_list = tab_constructor(
self.prohibited_page, self.course, self.user, tab=self.tab, generator=tabs._textbooks
)
self.assertEqual(tab_list[1].is_active, False)
@override_settings(FEATURES={'ENABLE_TEXTBOOK': False})
def test_textbooks0(self):
tab_list = tab_constructor(
self.prohibited_page, self.course, self.user, tab=self.tab, generator=tabs._textbooks
)
self.assertEqual(tab_list, [])
tab_list = tab_constructor(
self.prohibited_page, self.course, self.anonymous_user, tab=self.tab, generator=tabs._textbooks
)
self.assertEqual(tab_list, [])
class KeyCheckerTestCase(TestCase):
def setUp(self):
self.valid_keys = ['a', 'b']
self.invalid_keys = ['a', 'v', 'g']
self.dictio = {'a': 1, 'b': 2, 'c': 3}
def test_key_checker(self):
self.assertIsNone(tabs.key_checker(self.valid_keys)(self.dictio))
self.assertRaises(tabs.InvalidTabsException,
tabs.key_checker(self.invalid_keys), self.dictio)
class NullValidatorTestCase(TestCase):
def setUp(self):
self.dummy = {}
def test_null_validator(self):
self.assertIsNone(tabs.null_validator(self.dummy))
class ValidateTabsTestCase(TestCase):
def setUp(self):
self.courses = [MagicMock() for i in range(0, 5)]
self.courses[0].tabs = None
self.courses[1].tabs = [{'type': 'courseware'}, {'type': 'fax'}]
self.courses[2].tabs = [{'type': 'shadow'}, {'type': 'course_info'}]
self.courses[3].tabs = [{'type': 'courseware'}, {'type': 'course_info', 'name': 'alice'},
{'type': 'wiki', 'name': 'alice'}, {'type': 'discussion', 'name': 'alice'},
{'type': 'external_link', 'name': 'alice', 'link': 'blink'},
{'type': 'textbooks'}, {'type': 'progress', 'name': 'alice'},
{'type': 'static_tab', 'name': 'alice', 'url_slug': 'schlug'},
{'type': 'staff_grading'}]
self.courses[4].tabs = [{'type': 'courseware'}, {'type': 'course_info'}, {'type': 'flying'}]
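    # courses[0] (tabs unset) and courses[3] (a full, valid layout) should
    # validate; courses[1], [2] and [4] contain unknown tab types or do not
    # begin with the mandatory courseware/course_info pair.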
def test_validate_tabs(self):
self.assertIsNone(tabs.validate_tabs(self.courses[0]))
self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[1])
self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[2])
self.assertIsNone(tabs.validate_tabs(self.courses[3]))
self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[4])
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class DiscussionLinkTestCase(ModuleStoreTestCase):
def setUp(self):
self.tabs_with_discussion = [
{'type': 'courseware'},
{'type': 'course_info'},
{'type': 'discussion'},
{'type': 'textbooks'},
]
self.tabs_without_discussion = [
{'type': 'courseware'},
{'type': 'course_info'},
{'type': 'textbooks'},
]
@staticmethod
def _patch_reverse(course):
def patched_reverse(viewname, args):
if viewname == "django_comment_client.forum.views.forum_form_discussion" and args == [course.id]:
return "default_discussion_link"
else:
return None
return patch("courseware.tabs.reverse", patched_reverse)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": False})
def test_explicit_discussion_link(self):
"""Test that setting discussion_link overrides everything else"""
course = CourseFactory.create(discussion_link="other_discussion_link", tabs=self.tabs_with_discussion)
self.assertEqual(tabs.get_discussion_link(course), "other_discussion_link")
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": False})
def test_discussions_disabled(self):
"""Test that other cases return None with discussions disabled"""
for i, t in enumerate([None, self.tabs_with_discussion, self.tabs_without_discussion]):
course = CourseFactory.create(tabs=t, number=str(i))
self.assertEqual(tabs.get_discussion_link(course), None)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def test_no_tabs(self):
"""Test a course without tabs configured"""
course = CourseFactory.create(tabs=None)
with self._patch_reverse(course):
self.assertEqual(tabs.get_discussion_link(course), "default_discussion_link")
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def test_tabs_with_discussion(self):
"""Test a course with a discussion tab configured"""
course = CourseFactory.create(tabs=self.tabs_with_discussion)
with self._patch_reverse(course):
self.assertEqual(tabs.get_discussion_link(course), "default_discussion_link")
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def test_tabs_without_discussion(self):
"""Test a course with tabs configured but without a discussion tab"""
course = CourseFactory.create(tabs=self.tabs_without_discussion)
self.assertEqual(tabs.get_discussion_link(course), None)
| agpl-3.0 |
Lynx187/script.module.urlresolver | lib/urlresolver/plugins/facebook.py | 4 | 3010 | '''
facebook urlresolver plugin
Copyright (C) 2013 icharania
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urllib
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class FacebookResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "facebook"
domains = [ "facebook.com" ]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
link = self.net.http_GET(web_url).content
if link.find('Video Unavailable') >= 0:
err_message = 'The requested video was not found.'
raise UrlResolver.ResolverError(err_message)
params = re.compile('"params","([\w\%\-\.\\\]+)').findall(link)[0]
html = urllib.unquote(params.replace('\u0025', '%')).decode('utf-8')
html = html.replace('\\', '')
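        # Both hd_src and sd_src URLs are captured; the HD source, when
        # present, precedes the SD one in the page markup, so index 0 is the
        # highest available quality.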
videoUrl = re.compile('(?:hd_src|sd_src)\":\"([\w\-\.\_\/\&\=\:\?]+)').findall(html)
vUrl = ''
vUrlsCount = len(videoUrl)
if vUrlsCount > 0:
q = self.get_setting('quality')
if q == '0':
# Highest Quality
vUrl = videoUrl[0]
else:
# Standard Quality
vUrl = videoUrl[vUrlsCount - 1]
return vUrl
else:
raise UrlResolver.ResolverError('No playable video found.')
def get_url(self, host, media_id):
return 'https://www.facebook.com/video/embed?video_id=%s' % media_id
def get_host_and_id(self, url):
        r = re.search('//(.+?)/video/embed\?video_id=(\w+)', url)
return r.groups()
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
        return re.match('https?://(www\.)?facebook\.com/video/embed\?video_id=(\w+)', url) or \
            self.name in host
#PluginSettings methods
def get_settings_xml(self):
xml = PluginSettings.get_settings_xml(self)
xml += '<setting label="Video Quality" id="%s_quality" ' % self.__class__.__name__
xml += 'type="enum" values="High|Standard" default="0" />\n'
return xml
| gpl-2.0 |
javachengwc/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/maps/google/zoom.py | 224 | 6622 | from django.contrib.gis.geos import GEOSGeometry, LinearRing, Polygon, Point
from django.contrib.gis.maps.google.gmap import GoogleMapException
from django.utils.six.moves import xrange
from math import pi, sin, log, exp, atan
# Constants used for degree to radian conversion, and vice-versa.
DTOR = pi / 180.
RTOD = 180. / pi
class GoogleZoom(object):
"""
GoogleZoom is a utility for performing operations related to the zoom
levels on Google Maps.
This class is inspired by the OpenStreetMap Mapnik tile generation routine
`generate_tiles.py`, and the article "How Big Is the World" (Hack #16) in
"Google Maps Hacks" by Rich Gibson and Schuyler Erle.
`generate_tiles.py` may be found at:
http://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles.py
"Google Maps Hacks" may be found at http://safari.oreilly.com/0596101619
"""
def __init__(self, num_zoom=19, tilesize=256):
"Initializes the Google Zoom object."
# Google's tilesize is 256x256, square tiles are assumed.
self._tilesize = tilesize
# The number of zoom levels
self._nzoom = num_zoom
# Initializing arrays to hold the parameters for each one of the
# zoom levels.
self._degpp = [] # Degrees per pixel
self._radpp = [] # Radians per pixel
self._npix = [] # 1/2 the number of pixels for a tile at the given zoom level
# Incrementing through the zoom levels and populating the parameter arrays.
z = tilesize # The number of pixels per zoom level.
for i in xrange(num_zoom):
            # Getting the degrees and radians per pixel, and 1/2 the number of
            # pixels for every zoom level.
self._degpp.append(z / 360.) # degrees per pixel
self._radpp.append(z / (2 * pi)) # radians per pixel
self._npix.append(z / 2) # number of pixels to center of tile
# Multiplying `z` by 2 for the next iteration.
z *= 2
def __len__(self):
"Returns the number of zoom levels."
return self._nzoom
def get_lon_lat(self, lonlat):
"Unpacks longitude, latitude from GEOS Points and 2-tuples."
if isinstance(lonlat, Point):
lon, lat = lonlat.coords
else:
lon, lat = lonlat
return lon, lat
def lonlat_to_pixel(self, lonlat, zoom):
"Converts a longitude, latitude coordinate pair for the given zoom level."
# Setting up, unpacking the longitude, latitude values and getting the
# number of pixels for the given zoom level.
lon, lat = self.get_lon_lat(lonlat)
npix = self._npix[zoom]
# Calculating the pixel x coordinate by multiplying the longitude value
        # with the number of degrees/pixel at the given zoom level.
px_x = round(npix + (lon * self._degpp[zoom]))
# Creating the factor, and ensuring that 1 or -1 is not passed in as the
# base to the logarithm. Here's why:
# if fac = -1, we'll get log(0) which is undefined;
# if fac = 1, our logarithm base will be divided by 0, also undefined.
fac = min(max(sin(DTOR * lat), -0.9999), 0.9999)
# Calculating the pixel y coordinate.
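        # Spherical Mercator: y = 0.5 * ln((1 + sin(lat)) / (1 - sin(lat))),
        # scaled by radians-per-pixel and negated because pixel y increases
        # downward.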
px_y = round(npix + (0.5 * log((1 + fac)/(1 - fac)) * (-1.0 * self._radpp[zoom])))
# Returning the pixel x, y to the caller of the function.
return (px_x, px_y)
def pixel_to_lonlat(self, px, zoom):
"Converts a pixel to a longitude, latitude pair at the given zoom level."
if len(px) != 2:
raise TypeError('Pixel should be a sequence of two elements.')
# Getting the number of pixels for the given zoom level.
npix = self._npix[zoom]
# Calculating the longitude value, using the degrees per pixel.
lon = (px[0] - npix) / self._degpp[zoom]
# Calculating the latitude value.
lat = RTOD * ( 2 * atan(exp((px[1] - npix)/ (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
# Returning the longitude, latitude coordinate pair.
return (lon, lat)
def tile(self, lonlat, zoom):
"""
Returns a Polygon corresponding to the region represented by a fictional
Google Tile for the given longitude/latitude pair and zoom level. This
tile is used to determine the size of a tile at the given point.
"""
# The given lonlat is the center of the tile.
delta = self._tilesize / 2
# Getting the pixel coordinates corresponding to the
# the longitude/latitude.
px = self.lonlat_to_pixel(lonlat, zoom)
# Getting the lower-left and upper-right lat/lon coordinates
# for the bounding box of the tile.
ll = self.pixel_to_lonlat((px[0]-delta, px[1]-delta), zoom)
ur = self.pixel_to_lonlat((px[0]+delta, px[1]+delta), zoom)
# Constructing the Polygon, representing the tile and returning.
return Polygon(LinearRing(ll, (ll[0], ur[1]), ur, (ur[0], ll[1]), ll), srid=4326)
def get_zoom(self, geom):
"Returns the optimal Zoom level for the given geometry."
# Checking the input type.
if not isinstance(geom, GEOSGeometry) or geom.srid != 4326:
raise TypeError('get_zoom() expects a GEOS Geometry with an SRID of 4326.')
# Getting the envelope for the geometry, and its associated width, height
# and centroid.
env = geom.envelope
env_w, env_h = self.get_width_height(env.extent)
center = env.centroid
for z in xrange(self._nzoom):
# Getting the tile at the zoom level.
tile_w, tile_h = self.get_width_height(self.tile(center, z).extent)
# When we span more than one tile, this is an approximately good
# zoom level.
if (env_w > tile_w) or (env_h > tile_h):
if z == 0:
raise GoogleMapException('Geometry width and height should not exceed that of the Earth.')
return z-1
# Otherwise, we've zoomed in to the max.
return self._nzoom-1
def get_width_height(self, extent):
"""
Returns the width and height for the given extent.
"""
# Getting the lower-left, upper-left, and upper-right
# coordinates from the extent.
ll = Point(extent[:2])
ul = Point(extent[0], extent[3])
ur = Point(extent[2:])
# Calculating the width and height.
height = ll.distance(ul)
width = ul.distance(ur)
return width, height
| apache-2.0 |
JonathanStein/odoo | addons/membership/report/report_membership.py | 313 | 5267 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
import openerp.addons.decimal_precision as dp
STATE = [
('none', 'Non Member'),
('canceled', 'Cancelled Member'),
('old', 'Old Member'),
('waiting', 'Waiting Member'),
('invoiced', 'Invoiced Member'),
('free', 'Free Member'),
('paid', 'Paid Member'),
]
class report_membership(osv.osv):
'''Membership Analysis'''
_name = 'report.membership'
_description = __doc__
_auto = False
_rec_name = 'start_date'
_columns = {
'start_date': fields.date('Start Date', readonly=True),
'date_to': fields.date('End Date', readonly=True, help="End membership date"),
'num_waiting': fields.integer('# Waiting', readonly=True),
'num_invoiced': fields.integer('# Invoiced', readonly=True),
'num_paid': fields.integer('# Paid', readonly=True),
'tot_pending': fields.float('Pending Amount', digits_compute= dp.get_precision('Account'), readonly=True),
'tot_earned': fields.float('Earned Amount', digits_compute= dp.get_precision('Account'), readonly=True),
'partner_id': fields.many2one('res.partner', 'Member', readonly=True),
'associate_member_id': fields.many2one('res.partner', 'Associate Member', readonly=True),
'membership_id': fields.many2one('product.product', 'Membership Product', readonly=True),
'membership_state': fields.selection(STATE, 'Current Membership State', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'quantity': fields.integer("Quantity", readonly=True),
}
def init(self, cr):
'''Create the view'''
tools.drop_view_if_exists(cr, 'report_membership')
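        # The inner SELECT classifies each membership line by state for every
        # partner; the outer SELECT aggregates the counts and amounts so each
        # row summarizes one (partner, membership product, period) combination.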
cr.execute("""
CREATE OR REPLACE VIEW report_membership AS (
SELECT
MIN(id) AS id,
partner_id,
count(membership_id) as quantity,
user_id,
membership_state,
associate_member_id,
membership_amount,
date_to,
start_date,
COUNT(num_waiting) AS num_waiting,
COUNT(num_invoiced) AS num_invoiced,
COUNT(num_paid) AS num_paid,
SUM(tot_pending) AS tot_pending,
SUM(tot_earned) AS tot_earned,
membership_id,
company_id
FROM
(SELECT
MIN(p.id) AS id,
p.id AS partner_id,
p.user_id AS user_id,
p.membership_state AS membership_state,
p.associate_member AS associate_member_id,
p.membership_amount AS membership_amount,
p.membership_stop AS date_to,
p.membership_start AS start_date,
CASE WHEN ml.state = 'waiting' THEN ml.id END AS num_waiting,
CASE WHEN ml.state = 'invoiced' THEN ml.id END AS num_invoiced,
CASE WHEN ml.state = 'paid' THEN ml.id END AS num_paid,
CASE WHEN ml.state IN ('waiting', 'invoiced') THEN SUM(il.price_subtotal) ELSE 0 END AS tot_pending,
CASE WHEN ml.state = 'paid' OR p.membership_state = 'old' THEN SUM(il.price_subtotal) ELSE 0 END AS tot_earned,
ml.membership_id AS membership_id,
p.company_id AS company_id
FROM res_partner p
LEFT JOIN membership_membership_line ml ON (ml.partner = p.id)
LEFT JOIN account_invoice_line il ON (ml.account_invoice_line = il.id)
LEFT JOIN account_invoice ai ON (il.invoice_id = ai.id)
WHERE p.membership_state != 'none' and p.active = 'true'
GROUP BY
p.id,
p.user_id,
p.membership_state,
p.associate_member,
p.membership_amount,
p.membership_start,
ml.membership_id,
p.company_id,
ml.state,
ml.id
) AS foo
GROUP BY
start_date,
date_to,
partner_id,
user_id,
membership_id,
company_id,
membership_state,
associate_member_id,
membership_amount
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
calebjordan/klayout-macros | pymacros/Write Images.py | 1 | 4299 | # $description: Write Image Distribution from Wafer
# $autorun
# $show-in-menu
import pya
import sys
from math import copysign
from numpy import arange
sys.stderr = sys.stdout
class MenuAction(pya.Action):
def __init__(self, title, shortcut, action):
self.title = title
self.shortcut = shortcut
self.action = action
def triggered(self):
self.action()
def write_distribution(job_file, cv, dbu):
#For cellinstarray in wafer
#Loop through various layouts on wafer
#For cellinst in cellinstarray
#Loop through all dies in particular layout, (cell selection)
#For images in cellinst
#Loop through images in die, writing entry for each image
for inst in cv.cell.each_inst():
subcell = inst.cell
# Determine indexes for cell selection
if copysign(1, inst.a.x) == 1:
start_index_x = 0
else:
start_index_x = 1
if copysign(1, inst.b.y) == 1:
start_index_y = 0
else:
start_index_y = 1
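        # copysign() carries the sign of the instance-array axis vectors into
        # the die indices, so the exported CELL_SELECTION coordinates count in
        # the direction the array was actually placed on the wafer.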
x_index = arange(copysign(start_index_x, inst.a.x), copysign(inst.na, inst.a.x) + copysign(start_index_x, inst.a.x), copysign(1, inst.a.x))
y_index = arange(copysign(start_index_y, inst.b.y), copysign(inst.nb, inst.b.y) + copysign(start_index_y, inst.b.y), copysign(1, inst.b.y))
# Write each die type
print("\nPrinting {} dies containing {}".format(inst.na*inst.nb, subcell.basic_name()))
for i in x_index:
for j in y_index:
print("\tPrinting die at {:.0f}, {:.0f}".format(i, j))
for image in subcell.each_inst():
#Get position
itrans = pya.ICplxTrans.from_trans(pya.CplxTrans())
box = image.bbox().transformed(itrans)
x = box.center().x*dbu/1000.
y = box.center().y*dbu/1000.
#Write definition
text = 'START_SECTION IMAGE_DISTRIBUTION\n'
text += '\tIMAGE_ID "{}"\n'.format(image.cell.basic_name())
text += '\tINSTANCE_ID "{}"\n'.format("<Default>")
text += '\tCELL_SELECTION "{:.0f}" "{:.0f}"\n'.format(i, j)
text += '\tDISTRIBUTION_ACTION "I"\n'
text += '\tOPTIMIZE_ROUTE "N"\n'
text += '\tIMAGE_CELL_SHIFT {:.06f} {:.06f}\n'.format(x, y)
text += 'END_SECTION\n\n'
print(text)
job_file.write(text)
#for image in subcell.each_inst():
#print(image.cell.basic_name())
'''text = 'START_SECTION IMAGE_DISTRIBUTION\n'
text += '\tIMAGE_ID "{}"\n'.format()
text += '\tINSTANCE_ID "{}"\n'.format()
text += '\tCELL_SELECTION "{}" "{}"\n'.format()
text += '\tDISTRIBUTION_ACTION "I"\n'
text += '\tOPTIMIZE_ROUTE "N"\n'
text += '\tIMAGE_CELL_SHIFT {} {}\n'.format()
text += 'END_SECTION\n\n'
'''
def write_images():
#Load View
app = pya.Application.instance()
mw = app.main_window()
    lv = mw.current_view()
    if lv is None:
        raise Exception("No view selected")
    ly = lv.active_cellview().layout()
    dbu = ly.dbu
    cv = lv.cellview(lv.active_cellview_index())
    # get the current cell view (Make sure you're running this on your mask)
    if not cv.is_valid():
        raise Exception("No layout selected")
    if cv.cell.name.lower() != "wafer":
        raise Exception("Make your \"Wafer\" cell your current view.")
#Get parameters
filename = pya.FileDialog.ask_save_file_name("ASML Job file", "test.asml", "ASML files (*.asml *.txt)")
if filename:
job_file = open(filename, 'a')
#Print image distribution
write_distribution(job_file, cv, dbu)
#Print layer definition
#Print process data
#Print reticle data
job_file.close()
x = MenuAction("Write Image Distribution", "", write_images)
app = pya.Application.instance()
mw = app.main_window()
menu = mw.menu()
menu.insert_separator("@hcp_context_menu.end", "sep_write_images")
menu.insert_item("@hcp_context_menu.end", "write_images", x) | mit |
sbryan12144/BeastMode-Elite | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
		ret = self.tasks.__repr__()
		# origin_tostring() was never defined; show the originating event instead.
		ret += " (%s)" % self.event.__repr__()
		return ret
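# A TimeSlice records the state of every cpu runqueue between two consecutive
# scheduler events: each incoming event closes the current slice and opens the
# next, so the slice list forms a complete timeline of switches and migrations.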
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
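# SchedEventProxy receives the raw perf trace callbacks defined below and folds
# them into the TimeSliceList that the wx GUI renders.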
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
mkowoods/deep-learning | transfer-learning/tensorflow_vgg/vgg19.py | 153 | 4616 | import os
import tensorflow as tf
import numpy as np
import time
import inspect
VGG_MEAN = [103.939, 116.779, 123.68]
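# Per-channel means of the ImageNet training images, in BGR order; subtracting
# them reproduces the preprocessing the pretrained VGG weights expect.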
class Vgg19:
def __init__(self, vgg19_npy_path=None):
if vgg19_npy_path is None:
path = inspect.getfile(Vgg19)
path = os.path.abspath(os.path.join(path, os.pardir))
path = os.path.join(path, "vgg19.npy")
vgg19_npy_path = path
print(vgg19_npy_path)
self.data_dict = np.load(vgg19_npy_path, encoding='latin1').item()
print("npy file loaded")
def build(self, rgb):
"""
load variable from npy to build the VGG
:param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
"""
start_time = time.time()
print("build model started")
rgb_scaled = rgb * 255.0
# Convert RGB to BGR
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
assert red.get_shape().as_list()[1:] == [224, 224, 1]
assert green.get_shape().as_list()[1:] == [224, 224, 1]
assert blue.get_shape().as_list()[1:] == [224, 224, 1]
bgr = tf.concat(axis=3, values=[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
self.conv1_1 = self.conv_layer(bgr, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.conv3_4 = self.conv_layer(self.conv3_3, "conv3_4")
self.pool3 = self.max_pool(self.conv3_4, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.conv4_4 = self.conv_layer(self.conv4_3, "conv4_4")
self.pool4 = self.max_pool(self.conv4_4, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.conv5_4 = self.conv_layer(self.conv5_3, "conv5_4")
self.pool5 = self.max_pool(self.conv5_4, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
assert self.fc6.get_shape().as_list()[1:] == [4096]
self.relu6 = tf.nn.relu(self.fc6)
self.fc7 = self.fc_layer(self.relu6, "fc7")
self.relu7 = tf.nn.relu(self.fc7)
self.fc8 = self.fc_layer(self.relu7, "fc8")
self.prob = tf.nn.softmax(self.fc8, name="prob")
self.data_dict = None
print(("build model finished: %ds" % (time.time() - start_time)))
def avg_pool(self, bottom, name):
return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def max_pool(self, bottom, name):
return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def conv_layer(self, bottom, name):
with tf.variable_scope(name):
filt = self.get_conv_filter(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
return relu
def fc_layer(self, bottom, name):
with tf.variable_scope(name):
shape = bottom.get_shape().as_list()
dim = 1
for d in shape[1:]:
dim *= d
x = tf.reshape(bottom, [-1, dim])
weights = self.get_fc_weight(name)
biases = self.get_bias(name)
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
return fc
def get_conv_filter(self, name):
return tf.constant(self.data_dict[name][0], name="filter")
def get_bias(self, name):
return tf.constant(self.data_dict[name][1], name="biases")
def get_fc_weight(self, name):
return tf.constant(self.data_dict[name][0], name="weights")
| mit |
xiangel/hue | desktop/core/ext-py/Django-1.6.10/tests/admin_ordering/tests.py | 49 | 6672 | from __future__ import absolute_import, unicode_literals
from django.test import TestCase, RequestFactory
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.contrib.auth.models import User
from .models import (Band, Song, SongInlineDefaultOrdering,
SongInlineNewOrdering, DynOrderingBandAdmin)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class TestAdminOrdering(TestCase):
"""
Let's make sure that ModelAdmin.get_queryset uses the ordering we define
in ModelAdmin rather that ordering defined in the model's inner Meta
class.
"""
def setUp(self):
self.request_factory = RequestFactory()
b1 = Band(name='Aerosmith', bio='', rank=3)
b1.save()
b2 = Band(name='Radiohead', bio='', rank=1)
b2.save()
b3 = Band(name='Van Halen', bio='', rank=2)
b3.save()
def test_default_ordering(self):
"""
The default ordering should be by name, as specified in the inner Meta
class.
"""
ma = ModelAdmin(Band, None)
names = [b.name for b in ma.get_queryset(request)]
self.assertEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names)
def test_specified_ordering(self):
"""
Let's use a custom ModelAdmin that changes the ordering, and make sure
it actually changes.
"""
class BandAdmin(ModelAdmin):
ordering = ('rank',) # default ordering is ('name',)
ma = BandAdmin(Band, None)
names = [b.name for b in ma.get_queryset(request)]
self.assertEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names)
def test_dynamic_ordering(self):
"""
        Let's use a custom ModelAdmin that changes the ordering dynamically.
"""
super_user = User.objects.create(username='admin', is_superuser=True)
other_user = User.objects.create(username='other')
request = self.request_factory.get('/')
request.user = super_user
ma = DynOrderingBandAdmin(Band, None)
names = [b.name for b in ma.get_queryset(request)]
self.assertEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names)
request.user = other_user
names = [b.name for b in ma.get_queryset(request)]
self.assertEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names)
class TestInlineModelAdminOrdering(TestCase):
"""
Let's make sure that InlineModelAdmin.get_queryset uses the ordering we
define in InlineModelAdmin.
"""
def setUp(self):
b = Band(name='Aerosmith', bio='', rank=3)
b.save()
self.b = b
s1 = Song(band=b, name='Pink', duration=235)
s1.save()
s2 = Song(band=b, name='Dude (Looks Like a Lady)', duration=264)
s2.save()
s3 = Song(band=b, name='Jaded', duration=214)
s3.save()
def test_default_ordering(self):
"""
The default ordering should be by name, as specified in the inner Meta
class.
"""
inline = SongInlineDefaultOrdering(self.b, None)
names = [s.name for s in inline.get_queryset(request)]
self.assertEqual(['Dude (Looks Like a Lady)', 'Jaded', 'Pink'], names)
def test_specified_ordering(self):
"""
Let's check with ordering set to something different than the default.
"""
inline = SongInlineNewOrdering(self.b, None)
names = [s.name for s in inline.get_queryset(request)]
self.assertEqual(['Jaded', 'Pink', 'Dude (Looks Like a Lady)'], names)
class TestRelatedFieldsAdminOrdering(TestCase):
def setUp(self):
self.b1 = Band(name='Pink Floyd', bio='', rank=1)
self.b1.save()
self.b2 = Band(name='Foo Fighters', bio='', rank=5)
self.b2.save()
# we need to register a custom ModelAdmin (instead of just using
# ModelAdmin) because the field creator tries to find the ModelAdmin
# for the related model
class SongAdmin(admin.ModelAdmin):
pass
admin.site.register(Song, SongAdmin)
def check_ordering_of_field_choices(self, correct_ordering):
fk_field = admin.site._registry[Song].formfield_for_foreignkey(Song.band.field)
m2m_field = admin.site._registry[Song].formfield_for_manytomany(Song.other_interpreters.field)
self.assertEqual(list(fk_field.queryset), correct_ordering)
self.assertEqual(list(m2m_field.queryset), correct_ordering)
def test_no_admin_fallback_to_model_ordering(self):
# should be ordered by name (as defined by the model)
self.check_ordering_of_field_choices([self.b2, self.b1])
def test_admin_with_no_ordering_fallback_to_model_ordering(self):
class NoOrderingBandAdmin(admin.ModelAdmin):
pass
admin.site.register(Band, NoOrderingBandAdmin)
# should be ordered by name (as defined by the model)
self.check_ordering_of_field_choices([self.b2, self.b1])
def test_admin_ordering_beats_model_ordering(self):
class StaticOrderingBandAdmin(admin.ModelAdmin):
ordering = ('rank', )
admin.site.register(Band, StaticOrderingBandAdmin)
# should be ordered by rank (defined by the ModelAdmin)
self.check_ordering_of_field_choices([self.b1, self.b2])
def test_custom_queryset_still_wins(self):
"""Test that custom queryset has still precedence (#21405)"""
class SongAdmin(admin.ModelAdmin):
# Exclude one of the two Bands from the querysets
def formfield_for_foreignkey(self, db_field, **kwargs):
if db_field.name == 'band':
kwargs["queryset"] = Band.objects.filter(rank__gt=2)
return super(SongAdmin, self).formfield_for_foreignkey(db_field, **kwargs)
def formfield_for_manytomany(self, db_field, **kwargs):
if db_field.name == 'other_interpreters':
kwargs["queryset"] = Band.objects.filter(rank__gt=2)
return super(SongAdmin, self).formfield_for_foreignkey(db_field, **kwargs)
class StaticOrderingBandAdmin(admin.ModelAdmin):
ordering = ('rank',)
admin.site.unregister(Song)
admin.site.register(Song, SongAdmin)
admin.site.register(Band, StaticOrderingBandAdmin)
self.check_ordering_of_field_choices([self.b2])
def tearDown(self):
admin.site.unregister(Song)
if Band in admin.site._registry:
admin.site.unregister(Band)
| apache-2.0 |
Alpheus/simc | casc_extract/casc_extract.py | 4 | 6915 | #!/usr/bin/env python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import optparse, sys, os
import build_cfg, casc
import binascii
parser = optparse.OptionParser( usage = 'Usage: %prog -d wow_install_dir [options] file_path ...')
parser.add_option( '--cdn', dest = 'online', action = 'store_true', help = 'Fetch data from Blizzard CDN [only used for mode=batch/extract]' )
parser.add_option( '-m', '--mode', dest = 'mode', choices = [ 'batch', 'unpack', 'extract', 'fieldlist' ],
help = 'Extraction mode: "batch" for file extraction, "unpack" for BLTE file unpack, "extract" for key or MD5 based file extract from local game client files' )
parser.add_option( '-b', '--dbfile', dest = 'dbfile', type = 'string', default = 'dbfile',
help = "A textual file containing a list of file paths to extract [default dbfile, only needed for mode=batch]" )
parser.add_option( '-r', '--root', dest = 'root_file', type = 'string', default = 'root',
help = 'Root file path location [default CACHE_DIR/root, only needed if --cdn is not set]' )
parser.add_option( '-e', '--encoding', dest = 'encoding_file', type = 'string', default = 'encoding',
help = 'Encoding file path location [default CACHE_DIR/encoding, only needed if --cdn is not set]' )
parser.add_option( '-d', '--datadir', dest = 'data_dir', type = 'string',
help = 'World of Warcraft install directory [only needed if --cdn is not set]' )
parser.add_option( '-o', '--output', type = 'string', dest = 'output',
help = "Output directory for dbc mode, output file name for unpack mode" )
parser.add_option( '-x', '--cache', type = 'string', dest = 'cache', default = 'cache', help = 'Cache directory [default cache]' )
parser.add_option( '--ptr', action = 'store_true', dest = 'ptr', default = False, help = 'Download PTR files [default no, only used for --cdn]' )
parser.add_option( '--beta', action = 'store_true', dest = 'beta', default = False, help = 'Download Beta files [default no, only used for --cdn]' )
parser.add_option( '--locale', action = 'store', dest = 'locale', default = 'en_US', help = 'Extraction locale [default en_US, only used for --cdn]' )
if __name__ == '__main__':
(opts, args) = parser.parse_args()
opts.parser = parser
if not opts.mode and opts.online:
cdn = casc.CDNIndex(opts)
cdn.CheckVersion()
sys.exit(0)
#elif opts.mode == 'fieldlist':
# build = build_cfg.BuildCfg(opts)
# if not build.open():
# sys.exit(1)
# bin = pe.Pe32Parser(opts)
# if not bin.open():
# sys.exit(1)
#
# bin.parse()
# bin.generate()
elif opts.mode == 'batch':
if not opts.output:
parser.error("Batch mode requires an output directory for the files")
fname_db = build_cfg.DBFileList(opts)
if not fname_db.open():
sys.exit(1)
blte = casc.BLTEExtract(opts)
if not opts.online:
build = build_cfg.BuildCfg(opts)
if not build.open():
sys.exit(1)
encoding = casc.CASCEncodingFile(opts, build)
if not encoding.open():
sys.exit(1)
index = casc.CASCDataIndex(opts)
if not index.open():
sys.exit(1)
root = casc.CASCRootFile(opts, build, encoding, index)
if not root.open():
sys.exit(1)
for file_hash, file_name in fname_db.items():
extract_data = None
file_md5s = root.GetFileHashMD5(file_hash)
file_keys = []
for md5s in file_md5s:
file_keys = encoding.GetFileKeys(md5s)
file_locations = []
for file_key in file_keys:
file_location = index.GetIndexData(file_key)
if file_location[0] > -1:
extract_data = (file_key, md5s, file_name.replace('\\', '/')) + file_location
break
if not extract_data:
continue
print('Extracting %s ...' % file_name)
if not blte.extract_file(*extract_data):
sys.exit(1)
else:
cdn = casc.CDNIndex(opts)
if not cdn.open():
sys.exit(1)
encoding = casc.CASCEncodingFile(opts, cdn)
if not encoding.open():
sys.exit(1)
root = casc.CASCRootFile(opts, cdn, encoding, None)
if not root.open():
sys.exit(1)
output_path = os.path.join(opts.output, cdn.build())
for file_hash, file_name in fname_db.items():
file_md5s = root.GetFileHashMD5(file_hash)
if not file_md5s:
continue
if len(file_md5s) > 1:
print('Duplicate files found (%d) for %s, selecting first one ...' % (len(file_md5s), file_name))
file_keys = encoding.GetFileKeys(file_md5s[0])
if len(file_keys) == 0:
continue
if len(file_keys) > 1:
print('More than one key found for %s, selecting first one ...' % file_name)
print('Extracting %s ...' % file_name)
data = cdn.fetch_file(file_keys[0])
if not data:
					print('No data for a given key %s' % binascii.hexlify(file_keys[0]).decode('utf-8'))
continue
blte.extract_buffer_to_file(data, os.path.join(output_path, file_name.replace('\\', '/')))
elif opts.mode == 'unpack':
blte = casc.BLTEExtract(opts)
for file in args:
print('Extracting %s ...')
if not blte.extract_file(file):
sys.exit(1)
elif opts.mode == 'extract':
build = None
index = None
if not opts.online:
build = build_cfg.BuildCfg(opts)
if not build.open():
sys.exit(1)
index = casc.CASCDataIndex(opts)
if not index.open():
sys.exit(1)
else:
build = casc.CDNIndex(opts)
if not build.open():
sys.exit(1)
encoding = casc.CASCEncodingFile(opts, build)
if not encoding.open():
sys.exit(1)
root = casc.CASCRootFile(opts, build, encoding, index)
if not root.open():
sys.exit(1)
keys = []
md5s = None
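		# A file can be addressed three ways: by raw storage key ("key:<hex>"),
		# by content md5 ("md5:<hex>") resolved through the encoding file, or
		# by file path resolved through the root file.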
if 'key:' in args[0]:
keys.append(binascii.unhexlify(args[0][4:]))
file_name = args[0][4:]
elif 'md5:' in args[0]:
md5s = args[0][4:]
keys = encoding.GetFileKeys(binascii.unhexlify(args[0][4:]))
if len(keys) == 0:
parser.error('No file found with md5sum %s' % args[0][4:])
else:
file_name = binascii.hexlify(keys[0]).decode('utf-8')
else:
file_md5s = root.GetFileMD5(args[0])
if len(file_md5s) == 0:
parser.error('No file named %s found' % args[0])
keys = encoding.GetFileKeys(file_md5s[0])
file_name = args[0]
#print args[0], len(file_md5s) and file_md5s[0].encode('hex') or 0, len(keys)
#sys.exit(0)
if len(keys) == 0:
parser.error('No encoding information found for %s' % args[0])
if len(keys) > 1:
parser.error('Found multiple file keys with %s' % args[0])
blte = casc.BLTEExtract(opts)
if not opts.online:
file_location = index.GetIndexData(keys[0])
if file_location[0] == -1:
parser.error('No file location found for %s' % args[0])
			if not blte.extract_file(keys[0], binascii.unhexlify(md5s) if md5s else None, None, *file_location):
sys.exit(1)
else:
data = build.fetch_file(keys[0])
if not data:
				print('No data for a given key %s' % binascii.hexlify(keys[0]).decode('utf-8'))
sys.exit(1)
output_path = os.path.join(opts.output, build.build())
blte.extract_buffer_to_file(data, os.path.join(output_path, file_name.replace('\\', '/')))
| gpl-3.0 |
turbidsoul/tsutil | tsutil/http_server.py | 1 | 5471 | # -*- coding: utf-8 -*-
import asyncio
import logging
import os
from typing import Union
import mimetypes
req_log = logging.getLogger('request')
req_log.setLevel(logging.INFO)
dir_template = '''<html>
<head>
<title>目录 {dir_name}</title>
</head>
<body>
<h2>{nav}</h2>
<hr>
{content}
</body>
</html>'''
file_template = '''<html>
<head>
<title>文件: {file_name}</title>
<link rel="stylesheet"
href="//cdn.jsdelivr.net/gh/highlightjs/[email protected]/build/styles/default.min.css">
<script src="//cdn.jsdelivr.net/gh/highlightjs/[email protected]/build/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad();</script>
</head>
<body>
<h2>{nav}</h2>
<hr>
<pre>
<code class="{lang}">{content}</code>
</pre>
</body>
</html>'''
def is_text(path):
type = mimetypes.guess_type(path)[0]
return type and type == 'text/plain'
class BasicHttpResponse(object):
protocol = 'HTTP'
protocol_version = '1.1'
status_code = 200
headers = [
('Server', 'asyncio-server')
]
body = ''
    # HTTP/1.1 framing: status line, headers, blank line, then body, separated
    # by CRLF. The reason phrase is hardcoded to "OK"; it is cosmetic only.
    template = '{}/{} {} OK\r\n{}\r\n\r\n{}'
def __str__(self):
headers = "\r\n".join(map(lambda h: h[0] + ':' + h[1], self.headers))
return self.template.format(self.protocol, self.protocol_version, self.status_code, headers, self.body)
class HttpServerProtocol(asyncio.Protocol):
base_dir: Union[str]
def __init__(self, base_dir):
super(HttpServerProtocol).__init__()
self.base_dir = base_dir
def connection_made(self, transport):
peername = transport.get_extra_info('peername')
req_log.debug('Connection from %s', peername)
self.transport = transport
def data_received(self, data):
req_log.debug('http request body: %s', data)
do_method, req_path, _, _ = self.parse_request(data.decode('utf-8'))
        # Default to None so unsupported HTTP verbs fall through to the 404 branch.
        method = getattr(self, 'do_' + do_method, None)
body_html = ''
if method is not None:
body_html = method(req_path)
else:
r = BasicHttpResponse()
r.body = '<h1>404</h1>'
body_html = r
self.transport.write(str(body_html).encode('utf-8'))
req_log.debug('Close the client socket')
self.transport.close()
def do_GET(self, req_path: Union[str]) -> Union[BasicHttpResponse]:
path = os.path.join(self.base_dir, req_path)
r = BasicHttpResponse()
if not os.path.exists(path):
r.status_code = 404
r.body = '<h1>404</h1>'
return r
if os.path.isfile(path):
if is_text(path):
r.headers.append(('Content-Type', 'text/html; charset=utf-8'))
r.body = self.do_text_file(path, req_path)
else:
r.headers.append(('Content-Type', 'application/octet-stream'))
else:
r.headers.append(('Content-Type', 'text/html; charset=utf-8'))
r.body = self.do_dir(path, req_path)
return r
def do_POST(self, req_path: Union[str]):
pass
def parse_request(self, data: Union[str]):
method, path, _ = data.split('\r\n')[0].split(' ')
req_log.info('%s %s', method, path)
query_string, hash_string = None, None
if '?' in path:
path, query_string = path.split('?')
query_string = query_string.strip()
query_string = query_string if len(query_string) > 0 else None
qs_map = {}
if query_string and '#' in query_string:
query_string, hash_string = query_string.split('#')
qs_map = dict(map(lambda it: it.split('='), query_string.split('&')))
return method, path, qs_map, hash_string
def do_text_file(self, file_path, req_path):
req_log.debug(mimetypes.guess_type(file_path))
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
ext = file_path.split('.')[-1]
return file_template.format(file_name=file_path, nav=file_path, lang=ext, content=content)
def do_dir(self, dir_path, req_path):
dir_html = self.list_dir(dir_path, req_path)
return dir_template.format(dir_name=dir_path, nav=dir_path, content=dir_html)
def nav(self, dir: Union[str]) -> Union[str]:
pass
def list_dir(self, dir_path: Union[str], req_path: Union[str]) -> Union[str]:
return '<ul>' + "".join(map(lambda d: '<li><a href="'+os.path.join(req_path, d)+'">' + d + '</a></li>', os.listdir(dir_path))) + '</ul>'
async def main(bind: Union[str], port: Union[int], verbose: Union[bool], base_dir: Union[str]):
loop = asyncio.get_running_loop()
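    # create_server calls the protocol factory once per incoming connection,
    # so each client gets its own HttpServerProtocol instance sharing base_dir.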
server = await loop.create_server(lambda: HttpServerProtocol(base_dir), bind, port)
try:
req_log.info('starting HTTP server: %s:%s', bind, port)
await server.serve_forever()
except:
server.close()
await server.wait_closed()
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser('Simple HTTP server')
    parser.add_argument('-b', '--bind', type=str, default='127.0.0.1', help='host address (default: 127.0.0.1)')
    parser.add_argument('-p', '--port', type=int, default=8888, help='port number (default: 8888)')
    parser.add_argument('-d', '--directory', type=str, default=os.getcwd(), help='directory to serve (default: %s)' % os.getcwd())
    parser.add_argument('--debug', default=False, action='store_true', help='debug logging')
    parser.add_argument('--verbose', action='store_true', default=False, help='verbose logging')
args = parser.parse_args()
    logging.basicConfig(
        level=logging.DEBUG if (args.debug or args.verbose) else logging.INFO)
    try:
        asyncio.run(main(args.bind, args.port, args.verbose, args.directory))
    except KeyboardInterrupt:
        req_log.info('received ctrl+c, shutting down server') | mit |
vmarkovtsev/django | tests/custom_columns/tests.py | 89 | 4105 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from django.utils import six
from .models import Article, Author
class CustomColumnsTests(TestCase):
def setUp(self):
self.a1 = Author.objects.create(first_name="John", last_name="Smith")
self.a2 = Author.objects.create(first_name="Peter", last_name="Jones")
self.authors = [self.a1, self.a2]
self.article = Article.objects.create(headline="Django lets you build Web apps easily", primary_author=self.a1)
self.article.authors = self.authors
def test_query_all_available_authors(self):
self.assertQuerysetEqual(
Author.objects.all(), [
"Peter Jones", "John Smith",
],
six.text_type
)
def test_get_first_name(self):
self.assertEqual(
Author.objects.get(first_name__exact="John"),
self.a1,
)
def test_filter_first_name(self):
self.assertQuerysetEqual(
Author.objects.filter(first_name__exact="John"), [
"John Smith",
],
six.text_type
)
def test_field_error(self):
self.assertRaises(
FieldError,
lambda: Author.objects.filter(firstname__exact="John")
)
def test_attribute_error(self):
with self.assertRaises(AttributeError):
self.a1.firstname
with self.assertRaises(AttributeError):
self.a1.last
def test_get_all_authors_for_an_article(self):
self.assertQuerysetEqual(
self.article.authors.all(), [
"Peter Jones",
"John Smith",
],
six.text_type
)
def test_get_all_articles_for_an_author(self):
self.assertQuerysetEqual(
self.a1.article_set.all(), [
"Django lets you build Web apps easily",
],
lambda a: a.headline
)
def test_get_author_m2m_relation(self):
self.assertQuerysetEqual(
self.article.authors.filter(last_name='Jones'), [
"Peter Jones"
],
six.text_type
)
def test_author_querying(self):
self.assertQuerysetEqual(
Author.objects.all().order_by('last_name'),
['<Author: Peter Jones>', '<Author: John Smith>']
)
def test_author_filtering(self):
self.assertQuerysetEqual(
Author.objects.filter(first_name__exact='John'),
['<Author: John Smith>']
)
def test_author_get(self):
self.assertEqual(self.a1, Author.objects.get(first_name__exact='John'))
def test_filter_on_nonexistent_field(self):
self.assertRaisesMessage(
FieldError,
"Cannot resolve keyword 'firstname' into field. Choices are: "
"Author_ID, article, first_name, last_name, primary_set",
Author.objects.filter,
firstname__exact='John'
)
def test_author_get_attributes(self):
a = Author.objects.get(last_name__exact='Smith')
self.assertEqual('John', a.first_name)
self.assertEqual('Smith', a.last_name)
self.assertRaisesMessage(
AttributeError,
"'Author' object has no attribute 'firstname'",
getattr,
a, 'firstname'
)
self.assertRaisesMessage(
AttributeError,
"'Author' object has no attribute 'last'",
getattr,
a, 'last'
)
def test_m2m_table(self):
self.assertQuerysetEqual(
self.article.authors.all().order_by('last_name'),
['<Author: Peter Jones>', '<Author: John Smith>']
)
self.assertQuerysetEqual(
self.a1.article_set.all(),
['<Article: Django lets you build Web apps easily>']
)
self.assertQuerysetEqual(
self.article.authors.filter(last_name='Jones'),
['<Author: Peter Jones>']
)
| bsd-3-clause |
archen/django | tests/utils_tests/test_datetime_safe.py | 54 | 2080 | import unittest
from datetime import date as original_date, datetime as original_datetime
from django.utils.datetime_safe import date, datetime
class DatetimeTests(unittest.TestCase):
def setUp(self):
self.just_safe = (1900, 1, 1)
self.just_unsafe = (1899, 12, 31, 23, 59, 59)
self.really_old = (20, 1, 1)
self.more_recent = (2006, 1, 1)
def test_compare_datetimes(self):
self.assertEqual(original_datetime(*self.more_recent), datetime(*self.more_recent))
self.assertEqual(original_datetime(*self.really_old), datetime(*self.really_old))
self.assertEqual(original_date(*self.more_recent), date(*self.more_recent))
self.assertEqual(original_date(*self.really_old), date(*self.really_old))
self.assertEqual(original_date(*self.just_safe).strftime('%Y-%m-%d'), date(*self.just_safe).strftime('%Y-%m-%d'))
self.assertEqual(original_datetime(*self.just_safe).strftime('%Y-%m-%d'), datetime(*self.just_safe).strftime('%Y-%m-%d'))
def test_safe_strftime(self):
self.assertEqual(date(*self.just_unsafe[:3]).strftime('%Y-%m-%d (weekday %w)'), '1899-12-31 (weekday 0)')
self.assertEqual(date(*self.just_safe).strftime('%Y-%m-%d (weekday %w)'), '1900-01-01 (weekday 1)')
self.assertEqual(datetime(*self.just_unsafe).strftime('%Y-%m-%d %H:%M:%S (weekday %w)'), '1899-12-31 23:59:59 (weekday 0)')
self.assertEqual(datetime(*self.just_safe).strftime('%Y-%m-%d %H:%M:%S (weekday %w)'), '1900-01-01 00:00:00 (weekday 1)')
# %y will error before this date
self.assertEqual(date(*self.just_safe).strftime('%y'), '00')
self.assertEqual(datetime(*self.just_safe).strftime('%y'), '00')
self.assertEqual(date(1850, 8, 2).strftime("%Y/%m/%d was a %A"), '1850/08/02 was a Friday')
def test_zero_padding(self):
"""
Regression for #12524
Check that pre-1000AD dates are padded with zeros if necessary
"""
self.assertEqual(date(1, 1, 1).strftime("%Y/%m/%d was a %A"), '0001/01/01 was a Monday')
| bsd-3-clause |
ojengwa/oh-mainline | vendor/packages/Django/django/contrib/flatpages/tests/forms.py | 113 | 4014 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.flatpages.forms import FlatpageForm
from django.contrib.flatpages.models import FlatPage
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import translation
@override_settings(SITE_ID=1)
class FlatpageAdminFormTests(TestCase):
fixtures = ['example_site']
def setUp(self):
self.form_data = {
'title': "A test page",
'content': "This is a test",
'sites': [settings.SITE_ID],
}
def test_flatpage_admin_form_url_validation(self):
"The flatpage admin form correctly validates urls"
self.assertTrue(FlatpageForm(data=dict(url='/new_flatpage/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.special~chars/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.very_special~chars-here/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a space/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a % char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ! char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a & char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ? char/', **self.form_data)).is_valid())
def test_flatpage_requires_leading_slash(self):
form = FlatpageForm(data=dict(url='no_leading_slash/', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a leading slash."])
@override_settings(APPEND_SLASH=True,
MIDDLEWARE_CLASSES=('django.middleware.common.CommonMiddleware',))
def test_flatpage_requires_trailing_slash_with_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a trailing slash."])
@override_settings(APPEND_SLASH=False,
MIDDLEWARE_CLASSES=('django.middleware.common.CommonMiddleware',))
def test_flatpage_doesnt_requires_trailing_slash_without_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
self.assertTrue(form.is_valid())
def test_flatpage_admin_form_url_uniqueness_validation(self):
"The flatpage admin form correctly enforces url uniqueness among flatpages of the same site"
data = dict(url='/myflatpage1/', **self.form_data)
FlatpageForm(data=data).save()
f = FlatpageForm(data=data)
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'__all__': ['Flatpage with url /myflatpage1/ already exists for site example.com']})
def test_flatpage_admin_form_edit(self):
"""
Existing flatpages can be edited in the admin form without triggering
the url-uniqueness validation.
"""
existing = FlatPage.objects.create(
url="/myflatpage1/", title="Some page", content="The content")
existing.sites.add(settings.SITE_ID)
data = dict(url='/myflatpage1/', **self.form_data)
f = FlatpageForm(data=data, instance=existing)
self.assertTrue(f.is_valid(), f.errors)
updated = f.save()
self.assertEqual(updated.title, "A test page")
def test_flatpage_nosites(self):
data = dict(url='/myflatpage1/', **self.form_data)
data.update({'sites': ''})
f = FlatpageForm(data=data)
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'sites': [translation.ugettext('This field is required.')]})
| agpl-3.0 |
moloch--/TornadoAppTemplate | handlers/AuthenticationHandlers.py | 1 | 3911 | # -*- coding: utf-8 -*-
"""
@author: moloch
Copyright 2015
"""
import json
import logging
from datetime import datetime
import bcrypt
from tornado.options import options
from libs.JsonAPI import json_api_method
from libs.ValidationError import ValidationError
from models.User import User
from .BaseHandlers import APIBaseHandler
class BaseAuthenticationAPIHandler(APIBaseHandler):
AUTHENTICATION_TYPE = "base"
def post(self):
raise NotImplementedError()
def login_success(self, user):
"""
Create a session and return it to the client, sessions are *not*
cookies, instead we use a an hmac'd JSON blob that we hand to
the client. The client includes this hmac'd blob in a header
`X-DYNAMITE` on all requests (including GETs).
"""
logging.info("Successful authentication request for %s via '%s'",
user.name, self.AUTHENTICATION_TYPE)
user.last_login = datetime.now()
self.dbsession.add(user)
self.dbsession.commit()
session = self.start_session_for(user)
session['ip_address'] = self.request.remote_ip
secure_session = self.create_signed_value(name="session",
value=json.dumps(session))
# We put some data in here so the client can know when the session
# expires and what the user's name is, etc -but we never trust it.
# Server-side we only trust values from the hmac'd session `data`
return {
"username": user.name,
"password": None,
"data": secure_session,
"expires": int(session['expires']),
"permissions": user.permission_names,
"debug": options.debug,
}
def login_failure(self):
raise NotImplementedError()
class LoginAuthenticationAPIHandler(BaseAuthenticationAPIHandler):
""" This class handles login requests and creating sessions """
AUTHENTICATION_TYPE = "login"
@json_api_method({
"type": "object",
"properties": {
"username": {"type": "string", "minLength": 1, "maxLength": 16},
"password": {"type": "string", "minLength": 1, "maxLength": 72},
},
"required": ["username", "password"]
})
def post(self):
""" Login and create a new session """
user = User.by_name(self.get_argument('username', ''))
if user is not None:
session = self.login_attempt(user)
self.write(session)
else:
            # To prevent a timing attack that could enumerate users: since
            # hashing takes non-zero time and we would normally only hash
            # when we found a user in the db, we hash a throwaway password
            # anyway before telling the client the auth failed.
bcrypt.hashpw("password", bcrypt.gensalt())
self.login_failure()
def login_attempt(self, user):
"""
There's still a small timing attack here when we check the OTP, but to
exploit it you need to know the username and password, so 'meh'
"""
password = self.get_argument('password', '')
if user.validate_password(password):
if not user.otp_enabled:
return self.login_success(user)
else:
return self.otp_attempt(user)
else:
self.login_failure()
def otp_attempt(self, user):
otp = self.get_argument("otp", "")
if len(otp) != User.OTP_LENGTH:
self.login_failure()
if user.validate_otp(otp):
return self.login_success(user)
else:
self.login_failure()
def login_failure(self):
logging.info("Failed authentication attempt from %s",
self.request.remote_ip)
raise ValidationError("Incorrect username and/or password")
| apache-2.0 |
pmclanahan/basket | news/migrations/0001_initial.py | 2 | 3070 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Subscriber'
db.create_table('news_subscriber', (
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, primary_key=True)),
('token', self.gf('django.db.models.fields.CharField')(default='b2b2a68b-202e-4519-a39d-e854ff09afd2', max_length=1024)),
))
db.send_create_signal('news', ['Subscriber'])
# Adding model 'Newsletter'
db.create_table('news_newsletter', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
('title', self.gf('django.db.models.fields.CharField')(max_length=128)),
('description', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('show', self.gf('django.db.models.fields.BooleanField')(default=False)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('welcome', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('vendor_id', self.gf('django.db.models.fields.CharField')(max_length=128)),
('languages', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('news', ['Newsletter'])
def backwards(self, orm):
# Deleting model 'Subscriber'
db.delete_table('news_subscriber')
# Deleting model 'Newsletter'
db.delete_table('news_newsletter')
models = {
'news.newsletter': {
'Meta': {'object_name': 'Newsletter'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'show': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'vendor_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'welcome': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'news.subscriber': {
'Meta': {'object_name': 'Subscriber'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'010e36cf-41b8-49e9-ac75-0b081b80f477'", 'max_length': '1024'})
}
}
complete_apps = ['news'] | mpl-2.0 |
windskyer/k_cinder | paxes_cinder/k2aclient/k2asample/tool_ssp_audit_plot.py | 1 | 18735 | #
#
# =================================================================
# =================================================================
import eventlet
from itertools import repeat
from collections import deque
import time
import pickle
from paxes_cinder.k2aclient.k2asample.tool_ssp_simulation import MockLu, \
ImagePool
import re
from sets import Set
import matplotlib.pyplot as plt
import numpy as np
markers = [
(".", "point"),
(",", "pixel"),
("o", "circle"),
("v", "triangle_down"),
("^", "triangle_up"),
("<", "triangle_left"),
(">", "triangle_right"),
("1", "tri_down"),
("2", "tri_up"),
("3", "tri_left"),
("4", "tri_right"),
("8", "octagon"),
("s", "square"),
("p", "pentagon"),
("*", "star"),
("h", "hexagon1"),
("H", "hexagon2"),
("+", "plus"),
("x", "x"),
("D", "diamond"),
("d", "thin_diamond")
]
colors = [
("b", "blue"),
("g", "green"),
("r", "red"),
("c", "cyan"),
("m", "magenta"),
("y", "yellow"),
("k", "black"), # ("w", "white")
]
from paxes_cinder.k2aclient.k2asample.tool_ssp_simulation import \
OperationType, \
Simulation, \
Operation
def plotD(s):
x = s.total_number_of_image_deploys + s.total_number_of_snapshot_deploys
plt.figure("SSP: Instances: %s" % (x,))
plt.xlabel('Time')
plt.ylabel('Instances')
plt.title('SSP: Instances vs. Elapsed Time')
plt.grid(True)
leno = len(s.operations)
DAO = s.deploys_at_oper
SAO = s.snapshots_at_oper
TIME = []
for i, (o, e, t, x) in enumerate(s.operations):
if i == 0:
x0 = x
TIME.append((x - x0) / 60.)
# pDAO, = plt.plot(xrange(leno), DAO, 'bo')
# pSAO, = plt.plot(xrange(leno), SAO, 'bv')
pDAO, = plt.plot(TIME, DAO)
pSAO, = plt.plot(TIME, SAO)
# ymax = max([max(DFIa),max(DFIb),max(DFSa),max(DFSb)])
plt.legend([pDAO, pSAO],
["Deploys", "Snapshots"],
loc=4,
prop={'size': 8})
ymax = max([max(DAO), max(SAO)])
plt.axis([0, TIME[-1], 0, ymax * 1.25])
plt.show()
def plotB(s):
x = s.total_number_of_image_deploys + s.total_number_of_snapshot_deploys
plt.figure("SSP: Instances: %s" % (x,))
plt.xlabel('Operation number')
plt.ylabel('Instances')
plt.title('SSP: Instances vs. Operation number')
plt.grid(True)
leno = len(s.operations)
DAO = s.deploys_at_oper
SAO = s.snapshots_at_oper
# pDAO, = plt.plot(xrange(leno), DAO, 'bo')
# pSAO, = plt.plot(xrange(leno), SAO, 'bv')
pDAO, = plt.plot(xrange(leno), DAO)
pSAO, = plt.plot(xrange(leno), SAO)
# ymax = max([max(DFIa),max(DFIb),max(DFSa),max(DFSb)])
plt.legend([pDAO, pSAO],
["Deploys", "Snapshots"],
loc=4,
prop={'size': 8})
ymax = max([max(DAO), max(SAO)])
plt.axis([0, leno - 1, 0, ymax * 1.25])
plt.show()
def plotA(data, datafilter, jobs, jobsfilter):
PLS = []
datafilter_filter = []
datafilter_display = []
for (df, dd) in datafilter:
datafilter_filter.append(df)
datafilter_display.append(dd)
jobsfilter_filter = []
jobsfilter_display = []
for (df, dd) in jobsfilter:
jobsfilter_filter.append(df)
jobsfilter_display.append(dd)
# data
num_ops = 0
for k, dps in data.iteritems():
if k in datafilter_filter:
display = datafilter_display[datafilter_filter.index(k)]
PLSt = []
PLSv = []
for (t, v) in dps:
num_ops += 1
PLSt.append(t / 3600.0)
PLSv.append(v)
PLS.append((display, PLSt, PLSv))
# tjobs
num_jobs = 0
for k, dps in jobs.iteritems():
if k in jobsfilter_filter:
display = jobsfilter_display[jobsfilter_filter.index(k)]
PLSt = []
PLSv = []
for (t, v) in dps:
num_jobs += 1
PLSt.append(t / 3600.0)
PLSv.append(v)
PLS.append((display, PLSt, PLSv))
# print dps
# for (t,v) in dps:
# PLSt.append(t)
# PLSv.append(v)
# PLS.append(("JOB", PLSt, PLSv))
plt.figure("SSP: Deploy of %d ops and %d jobs" %
(num_ops, num_jobs))
msg = ("Operations: %d\n"
"Jobs = %d\n")
msg = msg % (num_ops,
num_jobs,)
# plt.text(1, 1.0, 'Total number of deploys = %s\nfoo' % (x,), fontsize=8)
# plt.text(400, 22.0, msg, fontsize=8)
plt.text(.2, 7.0, msg, fontsize=8)
legplot = []
leglabl = []
xmaxa = []
ymaxa = []
for i, plot in enumerate(PLS):
display, PLSt, PLSv = plot
ci = i % len(colors)
xmaxa.append(max(PLSt))
ymaxa.append(max(PLSv))
# print colors[ci][0]
legplot.append(plt.plot(PLSt, PLSv, colors[ci][0] + "o")[0])
leglabl.append(display)
# DFIi = PLSt
# DFIa = PLSv
#
# pDFIa, = plt.plot(DFIi, DFIa, 'bo')
plt.legend(legplot,
leglabl,
loc=2,
prop={'size': 8})
xmax = max(xmaxa)
ymax = max(ymaxa)
#
# ymax = max([max(DFIa)])
# xmax = max([max(DFIi)])
plt.axis([0, xmax, 0, ymax * 1.25])
plt.xlabel('Elapsed time (hrs)')
plt.ylabel('Operation time (sec)')
    plt.title('Operation time vs. Elapsed time')
    # message
msg = ("Deploy an LU:\n"
" .... -> (POST LU -> LULinkedClone -> GET LU) -> ...")
plt.text(1, 40.0, msg, fontsize=14)
plt.grid(True)
plt.show()
# lines
# pDFIa, = plt.plot(DFIi, DFIa, 'bo')
# pDFIb, = plt.plot(DFIi, DFIb, 'bv')
# pDFIx, = plt.plot(DFIi, DFIx, 'bx')
# pDFSa, = plt.plot(DFSi, DFSa, 'ko')
# pDFSb, = plt.plot(DFSi, DFSb, 'kv')
# pSADa, = plt.plot(SADi, SADa, 'co')
# pSADb, = plt.plot(SADi, SADb, 'cv')
# pDASa, = plt.plot(DASi, DASa, 'mo')
# pDADa, = plt.plot(DADi, DADa, 'yo')
# pEEEa, = plt.plot(EEEi, EEEa, 'ro')
# plt.legend([pDFIa, pDFIb, pDFIx,
# pDFSa, pDFSb, pSADa, pSADb, pDASa, pDADa, pEEEa],
# ["DeployFromImage-Create", "DeployFromImage-Clone",
# "DeployFromImage-Total",
# "DeployFromSnap-Create", "DeployFromSnap-Clone",
# "SnapaDeploy-Create", "SnapaDeploy-Clone",
# "DelaSnap",
# "DelaDeploy",
# "Exception"],
# loc=2,
# prop={'size': 8})
# ymax = max([max(DFIa), max(DFIb), max(DFIx)])
# plt.axis([0, len(s.operations) - 1, 0, ymax * 1.25])
# plt.show()
return
msg = "total_number_of_image_deploys >%d<"
print msg % (s.total_number_of_image_deploys,)
msg = "total_number_of_snapshot_deploys >%d<"
print msg % (s.total_number_of_snapshot_deploys,)
msg = "total_number_of_snapshots >%d<"
print msg % (s.total_number_of_snapshots,)
# # the histogram of the data
# n, bins, patches = plt.hist(len(s.operations), 50, normed=1,
# facecolor='g', alpha=0.75)
#
# plt.xlabel('Smarts')
# plt.ylabel('Probability')
# plt.title('Histogram of IQ')
# plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
# plt.axis([40, 160, 0, 0.03])
# plt.grid(True)
# plt.show()
# x = s.total_number_of_image_deploys + s.total_number_of_snapshot_deploys
# plt.figure("SSP: Deploy: %s" % (x,))
# plt.xlabel('Operation number')
# plt.ylabel('Operation time (sec)')
# plt.title('SSP: Operation time vs. Operation number')
# plt.grid(True)
# for (d, o) in s.deploy_inflections:
# print d
# if d == "D2I":
# plt.axvline(x=o, linewidth=1, color='r')
# else:
# plt.axvline(x=o, linewidth=1, color='r', linestyle="dashed")
# for (d, o) in s.snapshot_inflections:
# print d
# if d == "D2I":
# plt.axvline(x=o, linewidth=1, color='b')
# else:
# plt.axvline(x=o, linewidth=1, color='b', linestyle="dashed")
# # message
# title = "NO TITLE"
# if hasattr(s, "title"):
# title = s.title
# msg = ("Title: >%s<\n"
# "Total number of deploys = %s\n"
# "Deploys: min: %d, max: %d\n"
# "Snapshots: min: %d, max: %d\n"
# "Image pool size: %d\n"
# "Threads: %d")
# msg = msg % (x,
# title,
# s.min_deploys, s.max_deploys,
# s.min_snapshots, s.max_snapshots,
# s.image_pool_size,
# s.num_threads)
# # plt.text(1, 1.0, 'Total number of deploys = %s\nfoo' % (x,),
# # fontsize=8)
# plt.text(10, 75.0, msg, fontsize=8)
# lines
# pDFIa, = plt.plot(DFIi, DFIa, 'bo')
# pDFIb, = plt.plot(DFIi, DFIb, 'bv')
# pDFIx, = plt.plot(DFIi, DFIx, 'bx')
# pDFSa, = plt.plot(DFSi, DFSa, 'ko')
# pDFSb, = plt.plot(DFSi, DFSb, 'kv')
# pSADa, = plt.plot(SADi, SADa, 'co')
# pSADb, = plt.plot(SADi, SADb, 'cv')
# pDASa, = plt.plot(DASi, DASa, 'mo')
# pDADa, = plt.plot(DADi, DADa, 'yo')
# pEEEa, = plt.plot(EEEi, EEEa, 'ro')
# # ymax = max([max(DFIa),max(DFIb),max(DFSa),max(DFSb)])
# plt.legend([pDFIa, pDFIb, pDFIx,
# pDFSa, pDFSb, pSADa, pSADb, pDASa, pDADa, pEEEa],
# ["DeployFromImage-Create", "DeployFromImage-Clone",
# "DeployFromImage-Total",
# "DeployFromSnap-Create", "DeployFromSnap-Clone",
# "SnapaDeploy-Create", "SnapaDeploy-Clone",
# "DelaSnap",
# "DelaDeploy",
# "Exception"],
# loc=2,
# prop={'size': 8})
# ymax = max([max(DFIa), max(DFIb), max(DFIx)])
# plt.axis([0, len(s.operations) - 1, 0, ymax * 1.25])
plt.show()
if __name__ == '__main__':
# ("POST",
# "/rest/api/uom/SharedStoragePool/39cc86f0-bc1a-33a8-8d22-20c4b4f3a8d3",
# "200")
# ("POST",
# "/rest/api/uom/SharedStoragePool/39cc86f0-bc1a-33a8-8d22-20c4b4f3a8d3",
# "412")
# ("POST",
# "/rest/api/uom/SharedStoragePool/39cc86f0-bc1a-33a8-8d22-20c4b4f3a8d3",
# "412")
#
# ("GET",
# "/rest/api/uom/Cluster/c43fbdcd-95f2-3b4a-b643-234ff00eded4",
# "200")
# ("")
p = re.compile(r'^<\d+>\d+ (?P<minute>\d\d\d\d-\d\d-\d\dT\d\d:\d\d):'
'(?P<second>.*)Z '
'.* REST - PMCA\d\d\d\d \['
'(?P<params>.*)\](?P<remain>.*)')
# > Job START: Operation="ClusterLULinkedClone"; JobID="1394637525237".<
js = re.compile(r'^ Job START: Operation="(?P<oper>.*)";'
' JobID="(?P<jobid>.*)"\.')
# > Job END : Operation="ClusterLULinkedClone"; JobID="1394637525241";
# Status="COMPLETED_OK".<
je = re.compile(r'^ Job END : Operation="(?P<oper>.*)";'
' JobID="(?P<jobid>.*)"; Status="(?P<status>.*)"\.')
# p = re.compile(r'<\d+>\d+ \d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d\.\d\d\dZ.*')
start = "2014-03-14T11:21:56.780Z"
start = "2014-03-14T12:37:07.202Z"
start = "2014-03-14T12:37:22.769Z"
start = "2014-03-18T07:00:02.910Z"
stop = "2014-03-15T00:22:16.485Z"
# OLD
datafilter = [("GET%/rest/api/uom/SharedStoragePool/"
"39cc86f0-bc1a-33a8-8d22-20c4b4f3a8d3", "GET LU"),
("POST%/rest/api/uom/SharedStoragePool/"
"39cc86f0-bc1a-33a8-8d22-20c4b4f3a8d3", "POST LU")]
jobsfilter = [("ClusterLULinkedClone", "LULinkedClone")]
auditlog = 'my_audit_REJY_0314A'
auditlog = 'my_audit_REJY_0318A'
datafilter = []
jobsfilter = [("ClusterLULinkedClone", "LULinkedClone"),
("ClusterCreateLogicalUnit", "CreateLogicalUnit")]
# auditlog = 'my_audit_REJY_0321A'
# start = "2014-03-22T05:00:56.780Z"
# auditlog = 'my_audit_REJY_0321B'
# start = "2014-03-22T11:03:30.631Z"
auditlog = 'my_audit_REJY_0323A'
start = "2014-03-24T00:52:42.593Z"
datafilter = [("GET%/rest/api/uom/SharedStoragePool/"
"29915d6b-86af-3fdd-8a8d-fea6cab1dc91"
"%200", "GET LU 200"),
("POST%/rest/api/uom/SharedStoragePool/"
"29915d6b-86af-3fdd-8a8d-fea6cab1dc91"
"%200", "POST LU 200"),
("POST%/rest/api/uom/SharedStoragePool/"
"29915d6b-86af-3fdd-8a8d-fea6cab1dc91"
"%412", "POST LU 412")
]
jobsfilter = [("ClusterCreateLogicalUnit", "CreateLogicalUnit")]
auditlog = 'my_audit/my_audit_N7_0404A'
start = "2014-04-04T17:48:32.285Z"
auditlog = 'my_audit/my_audit_N7_0406A'
start = "2014-04-07T01:35:19.301Z"
auditlog = 'my_audit/my_audit_N7_0407A'
start = "2014-04-07T01:35:19.301Z"
auditlog = 'my_audit/my_audit_N7_0407B'
start = "2014-04-07T18:05:21.210Z"
auditlog = 'my_audit/my_audit_N7_0407C'
start = "2014-04-08T01:52:07.478Z"
auditlog = 'my_audit/my_audit_N7_0408A'
start = "2014-04-08T17:10:47.093Z"
auditlog = 'my_audit/my_audit_N7_0409A'
start = "2014-04-09T04:55:58.319Z"
totals = {}
data = {}
tjobs = {}
running_jobs = Set()
started = False
first = True
count = 0
with open(auditlog, 'r') as f:
# s = f.readline()
for s in f:
#
# print s
m = p.match(s)
# 2014-03-14T11:21:52.497
# 2014-03-14T11:21 minute
# 52.497 second
ms = m.group('minute')
# print ms
ss = m.group('second')
# print ss
fs = ms + ":" + ss + "Z"
# print fs
if not started and fs == start:
started = True
if not started:
continue
# print s
# print fs
mt = time.strptime(ms, "%Y-%m-%dT%H:%M")
# convert to seconds since epoch
sse = time.mktime(mt) + float(ss)
if first:
sse0 = sse
first = False
sselapsed = sse - sse0
# print (sse - sse0) / 3600.0
# add seconds
# mt.tm_sec = float(ss)
# print mt
ps = m.group('params')
psas = ps.split(" ", 1)[1]
# print psas
d = {}
while len(psas) > 0:
equal = psas.find("=")
name = psas[:equal]
psas = psas[equal + 2:]
quote = psas.find('"')
value = psas[:quote]
d[name] = value
psas = psas[quote + 2:]
# print d
if "HM" in d:
key = d["HM"] + "%" + d["URI"] + "%" + d["HS"]
# totals
if key not in totals:
totals[key] = 0
totals[key] += float(d["ET"])
if d["HM"] == "POST" and d["HS"] == "200":
count += 1
# data
if key not in data:
data[key] = []
d = (sselapsed, float(d["ET"]))
data[key].append(d)
else:
rs = m.group('remain')
# print "rs: >%s<" % (rs,)
jse = js.match(rs)
if jse:
# print ("JOB START: oper: >%s<, jobid: >%s<" %
# (jse.group('oper'), jse.group('jobid')))
jobid = jse.group('jobid')
running_jobs.add(jobid)
d = sselapsed
tjobs[jobid] = d
jee = je.match(rs)
if jee:
jobid = jee.group('jobid')
oper = jee.group('oper')
if jobid in tjobs:
# print ("JOB END: oper: >%s<, jobid: >%s<,"
# " status: >%s<, sselapsed: >%f<,"
# " opertime: >%f<" %
# (jee.group('oper'),
# jee.group('jobid'),
# jee.group('status'),
# sselapsed, sselapsed - tjobs[jobid]))
running_jobs.remove(jobid)
d = (oper, sselapsed, sselapsed - tjobs[jobid])
tjobs[jobid] = d
if len(running_jobs) > 0:
for rj in running_jobs:
del tjobs[rj]
jobs = {}
for k, v in tjobs.iteritems():
(oper, t, et) = v
if oper not in jobs:
jobs[oper] = []
jobs[oper].append((t, et))
print count
print totals
# raise Exception("quit")
# print d
# value_end = psas[]
# print psas[equal+2:]
# print "name: >%s<, value: >%s<" % (name, value)
# d = {}
# d["p0"] = psas[0]
# for psa in psas:
# equal = psa.find("=")
# name = psa[:equal]
#
# print psa.split("=")
# value_start = psa[equal+2:]
# print m.groups()
plotA(data, datafilter, jobs, jobsfilter)
# plotB(s)
# plotD(s)
# http://stackoverflow.com/questions/7733693/
# matplotlib-overlay-plots-with-different-scales
# marker description
# "." point
# "," pixel
# "o" circle
# "v" triangle_down
# "^" triangle_up
# "<" triangle_left
# ">" triangle_right
# "1" tri_down
# "2" tri_up
# "3" tri_left
# "4" tri_right
# "8" octagon
# "s" square
# "p" pentagon
# "*" star
# "h" hexagon1
# "H" hexagon2
# "+" plus
# "x" x
# "D" diamond
# "d" thin_diamond
# "|" vline
# "_" hline
# TICKLEFT tickleft
# TICKRIGHT tickright
# TICKUP tickup
# TICKDOWN tickdown
# CARETLEFT caretleft
# CARETRIGHT caretright
# CARETUP caretup
# CARETDOWN caretdown
# "None" nothing
# None nothing
# " " nothing
# "" nothing
# '$...$' render the string using mathtext.
# verts a list of (x, y) pairs used for Path vertices.
# path a Path instance.
# (numsides, style, angle) see below
# The location of the legend can be specified by the keyword argument loc,
# either by string or a integer number.
#
# String Number
# upper right 1
# upper left 2
# lower left 3
# lower right 4
# right 5
# center left 6
# center right 7
# lower center 8
# upper center 9
# center 10
| apache-2.0 |
ScholzVolkmer/django-cms | cms/management/commands/subcommands/base.py | 36 | 1617 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
import sys
class SubcommandsCommand(BaseCommand):
subcommands = {}
command_name = ''
def __init__(self):
super(SubcommandsCommand, self).__init__()
for name, subcommand in self.subcommands.items():
subcommand.command_name = '%s %s' % (self.command_name, name)
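    # Sketch of intended wiring (assumed, not part of this module): a parent
    # command maps subcommand names to BaseCommand subclasses, e.g.
    #   class Command(SubcommandsCommand):
    #       command_name = 'cms'
    #       subcommands = {'list': ListCommand, 'copy': CopyCommand}
    # so "manage.py cms list ..." instantiates ListCommand and calls its
    # handle() with the remaining arguments.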
def handle(self, *args, **options):
stderr = getattr(self, 'stderr', sys.stderr)
stdout = getattr(self, 'stdout', sys.stdout)
if len(args) > 0:
if args[0] in self.subcommands.keys():
handle_command = self.subcommands.get(args[0])()
handle_command.stdout = stdout
handle_command.stderr = stderr
handle_command.handle(*args[1:], **options)
else:
stderr.write("%r is not a valid subcommand for %r\n" % (args[0], self.command_name))
stderr.write("Available subcommands are:\n")
for subcommand in sorted(self.subcommands.keys()):
stderr.write(" %r\n" % subcommand)
raise CommandError('Invalid subcommand %r for %r' % (args[0], self.command_name))
else:
stderr.write("%r must be called with at least one argument, it's subcommand.\n" % self.command_name)
stderr.write("Available subcommands are:\n")
for subcommand in sorted(self.subcommands.keys()):
stderr.write(" %r\n" % subcommand)
raise CommandError('No subcommand given for %r' % self.command_name) | bsd-3-clause |
morrisonlevi/FrameworkBenchmarks | php-fuel/setup.py | 5 | 1857 | import subprocess
import sys
import os
import setup_util
from os.path import expanduser
home = expanduser("~")
def start(args, logfile, errfile):
setup_util.replace_text("php-fuel/fuel/app/config/production/db.php", "localhost", ""+ args.database_host +"")
setup_util.replace_text("php-fuel/deploy/nginx.conf", "root .*\/FrameworkBenchmarks", "root " + home + "/FrameworkBenchmarks")
try:
if os.name == 'nt':
subprocess.check_call('icacls "C:\\FrameworkBenchmarks\\php-fuel" /grant "IIS_IUSRS:(OI)(CI)F"', shell=True, stderr=errfile, stdout=logfile)
subprocess.check_call('appcmd add site /name:PHP /bindings:http/*:8080: /physicalPath:"C:\\FrameworkBenchmarks\\php-fuel"', shell=True, stderr=errfile, stdout=logfile)
return 0
subprocess.check_call("sudo chown -R www-data:www-data php-fuel", shell=True, stderr=errfile, stdout=logfile)
subprocess.check_call("sudo php-fpm --fpm-config config/php-fpm.conf -g " + home + "/FrameworkBenchmarks/php-fuel/deploy/php-fpm.pid", shell=True, stderr=errfile, stdout=logfile)
subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c " + home + "/FrameworkBenchmarks/php-fuel/deploy/nginx.conf", shell=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
try:
if os.name == 'nt':
subprocess.call('appcmd delete site PHP', shell=True, stderr=errfile, stdout=logfile)
return 0
subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True, stderr=errfile, stdout=logfile)
subprocess.call("sudo kill -QUIT $( cat php-fuel/deploy/php-fpm.pid )", shell=True, stderr=errfile, stdout=logfile)
subprocess.check_call("sudo chown -R $USER:$USER php-fuel", shell=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1 | bsd-3-clause |
jbedorf/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/conditional_distribution_test.py | 135 | 3028 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Bernoulli distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.kernel_tests import distribution_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class ConditionalDistributionTest(distribution_test.DistributionTest):
def _GetFakeDistribution(self):
class _FakeDistribution(distributions.ConditionalDistribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
self._static_event_shape = tensor_shape.TensorShape(event_shape)
super(_FakeDistribution, self).__init__(
dtype=dtypes.float32,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
def _sample_n(self, unused_shape, unused_seed, arg1, arg2):
raise ValueError(arg1, arg2)
def _log_prob(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _prob(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _cdf(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _log_cdf(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _log_survival_function(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _survival_function(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
return _FakeDistribution
def testNotImplemented(self):
d = self._GetFakeDistribution()(batch_shape=[], event_shape=[])
for name in ["sample", "log_prob", "prob", "log_cdf", "cdf",
"log_survival_function", "survival_function"]:
method = getattr(d, name)
with self.assertRaisesRegexp(ValueError, "b1.*b2"):
method([] if name == "sample" else 1.0, arg1="b1", arg2="b2")
if __name__ == "__main__":
test.main()
| apache-2.0 |
vineodd/PIMSim | GEM5Simulation/gem5/src/gpu-compute/X86GPUTLB.py | 2 | 3390 | # Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from m5.objects.MemObject import MemObject
if buildEnv['FULL_SYSTEM']:
class X86PagetableWalker(MemObject):
type = 'X86PagetableWalker'
cxx_class = 'X86ISA::Walker'
port = SlavePort("Port for the hardware table walker")
system = Param.System(Parent.any, "system object")
class X86GPUTLB(MemObject):
type = 'X86GPUTLB'
cxx_class = 'X86ISA::GpuTLB'
cxx_header = 'gpu-compute/gpu_tlb.hh'
size = Param.Int(64, "TLB size (number of entries)")
assoc = Param.Int(64, "TLB associativity")
if buildEnv['FULL_SYSTEM']:
walker = Param.X86PagetableWalker(X86PagetableWalker(),
"page table walker")
hitLatency = Param.Int(2, "Latency of a TLB hit")
missLatency1 = Param.Int(5, "Latency #1 of a TLB miss")
missLatency2 = Param.Int(100, "Latency #2 of a TLB miss")
maxOutstandingReqs = Param.Int(64, "# of maximum outstanding requests")
slave = VectorSlavePort("Port on side closer to CPU/CU")
master = VectorMasterPort("Port on side closer to memory")
allocationPolicy = Param.Bool(True, "Allocate on an access")
accessDistance = Param.Bool(False, "print accessDistance stats")
class TLBCoalescer(MemObject):
type = 'TLBCoalescer'
cxx_class = 'TLBCoalescer'
cxx_header = 'gpu-compute/tlb_coalescer.hh'
probesPerCycle = Param.Int(2, "Number of TLB probes per cycle")
coalescingWindow = Param.Int(1, "Permit coalescing across that many ticks")
slave = VectorSlavePort("Port on side closer to CPU/CU")
master = VectorMasterPort("Port on side closer to memory")
    disableCoalescing = Param.Bool(False, "Disable Coalescing")
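# Sketch of how these SimObjects might be wired in a gem5 config script
# (assumed, illustrative only; the parameter values are made up):
#   tlb = X86GPUTLB(size=128, assoc=8, hitLatency=2)
#   coalescer = TLBCoalescer(probesPerCycle=2)
#   coalescer.master = tlb.slave  # coalesced probes feed the TLB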
| gpl-3.0 |
prds21/barrial-movie | servers/jumbofiles.py | 44 | 2439 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for jumbofiles
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[jumbofiles.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
data = scrapertools.cache_page(page_url)
# op=download2&id=oiyetnk5vwzf&rand=m2080mem&referer=&method_free=&method_premium=&down_direct=1&x=64&y=5
op = scrapertools.get_match(data,'<input type="hidden" name="op" value="([^"]+)">')
id = scrapertools.get_match(data,'<input type="hidden" name="id" value="([^"]+)">')
random_number = scrapertools.get_match(data,'<input type="hidden" name="rand" value="([^"]+)">')
down_direct = scrapertools.get_match(data,'<input type="hidden" name="down_direct" value="([^"]+)">')
post = "op=%s&id=%s&rand=%s&referer=&method_free=&method_premium=&down_direct=%s&x=64&y=5" % (op,id,random_number,down_direct)
data = scrapertools.cache_page(page_url,post=post)
#logger.info("data="+data)
#<FORM METHOD="LINK" ACTION="http://www96.jumbofiles.com:443/d/jbswjaebcr4eam62sd6ue2bb47yo6ldj5pcbc6wed6qteh73vjzcu/ORNE.avi">
video_url = scrapertools.get_match(data,'<FORM METHOD="LINK" ACTION="([^"]+)">')
video_urls.append( [ video_url[-4:]+" [jumbofiles]" , video_url ] )
for video_url in video_urls:
logger.info("[jumbofiles.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Finds this server's video URLs in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://jumbofiles.com/oiyetnk5vwzf
patronvideos = '(http://jumbofiles.com/[0-9a-z]+)'
logger.info("[jumbofiles.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[jumbofiles]"
url = match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'jumbofiles' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
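# Illustrative call (not in the original): for text containing
# "http://jumbofiles.com/oiyetnk5vwzf" exactly once, find_videos returns
#   [['[jumbofiles]', 'http://jumbofiles.com/oiyetnk5vwzf', 'jumbofiles']]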
| gpl-3.0 |
brain-tec/e-commerce | website_sale_product_brand/controllers/main.py | 3 | 3061 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today Serpent Consulting Services Pvt. Ltd.
# (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import http
from openerp.http import request
from openerp import SUPERUSER_ID
from openerp.addons.website_sale.controllers.main import QueryURL
from openerp.addons.website_sale.controllers.main import website_sale
class WebsiteSale(website_sale):
@http.route(['/shop',
'/shop/page/<int:page>',
'/shop/category/<model("product.public.category"):category>',
"""/shop/category/<model("product.public.category"):category>
/page/<int:page>""",
'/shop/brands'],
type='http',
auth='public',
website=True)
def shop(self, page=0, category=None, brand=None, search='', **post):
if brand:
request.context.setdefault('brand_id', int(brand))
result = super(WebsiteSale, self).shop(page=page, category=category,
brand=brand, search=search,
**post)
result.qcontext['brand'] = brand
return result
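    # Illustrative flow (assumed): a request such as /shop?brand=3 places
    # brand_id=3 into the request context before delegating to the stock
    # website_sale controller, so product searches that honour brand_id
    # (overridden elsewhere in this addon) return only that brand's products.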
# Method to get the brands.
@http.route(
['/page/product_brands'],
type='http',
auth='public',
website=True)
def product_brands(self, **post):
cr, context, pool = (request.cr,
request.context,
request.registry)
b_obj = pool['product.brand']
domain = []
if post.get('search'):
domain += [('name', 'ilike', post.get('search'))]
brand_ids = b_obj.search(cr, SUPERUSER_ID, domain)
brand_rec = b_obj.browse(cr, SUPERUSER_ID, brand_ids, context=context)
keep = QueryURL('/page/product_brands', brand_id=[])
values = {'brand_rec': brand_rec,
'keep': keep}
if post.get('search'):
values.update({'search': post.get('search')})
return request.website.render(
'website_sale_product_brand.product_brands',
values)
| agpl-3.0 |
victormanakkat/Taco-Chronicles | main.py | 1 | 3071 | # main.py
# By Tyler Spadgenske
VERSION = '1.0.19'
def main():
#Really important values
showFPS = True
TOTAL_OBJECTS = 1
#Import modules
import pygame, sys, os
from pygame.locals import *
#import custom classes
import background, button, toolbar, selectGun, selectTool, person, powerups, baddieAI
from background import Level_1
from button import Button
from toolbar import Toolbar
from selectGun import selectGunMenu
from selectTool import selectToolMenu
from person import Person
from powerups import Powerups
from baddieAI import AI
from l1 import L1
import load
from screen import Screen
from file import File
#Setup game data
getFiles = File()
highscore, totalscore, firstRun, lockedGuns = getFiles.read()
#Show Important information if program is run for first time
if firstRun:
from installer import Install
firstRun = False
#Setup the main screen display and clock
pygame.init()
os.environ ['SDL_VIDEO_WINDOW_POS'] = 'center'
WINDOWWIDTH = 1200
    WINDOWHEIGHT = 600
    windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
icon = pygame.image.load('files\\icon.png')
pygame.display.set_caption('The Taco Chronicles')
pygame.display.set_icon(icon)
mainClock = pygame.time.Clock()
load.Credits(windowSurface)
#Setup Colors
BLUE = (0,0,255)
SKY_BLUE = (0, 255, 255)
windowSurface.fill(SKY_BLUE)
lockedTools = {'crowbar':False, 'rope':True, 'key':True, 'TNT':True, 'jetpack':True}
sound = True
gameData = {'sound':sound, 'lockedGuns':lockedGuns, 'lockedTools':lockedTools}
restart = True
start = Screen(windowSurface)
clicked = False
windowSurface.fill((255, 255, 255))
pygame.mixer.music.load('files//sound//gameTheme.mp3')
exit = False
while True:
restart = True
clicked = False
pygame.mixer.music.play(-1, 0.0)
windowSurface.fill((255, 255, 255))
while True:
if exit:
getFiles.write(highscore, totalscore, firstRun, lockedGuns)
pygame.quit()
sys.exit()
clicked, exit = start.startScreen(highscore, clicked)
if clicked:
break
pygame.mixer.music.stop()
load.Load(windowSurface)
#Run the gameplay
count = 0
while True:
level = L1(windowSurface, mainClock, SKY_BLUE, gameData, showFPS)
restart, goBack, highscore, totalscore, exit = level.play(highscore, totalscore)
if goBack:
break
if exit:
break
if totalscore > 10000:
lockedGuns['shotgun'] = False
if totalscore > 15000:
lockedGuns['AK-47'] = False
if totalscore > 30000:
lockedGuns['bazooka'] = False
if totalscore > 35000:
lockedGuns['flamethrower'] = False
if __name__ == '__main__':
main()
| gpl-3.0 |
kongo/vim-presentation-ru | node_modules/grunt/node_modules/gzip-js/node_modules/deflate-js/test/runner.py | 177 | 1688 | #!/usr/bin/env python
import argparse
import deflate
import inflate
from colorama import Fore
testDir = 'test-files'
outDir = 'test-outs'
allPassed = True
parser = argparse.ArgumentParser(description='Process command-line arguments')
parser.add_argument('--test', metavar='path/to/file', type=str, default='both', nargs='?', help='Which test to run: deflate, inflate, or both')
parser.add_argument('--file', '-f', metavar='path/to/file', type=str, nargs='?', help='Path to file to use for test')
parser.add_argument('--level', '-l', metavar='#', type=int, nargs='?', help='Compression level')
parser.add_argument('--no-delete', const=True, default=False, nargs='?', help='Don\'t delete files produced for test')
args = parser.parse_args()
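# Example invocation (illustrative): run both suites against one file at
# compression level 6 and keep the generated outputs for inspection:
#   python runner.py --test both -f test-files/sample.txt -l 6 --no-delete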
delete = not getattr(args, 'no_delete')
level = getattr(args, 'level')
inFile = getattr(args, 'file')
test = getattr(args, 'test')
if test == 'deflate' or test == 'both':
print Fore.CYAN + 'Running deflate tests' + Fore.RESET
passed = True
if inFile != None:
passed = deflate.runTest(inFile, level, delete, outDir)
else:
passed = deflate.runAll(level, delete, testDir, outDir)
# if we fail one test, we fail the entire test
allPassed = allPassed and passed
if test == 'inflate' or test == 'both':
print Fore.CYAN + 'Running inflate tests' + Fore.RESET
passed = True
if inFile != None:
passed = inflate.runTest(inFile, level, delete, outDir)
else:
passed = inflate.runAll(level, delete, testDir, outDir)
# if we fail one test, we fail the entire test
allPassed = allPassed and passed
if allPassed:
print Fore.GREEN + 'All tests passed!' + Fore.RESET
else:
print Fore.RED + 'Automated test failed' + Fore.RESET
| mit |
jbzdak/edx-platform | common/djangoapps/util/memcache.py | 251 | 1344 | """
This module provides a KEY_FUNCTION suitable for use with a memcache backend
so that we can cache any keys, not just ones that memcache would ordinarily accept
"""
from django.utils.encoding import smart_str
import hashlib
import urllib
def fasthash(string):
"""
Hashes `string` into a string representation of a 128-bit digest.
"""
md4 = hashlib.new("md4")
md4.update(string)
return md4.hexdigest()
def cleaned_string(val):
"""
Converts `val` to unicode and URL-encodes special characters
(including quotes and spaces)
"""
return urllib.quote_plus(smart_str(val))
def safe_key(key, key_prefix, version):
"""
Given a `key`, `key_prefix`, and `version`,
return a key that is safe to use with memcache.
`key`, `key_prefix`, and `version` can be numbers, strings, or unicode.
"""
# Clean for whitespace and control characters, which
# cause memcache to raise an exception
key = cleaned_string(key)
key_prefix = cleaned_string(key_prefix)
version = cleaned_string(version)
# Attempt to combine the prefix, version, and key
combined = ":".join([key_prefix, version, key])
# If the total length is too long for memcache, hash it
if len(combined) > 250:
combined = fasthash(combined)
# Return the result
return combined
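# Illustrative behaviour (not in the original module): a short key is merely
# quoted and joined, while an oversized one collapses to an md4 digest:
#   safe_key("user profile", "myapp", 2)  ->  "myapp:2:user+profile"
#   safe_key("x" * 300, "myapp", 2)       ->  32-character hex string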
| agpl-3.0 |