repo_name: stringlengths 5–100 | path: stringlengths 4–299 | copies: stringclasses (990 values) | size: stringlengths 4–7 | content: stringlengths 666–1.03M | license: stringclasses (15 values) | hash: int64 (-9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean: float64 (3.17–100) | line_max: int64 (7–1k) | alpha_frac: float64 (0.25–0.98) | autogenerated: bool (1 class)
---|---|---|---|---|---|---|---|---|---|---
Zhongqilong/kbengine | kbe/src/lib/python/Lib/distutils/spawn.py | 81 | 7514 | """distutils.spawn
Provides the 'spawn()' function, a front-end to various platform-
specific functions for launching another program in a sub-process.
Also provides the 'find_executable()' to search the path for a given
executable name.
"""
import sys
import os
from distutils.errors import DistutilsPlatformError, DistutilsExecError
from distutils.debug import DEBUG
from distutils import log
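# Usage sketch for the helpers documented in the module docstring above: the
# compiler name "gcc" and the source file are illustrative placeholders only.
#
#     from distutils.spawn import spawn, find_executable
#     if find_executable("gcc"):
#         spawn(["gcc", "-c", "example.c"], search_path=1, dry_run=0)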
def spawn(cmd, search_path=1, verbose=0, dry_run=0):
"""Run another program, specified as a command list 'cmd', in a new process.
'cmd' is just the argument list for the new process, ie.
cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
There is no way to run a program with a name different from that of its
executable.
If 'search_path' is true (the default), the system's executable
search path will be used to find the program; otherwise, cmd[0]
must be the exact path to the executable. If 'dry_run' is true,
the command will not actually be run.
Raise DistutilsExecError if running the program fails in any way; just
return on success.
"""
# cmd is documented as a list, but just in case some code passes a tuple
# in, protect our %-formatting code against horrible death
cmd = list(cmd)
if os.name == 'posix':
_spawn_posix(cmd, search_path, dry_run=dry_run)
elif os.name == 'nt':
_spawn_nt(cmd, search_path, dry_run=dry_run)
else:
raise DistutilsPlatformError(
"don't know how to spawn programs on platform '%s'" % os.name)
def _nt_quote_args(args):
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i, arg in enumerate(args):
if ' ' in arg:
args[i] = '"%s"' % arg
return args
def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0):
executable = cmd[0]
cmd = _nt_quote_args(cmd)
if search_path:
# either we find one or it stays the same
executable = find_executable(executable) or executable
log.info(' '.join([executable] + cmd[1:]))
if not dry_run:
# spawn for NT requires a full path to the .exe
try:
rc = os.spawnv(os.P_WAIT, executable, cmd)
except OSError as exc:
# this seems to happen when the command isn't found
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"command %r failed: %s" % (cmd, exc.args[-1]))
if rc != 0:
# and this reflects the command running but failing
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"command %r failed with exit status %d" % (cmd, rc))
if sys.platform == 'darwin':
from distutils import sysconfig
_cfg_target = None
_cfg_target_split = None
def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
log.info(' '.join(cmd))
if dry_run:
return
executable = cmd[0]
exec_fn = search_path and os.execvp or os.execv
env = None
if sys.platform == 'darwin':
global _cfg_target, _cfg_target_split
if _cfg_target is None:
_cfg_target = sysconfig.get_config_var(
'MACOSX_DEPLOYMENT_TARGET') or ''
if _cfg_target:
_cfg_target_split = [int(x) for x in _cfg_target.split('.')]
if _cfg_target:
# ensure that the deployment target of build process is not less
# than that used when the interpreter was built. This ensures
# extension modules are built with correct compatibility values
cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
'now "%s" but "%s" during configure'
% (cur_target, _cfg_target))
raise DistutilsPlatformError(my_msg)
env = dict(os.environ,
MACOSX_DEPLOYMENT_TARGET=cur_target)
exec_fn = search_path and os.execvpe or os.execve
pid = os.fork()
if pid == 0: # in the child
try:
if env is None:
exec_fn(executable, cmd)
else:
exec_fn(executable, cmd, env)
except OSError as e:
if not DEBUG:
cmd = executable
sys.stderr.write("unable to execute %r: %s\n"
% (cmd, e.strerror))
os._exit(1)
if not DEBUG:
cmd = executable
sys.stderr.write("unable to execute %r for unknown reasons" % cmd)
os._exit(1)
else: # in the parent
# Loop until the child either exits or is terminated by a signal
# (ie. keep waiting if it's merely stopped)
while True:
try:
pid, status = os.waitpid(pid, 0)
except OSError as exc:
import errno
if exc.errno == errno.EINTR:
continue
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"command %r failed: %s" % (cmd, exc.args[-1]))
if os.WIFSIGNALED(status):
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"command %r terminated by signal %d"
% (cmd, os.WTERMSIG(status)))
elif os.WIFEXITED(status):
exit_status = os.WEXITSTATUS(status)
if exit_status == 0:
return # hey, it succeeded!
else:
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"command %r failed with exit status %d"
% (cmd, exit_status))
elif os.WIFSTOPPED(status):
continue
else:
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"unknown error executing %r: termination status %d"
% (cmd, status))
def find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
| lgpl-3.0 | 6,789,913,219,496,959,000 | 37.533333 | 80 | 0.565744 | false |
awkspace/ansible | lib/ansible/modules/network/onyx/onyx_mlag_ipl.py | 118 | 6779 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_mlag_ipl
version_added: "2.5"
author: "Samer Deeb (@samerd)"
short_description: Manage IPL (inter-peer link) on Mellanox ONYX network devices
description:
- This module provides declarative management of IPL (inter-peer link)
management on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.4000
options:
name:
description:
- Name of the interface (port-channel) IPL should be configured on.
required: true
vlan_interface:
description:
- Name of the IPL vlan interface.
state:
description:
- IPL state.
default: present
choices: ['present', 'absent']
peer_address:
description:
- IPL peer IP address.
"""
EXAMPLES = """
- name: run configure ipl
onyx_mlag_ipl:
name: Po1
vlan_interface: Vlan 322
state: present
peer_address: 192.168.7.1
- name: run remove ipl
onyx_mlag_ipl:
name: Po1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- interface port-channel 1 ipl 1
- interface vlan 1024 ipl 1 peer-address 10.10.10.10
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
from ansible.module_utils.network.onyx.onyx import show_cmd
class OnyxMlagIplModule(BaseOnyxModule):
VLAN_IF_REGEX = re.compile(r'^Vlan \d+')
@classmethod
def _get_element_spec(cls):
return dict(
name=dict(required=True),
state=dict(default='present',
choices=['present', 'absent']),
peer_address=dict(),
vlan_interface=dict(),
)
def init_module(self):
""" module initialization
"""
element_spec = self._get_element_spec()
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(
name=module_params['name'],
state=module_params['state'],
peer_address=module_params['peer_address'],
vlan_interface=module_params['vlan_interface'])
self.validate_param_values(self._required_config)
def _update_mlag_data(self, mlag_data):
if not mlag_data:
return
mlag_summary = mlag_data.get("MLAG IPLs Summary", {})
ipl_id = "1"
ipl_list = mlag_summary.get(ipl_id)
if ipl_list:
ipl_data = ipl_list[0]
vlan_id = ipl_data.get("Vlan Interface")
vlan_interface = ""
if vlan_id != "N/A":
vlan_interface = "Vlan %s" % vlan_id
peer_address = ipl_data.get("Peer IP address")
name = ipl_data.get("Group Port-Channel")
self._current_config = dict(
name=name,
peer_address=peer_address,
vlan_interface=vlan_interface)
def _show_mlag_data(self):
cmd = "show mlag"
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def load_current_config(self):
# called in base class in run function
self._current_config = dict()
mlag_data = self._show_mlag_data()
self._update_mlag_data(mlag_data)
def _get_interface_cmd_name(self, if_name):
if if_name.startswith('Po'):
return if_name.replace("Po", "port-channel ")
self._module.fail_json(
msg='invalid interface name: %s' % if_name)
def _generate_port_channel_command(self, if_name, enable):
if_cmd_name = self._get_interface_cmd_name(if_name)
if enable:
ipl_cmd = 'ipl 1'
else:
ipl_cmd = "no ipl 1"
cmd = "interface %s %s" % (if_cmd_name, ipl_cmd)
return cmd
def _generate_vlan_if_command(self, if_name, enable, peer_address):
if_cmd_name = if_name.lower()
if enable:
ipl_cmd = 'ipl 1 peer-address %s' % peer_address
else:
ipl_cmd = "no ipl 1"
cmd = "interface %s %s" % (if_cmd_name, ipl_cmd)
return cmd
def _generate_no_ipl_commands(self):
curr_interface = self._current_config.get('name')
req_interface = self._required_config.get('name')
if curr_interface == req_interface:
cmd = self._generate_port_channel_command(
req_interface, enable=False)
self._commands.append(cmd)
def _generate_ipl_commands(self):
curr_interface = self._current_config.get('name')
req_interface = self._required_config.get('name')
if curr_interface != req_interface:
if curr_interface and curr_interface != 'N/A':
cmd = self._generate_port_channel_command(
curr_interface, enable=False)
self._commands.append(cmd)
cmd = self._generate_port_channel_command(
req_interface, enable=True)
self._commands.append(cmd)
curr_vlan = self._current_config.get('vlan_interface')
req_vlan = self._required_config.get('vlan_interface')
add_peer = False
if curr_vlan != req_vlan:
add_peer = True
if curr_vlan:
cmd = self._generate_vlan_if_command(curr_vlan, enable=False,
peer_address=None)
self._commands.append(cmd)
curr_peer = self._current_config.get('peer_address')
req_peer = self._required_config.get('peer_address')
if req_peer != curr_peer:
add_peer = True
if add_peer and req_peer:
cmd = self._generate_vlan_if_command(req_vlan, enable=True,
peer_address=req_peer)
self._commands.append(cmd)
def generate_commands(self):
state = self._required_config['state']
if state == 'absent':
self._generate_no_ipl_commands()
else:
self._generate_ipl_commands()
def main():
""" main entry point for module execution
"""
OnyxMlagIplModule.main()
if __name__ == '__main__':
main()
| gpl-3.0 | 6,891,197,390,192,943,000 | 31.280952 | 92 | 0.583124 | false |
alexandrucoman/vbox-nova-driver | nova/tests/functional/v3/test_pci.py | 8 | 7468 | # Copyright 2013 Intel.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
import testtools
from nova import db
from nova import objects
from nova.objects import pci_device_pool
from nova.tests.functional.v3 import api_sample_base
from nova.tests.functional.v3 import test_servers
skip_msg = "Bug 1426241"
fake_db_dev_1 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': '0000:04:10.0',
'vendor_id': '8086',
'numa_node': 0,
'product_id': '1520',
'dev_type': 'type-VF',
'status': 'available',
'dev_id': 'pci_0000_04_10_0',
'label': 'label_8086_1520',
'instance_uuid': '69ba1044-0766-4ec0-b60d-09595de034a1',
'request_id': None,
'extra_info': '{"key1": "value1", "key2": "value2"}'
}
fake_db_dev_2 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 2,
'compute_node_id': 1,
'address': '0000:04:10.1',
'vendor_id': '8086',
'numa_node': 1,
'product_id': '1520',
'dev_type': 'type-VF',
'status': 'available',
'dev_id': 'pci_0000_04_10_1',
'label': 'label_8086_1520',
'instance_uuid': 'd5b446a6-a1b4-4d01-b4f0-eac37b3a62fc',
'request_id': None,
'extra_info': '{"key3": "value3", "key4": "value4"}'
}
class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase):
extension_name = "os-pci"
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extra_extensions_to_load = ['os-hypervisors']
extension_name = 'os-pci'
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
cpu_info = collections.OrderedDict([
('arch', 'x86_64'),
('model', 'Nehalem'),
('vendor', 'Intel'),
('features', ['pge', 'clflush']),
('topology', {
'cores': 1,
'threads': 1,
'sockets': 4,
}),
])
self.fake_compute_node = objects.ComputeNode(
cpu_info=jsonutils.dumps(cpu_info),
current_workload=0,
disk_available_least=0,
host_ip="1.1.1.1",
state="up",
status="enabled",
free_disk_gb=1028,
free_ram_mb=7680,
hypervisor_hostname="fake-mini",
hypervisor_type="fake",
hypervisor_version=1000,
id=1,
local_gb=1028,
local_gb_used=0,
memory_mb=8192,
memory_mb_used=512,
running_vms=0,
vcpus=1,
vcpus_used=0,
service_id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
pci_device_pools=pci_device_pool.from_pci_stats(
{"count": 5,
"vendor_id": "8086",
"product_id": "1520",
"keya": "valuea",
"extra_info": {
"phys_function": '[["0x0000", '
'"0x04", "0x00",'
' "0x1"]]',
"key1": "value1"}}),)
self.fake_service = objects.Service(
id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
disabled=False,
disabled_reason=None)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.objects.Service.get_by_compute_host")
@mock.patch("nova.objects.ComputeNode.get_by_id")
def test_pci_show(self, mock_obj, mock_svc_get, mock_service):
mock_obj.return_value = self.fake_compute_node
mock_svc_get.return_value = self.fake_service
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-show-resp',
subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.objects.Service.get_by_compute_host")
@mock.patch("nova.objects.ComputeNodeList.get_all")
def test_pci_detail(self, mock_obj, mock_svc_get, mock_service):
mock_obj.return_value = [self.fake_compute_node]
mock_svc_get.return_value = self.fake_service
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/detail')
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-detail-resp',
subs, response, 200)
class PciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-pci"
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
def _fake_pci_device_get_by_id(self, context, id):
return fake_db_dev_1
def _fake_pci_device_get_all_by_node(self, context, id):
return [fake_db_dev_1, fake_db_dev_2]
def test_pci_show(self):
self.stubs.Set(db, 'pci_device_get_by_id',
self._fake_pci_device_get_by_id)
response = self._do_get('os-pci/1')
subs = self._get_regexes()
self._verify_response('pci-show-resp', subs, response, 200)
def test_pci_index(self):
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
response = self._do_get('os-pci')
subs = self._get_regexes()
self._verify_response('pci-index-resp', subs, response, 200)
def test_pci_detail(self):
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
response = self._do_get('os-pci/detail')
subs = self._get_regexes()
self._verify_response('pci-detail-resp', subs, response, 200)
| apache-2.0 | -5,800,958,422,579,611,000 | 34.226415 | 78 | 0.558382 | false |
hcasse/elfmake | elfmake/recipe.py | 1 | 6836 | """Classes used to represent recipes."""
import env
import action
import io
import os
import os.path
import sys
file_db = { } # file database
ext_db = { } # extension database
# base classes
class File(env.MapEnv):
"""Representation of files."""
path = None
recipe = None
is_goal = False
is_target = False
is_sticky = False
actual_path = None
def __init__(self, path):
env.MapEnv.__init__(self, path.get_file() , env.cenv.path, env.cenv)
self.path = path
file_db[str(path)] = self
def set_goal(self):
"""Mark a file as a goal."""
self.is_goal = True
def set_target(self):
"""Mark a file as a target."""
self.is_target = True
def set_sticky(self):
"""Mark a file as sticky, that is, a final target (not intermediate)."""
self.sticky = True
def actual(self):
"""Get the actual path of the file. For target file, this path
is relative to BPATH variable."""
if not self.actual_path:
if not self.is_target:
self.actual_path = self.path
else:
bpath = self["BPATH"]
if not bpath:
self.actual_path = self.path
else:
bpath = env.topenv.path / bpath
bpath = env.Path(bpath)
if self.path.prefixed_by(env.topenv.path):
self.actual_path = bpath / self.path.relative_to(env.topenv.path)
else:
self.actual_path = bpath / self.path
return self.actual_path
def __div__(self, arg):
return self.path / str(arg)
def time(self):
"""Get the last update time of the file."""
if self.is_goal:
return 0
else:
return self.actual().get_mod_time()
def younger_than(self, f):
"""Test if the current file is younger than the given one."""
if self.is_goal:
return True
else:
return self.time() < f.time()
def __str__(self):
path = self.actual()
if path.prefixed_by(env.topdir) or path.prefixed_by(env.curdir()):
return str(path.relative_to_cur())
else:
return str(path)
def get_file(path):
"""Get the file matching the given path in the DB. Apply
localisation rules relative to a particular make.py if the path
is not absolute."""
# apply localisation rule
if not os.path.isabs(str(path)):
path = env.cenv.path / path
else:
path = env.Path(path)
path = path.norm()
# find the file
if file_db.has_key(str(path)):
return file_db[str(path)]
else:
return File(path)
def get_files(paths):
"""Apply get_file on straight arguments of recipes."""
if not paths:
return []
if not isinstance(paths, list):
paths = [ paths ]
r = []
for p in paths:
if not isinstance(p, File):
p = get_file(p)
r.append(p)
return r
class Recipe:
"""A recipe to build files."""
ress = None
deps = None
env = None
cwd = None
def __init__(self, ress, deps = None):
ress = get_files(ress)
deps = get_files(deps)
self.ress = ress
self.deps = deps
for f in ress:
f.recipe = self
f.is_target = True
self.env = env.cenv
if hasattr(ress[0], 'cwd'):
self.cwd = ress[0].cwd
else:
self.cwd = self.env.path
def action(self, ctx):
"""Execute the receipe."""
pass
def display_action(self, out):
pass
def display(self, out):
out.write("%s: %s\n" % (" ".join([str(f) for f in self.ress]), " ".join([str(f) for f in self.deps])))
self.display_action(out)
out.write("\n")
class FunRecipe(Recipe):
"""A recipe that activates a function."""
fun = None
def __init__(self, fun, ress, deps):
Recipe.__init__(self, ress, deps)
self.fun = fun
def display_action(self, out):
out.write("\t<internal>\n")
def action(self, ctx):
self.fun(self.ress, self.deps, ctx)
class Ext:
"""Represent the support for a file extension."""
ext = None
gens = None
back = None
def __init__(self, ext):
self.ext = ext
self.gens = { }
self.backs = []
ext_db[ext] = self
def update(self, ext, gen):
"""Update extension for the given generator
and perform backward propagation."""
self.gens[ext] = gen
for back in self.backs:
back.dep.update(ext, back)
def get_ext(ext):
"""Obtain an extension."""
if ext_db.has_key(ext):
return ext_db[ext]
else:
return Ext(ext)
class Gen:
"""A generator of recipe."""
res = None
dep = None
def __init__(self, res, dep):
self.res = get_ext(res)
self.dep = get_ext(dep)
# update back link
self.res.backs.append(self)
# update current gens
self.dep.update(res, self)
for ext in self.dep.gens:
self.res.update(ext, self)
def gen(self, res, dep):
"""Generate a recipe to produce the given result
from the given dependency."""
pass
class FunGen(Gen):
"""A simple recipe generator from a function."""
fun = None
def __init__(self, res, dep, fun):
Gen.__init__(self, res, dep)
self.fun = fun
def gen(self, res, dep):
return FunRecipe(self.fun, [res], [dep])
def gen(dir, rext, dep):
"""Generate recipes to build res. A generation string is found between
file src and res. Each intermediate file has for name the kernel of res
(generated files will be put in the res directory). """
dir = env.Path(dir)
dep = env.Path(dep)
# prepare the kernel
b = dep.get_base()
dext = dep.get_ext()
#b, dext = os.path.splitext(dep)
#_, n = os.path.split(b)
n = b.get_file()
kern = dir / n #os.path.join(dir, n)
# initialize lookup process
if not ext_db.has_key(dext):
io.DEF.print_error("don't know how to build '%s' from '%s'" % (rext, dep))
exit(1)
ext = ext_db[dext]
prev = dep
# end when dep is found
while ext.ext <> rext:
gen = ext.gens[rext]
next = kern + gen.res.ext
gen.gen(next, prev)
prev = next
ext = gen.res
# return result
return prev
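# Usage sketch for gen(): assuming a FunGen rule from ".c" to ".o" has been
# registered, gen() can derive the object-file recipe for a source file. The
# callback body and the paths below are hypothetical placeholders.
#
#     FunGen(".o", ".c", lambda ress, deps, ctx: ctx.compile(deps[0], ress[0]))
#     obj = gen("build", ".o", "src/main.c")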
def fix(path):
"""Fix a path according to the current directory."""
if isinstance(path, list):
return [str(get_file(p)) for p in path]
else:
return str(get_file(path))
class ActionRecipe(Recipe):
"""A recipe that supports an action. object for generation."""
act = None
def __init__(self, ress, deps, actions):
Recipe.__init__(self, ress, deps)
self.act = action.make_actions(actions).instantiate(self)
def action(self, ctx):
if self.act:
self.act.execute(ctx)
def display_action(self, out):
self.act.display(out)
class ActionGen(Gen):
"""A recipe generator supporting simple actions."""
action = None
def __init__(self, res, dep, action):
Gen.__init__(self, res, dep)
self.action = action
def gen(self, res, dep):
return ActionRecipe([res], [dep], self.action)
def rule(ress, deps, *actions):
"""Build a rule with actions."""
	# pass the raw actions through; ActionRecipe wraps them via action.make_actions()
	ActionRecipe(ress, deps, actions)
def goal(goal, deps, actions = action.Action()):
"""Build a goal with the following dependencies."""
path = env.Path(env.cenv.path) / goal
file = get_file(str(path))
if file.recipe:
raise env.ElfError("a goal already named '%s' already exist!" % goal)
else:
file.set_goal()
file.recipe = ActionRecipe(goal, deps, actions)
return
| gpl-3.0 | -4,019,228,434,657,154,600 | 20.632911 | 104 | 0.646723 | false |
wkschwartz/django | tests/gis_tests/test_spatialrefsys.py | 17 | 5332 | import re
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils.functional import cached_property
test_srs = ({
'srid': 4326,
'auth_name': ('EPSG', True),
'auth_srid': 4326,
# Only the beginning, because there are differences depending on installed libs
'srtext': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84"',
# +ellps=WGS84 has been removed in the 4326 proj string in proj-4.8
'proj_re': r'\+proj=longlat (\+ellps=WGS84 )?(\+datum=WGS84 |\+towgs84=0,0,0,0,0,0,0 )\+no_defs ?',
'spheroid': 'WGS 84', 'name': 'WGS 84',
'geographic': True, 'projected': False, 'spatialite': True,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.3, 298.257223563),
'eprec': (1, 1, 9),
'wkt': re.sub(r'[\s+]', '', """
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
""")
}, {
'srid': 32140,
'auth_name': ('EPSG', False),
'auth_srid': 32140,
'srtext': (
'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",'
'DATUM["North_American_Datum_1983",SPHEROID["GRS 1980"'
),
'proj_re': r'\+proj=lcc (\+lat_1=30.28333333333333? |\+lat_2=28.38333333333333? |\+lat_0=27.83333333333333? |'
r'\+lon_0=-99 ){4}\+x_0=600000 \+y_0=4000000 (\+ellps=GRS80 )?'
r'(\+datum=NAD83 |\+towgs84=0,0,0,0,0,0,0 )?\+units=m \+no_defs ?',
'spheroid': 'GRS 1980', 'name': 'NAD83 / Texas South Central',
'geographic': False, 'projected': True, 'spatialite': False,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.31414, 298.257222101),
'eprec': (1, 5, 10),
})
@skipUnlessDBFeature("has_spatialrefsys_table")
class SpatialRefSysTest(TestCase):
@cached_property
def SpatialRefSys(self):
return connection.ops.connection.ops.spatial_ref_sys()
def test_get_units(self):
epsg_4326 = next(f for f in test_srs if f['srid'] == 4326)
unit, unit_name = self.SpatialRefSys().get_units(epsg_4326['wkt'])
self.assertEqual(unit_name, 'degree')
self.assertAlmostEqual(unit, 0.01745329251994328)
def test_retrieve(self):
"""
Test retrieval of SpatialRefSys model objects.
"""
for sd in test_srs:
srs = self.SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(sd['srid'], srs.srid)
# Some of the authority names are borked on Oracle, e.g., SRID=32140.
# also, Oracle Spatial seems to add extraneous info to fields, hence the
# the testing with the 'startswith' flag.
auth_name, oracle_flag = sd['auth_name']
# Compare case-insensitively because srs.auth_name is lowercase
# ("epsg") on Spatialite.
if not connection.ops.oracle or oracle_flag:
self.assertIs(srs.auth_name.upper().startswith(auth_name), True)
self.assertEqual(sd['auth_srid'], srs.auth_srid)
# No PROJ and different srtext on Oracle.
if not connection.ops.oracle:
self.assertTrue(srs.wkt.startswith(sd['srtext']))
self.assertRegex(srs.proj4text, sd['proj_re'])
def test_osr(self):
"""
Test getting OSR objects from SpatialRefSys model objects.
"""
for sd in test_srs:
sr = self.SpatialRefSys.objects.get(srid=sd['srid'])
self.assertTrue(sr.spheroid.startswith(sd['spheroid']))
self.assertEqual(sd['geographic'], sr.geographic)
self.assertEqual(sd['projected'], sr.projected)
self.assertIs(sr.name.startswith(sd['name']), True)
# Testing the SpatialReference object directly.
if not connection.ops.oracle:
srs = sr.srs
self.assertRegex(srs.proj, sd['proj_re'])
self.assertTrue(srs.wkt.startswith(sd['srtext']))
def test_ellipsoid(self):
"""
Test the ellipsoid property.
"""
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd['ellipsoid']
prec = sd['eprec']
# Getting our spatial reference and its ellipsoid
srs = self.SpatialRefSys.objects.get(srid=sd['srid'])
ellps2 = srs.ellipsoid
for i in range(3):
self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
@skipUnlessDBFeature('supports_add_srs_entry')
def test_add_entry(self):
"""
Test adding a new entry in the SpatialRefSys model using the
add_srs_entry utility.
"""
from django.contrib.gis.utils import add_srs_entry
add_srs_entry(3857)
self.assertTrue(
self.SpatialRefSys.objects.filter(srid=3857).exists()
)
srs = self.SpatialRefSys.objects.get(srid=3857)
self.assertTrue(
self.SpatialRefSys.get_spheroid(srs.wkt).startswith('SPHEROID[')
)
| bsd-3-clause | -790,373,884,958,211,500 | 38.496296 | 114 | 0.594336 | false |
orezpraw/gensim | gensim/test/test_miislita.py | 83 | 3928 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module replicates the miislita vector spaces from
"A Linear Algebra Approach to the Vector Space Model -- A Fast Track Tutorial"
by Dr. E. Garcia, [email protected]
See http://www.miislita.com for further details.
"""
from __future__ import division # always use floats
from __future__ import with_statement
import logging
import tempfile
import unittest
import bz2
import os
from gensim import utils, corpora, models, similarities
# sample data files are located in the same folder
module_path = os.path.dirname(__file__)
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
logger = logging.getLogger('test_miislita')
def get_tmpfile(suffix):
return os.path.join(tempfile.gettempdir(), suffix)
class CorpusMiislita(corpora.TextCorpus):
stoplist = set('for a of the and to in on'.split())
def get_texts(self):
"""
Parse documents from the .cor file provided in the constructor. Lowercase
each document and ignore some stopwords.
.cor format: one document per line, words separated by whitespace.
"""
with self.getstream() as stream:
for doc in stream:
yield [word for word in utils.to_unicode(doc).lower().split()
if word not in CorpusMiislita.stoplist]
def __len__(self):
"""Define this so we can use `len(corpus)`"""
if 'length' not in self.__dict__:
logger.info("caching corpus size (calculating number of documents)")
self.length = sum(1 for doc in self.get_texts())
return self.length
class TestMiislita(unittest.TestCase):
def test_textcorpus(self):
"""Make sure TextCorpus can be serialized to disk. """
# construct corpus from file
miislita = CorpusMiislita(datapath('head500.noblanks.cor.bz2'))
# make sure serializing works
ftmp = get_tmpfile('test_textcorpus.mm')
corpora.MmCorpus.save_corpus(ftmp, miislita)
self.assertTrue(os.path.exists(ftmp))
# make sure deserializing gives the same result
miislita2 = corpora.MmCorpus(ftmp)
self.assertEqual(list(miislita), list(miislita2))
def test_save_load_ability(self):
"""
Make sure we can save and load (un/pickle) TextCorpus objects (as long
as the underlying input isn't a file-like object; we cannot pickle those).
"""
# construct corpus from file
corpusname = datapath('miIslita.cor')
miislita = CorpusMiislita(corpusname)
# pickle to disk
tmpf = get_tmpfile('tc_test.cpickle')
miislita.save(tmpf)
miislita2 = CorpusMiislita.load(tmpf)
self.assertEqual(len(miislita), len(miislita2))
self.assertEqual(miislita.dictionary.token2id, miislita2.dictionary.token2id)
def test_miislita_high_level(self):
# construct corpus from file
miislita = CorpusMiislita(datapath('miIslita.cor'))
# initialize tfidf transformation and similarity index
tfidf = models.TfidfModel(miislita, miislita.dictionary, normalize=False)
index = similarities.SparseMatrixSimilarity(tfidf[miislita], num_features=len(miislita.dictionary))
# compare to query
query = 'latent semantic indexing'
vec_bow = miislita.dictionary.doc2bow(query.lower().split())
vec_tfidf = tfidf[vec_bow]
# perform a similarity query against the corpus
sims_tfidf = index[vec_tfidf]
# for the expected results see the article
expected = [0.0, 0.2560, 0.7022, 0.1524, 0.3334]
for i, value in enumerate(expected):
self.assertAlmostEqual(sims_tfidf[i], value, 2)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| gpl-3.0 | -6,077,943,060,901,426,000 | 31.733333 | 107 | 0.66166 | false |
gangadharkadam/v4_erp | erpnext/setup/doctype/backup_manager/backup_dropbox.py | 41 | 4776 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# SETUP:
# install pip install --upgrade dropbox
#
# Create new Dropbox App
#
# in conf.py, set oauth2 settings
# dropbox_access_key
# dropbox_access_secret
from __future__ import unicode_literals
import os
import frappe
from frappe.utils import get_request_site_address, cstr
from frappe import _
@frappe.whitelist()
def get_dropbox_authorize_url():
sess = get_dropbox_session()
request_token = sess.obtain_request_token()
return_address = get_request_site_address(True) \
+ "?cmd=erpnext.setup.doctype.backup_manager.backup_dropbox.dropbox_callback"
url = sess.build_authorize_url(request_token, return_address)
return {
"url": url,
"key": request_token.key,
"secret": request_token.secret,
}
@frappe.whitelist(allow_guest=True)
def dropbox_callback(oauth_token=None, not_approved=False):
from dropbox import client
if not not_approved:
if frappe.db.get_value("Backup Manager", None, "dropbox_access_key")==oauth_token:
allowed = 1
message = "Dropbox access allowed."
sess = get_dropbox_session()
sess.set_request_token(frappe.db.get_value("Backup Manager", None, "dropbox_access_key"),
frappe.db.get_value("Backup Manager", None, "dropbox_access_secret"))
access_token = sess.obtain_access_token()
frappe.db.set_value("Backup Manager", "Backup Manager", "dropbox_access_key", access_token.key)
frappe.db.set_value("Backup Manager", "Backup Manager", "dropbox_access_secret", access_token.secret)
frappe.db.set_value("Backup Manager", "Backup Manager", "dropbox_access_allowed", allowed)
dropbox_client = client.DropboxClient(sess)
try:
dropbox_client.file_create_folder("files")
except:
pass
else:
allowed = 0
message = "Illegal Access Token Please try again."
else:
allowed = 0
message = "Dropbox Access not approved."
frappe.local.message_title = "Dropbox Approval"
frappe.local.message = "<h3>%s</h3><p>Please close this window.</p>" % message
if allowed:
frappe.local.message_success = True
frappe.db.commit()
frappe.response['type'] = 'page'
frappe.response['page_name'] = 'message.html'
def backup_to_dropbox():
from dropbox import client, session
from frappe.utils.backups import new_backup
from frappe.utils import get_files_path, get_backups_path
if not frappe.db:
frappe.connect()
sess = session.DropboxSession(frappe.conf.dropbox_access_key, frappe.conf.dropbox_secret_key, "app_folder")
sess.set_token(frappe.db.get_value("Backup Manager", None, "dropbox_access_key"),
frappe.db.get_value("Backup Manager", None, "dropbox_access_secret"))
dropbox_client = client.DropboxClient(sess)
# upload database
backup = new_backup()
filename = os.path.join(get_backups_path(), os.path.basename(backup.backup_path_db))
upload_file_to_dropbox(filename, "/database", dropbox_client)
frappe.db.close()
response = dropbox_client.metadata("/files")
# upload files to files folder
did_not_upload = []
error_log = []
path = get_files_path()
for filename in os.listdir(path):
filename = cstr(filename)
found = False
filepath = os.path.join(path, filename)
for file_metadata in response["contents"]:
if os.path.basename(filepath) == os.path.basename(file_metadata["path"]) and os.stat(filepath).st_size == int(file_metadata["bytes"]):
found = True
break
if not found:
try:
upload_file_to_dropbox(filepath, "/files", dropbox_client)
except Exception:
did_not_upload.append(filename)
error_log.append(frappe.get_traceback())
frappe.connect()
return did_not_upload, list(set(error_log))
def get_dropbox_session():
try:
from dropbox import session
except:
frappe.msgprint(_("Please install dropbox python module"), raise_exception=1)
if not (frappe.conf.dropbox_access_key or frappe.conf.dropbox_secret_key):
frappe.throw(_("Please set Dropbox access keys in your site config"))
sess = session.DropboxSession(frappe.conf.dropbox_access_key, frappe.conf.dropbox_secret_key, "app_folder")
return sess
def upload_file_to_dropbox(filename, folder, dropbox_client):
from dropbox import rest
size = os.stat(filename).st_size
with open(filename, 'r') as f:
# if max packet size reached, use chunked uploader
max_packet_size = 4194304
if size > max_packet_size:
uploader = dropbox_client.get_chunked_uploader(f, size)
while uploader.offset < size:
try:
uploader.upload_chunked()
uploader.finish(folder + "/" + os.path.basename(filename), overwrite=True)
except rest.ErrorResponse:
pass
else:
dropbox_client.put_file(folder + "/" + os.path.basename(filename), f, overwrite=True)
if __name__=="__main__":
backup_to_dropbox() | agpl-3.0 | 8,127,670,572,553,388,000 | 31.060403 | 138 | 0.718802 | false |
btnpushnmunky/cupcake | monsters.py | 1 | 1918 | import pygame
import os
from random import randint
UP = 3
DOWN = 7
RIGHT = 5
LEFT = 9
EXEC_DIR = os.path.dirname(__file__)
class Monster(pygame.sprite.Sprite):
""" This is our main monster class """
def __init__(self, initial_position, type, direction):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(type + '.png')
self.rect = self.image.get_rect()
self.rect.topleft = initial_position
self.next_update_time = 0
self.bottom = self.rect.bottom
self.top = self.rect.top
self.right = self.rect.right
self.left = self.rect.left
self.direction = direction
self.type = type
self.speed = randint(1,5)
def update(self, plane, bounds):
self.top = self.rect.top
self.left = self.rect.left
self.right = self.rect.right
self.bottom = self.rect.bottom
if plane == 'horizontal':
if self.direction == RIGHT:
self.rect.left += 1 * self.speed
if self.right > bounds:
self.reverse()
elif self.direction == LEFT:
self.rect.left -= 1 * self.speed
if self.left < 0:
self.reverse()
elif plane == 'vertical':
if self.direction == UP:
self.rect.top -= 1 * self.speed
if self.top < 30:
self.reverse()
elif self.direction == DOWN:
self.rect.top += 1 * self.speed
if self.bottom > bounds:
self.reverse()
def reverse(self):
if self.direction == RIGHT:
self.direction = LEFT
elif self.direction == LEFT:
self.direction = RIGHT
elif self.direction == UP:
self.direction = DOWN
elif self.direction == DOWN:
self.direction = UP
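# Usage sketch: a Monster takes a start position, a sprite base name (loaded as
# "<name>.png") and a direction constant, and is advanced once per frame. The
# sprite name and the 640-pixel bound are illustrative placeholders.
#
#     m = Monster((100, 200), "cupcake_monster", RIGHT)
#     m.update('horizontal', 640)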
| mit | -746,457,573,432,719,700 | 30.966667 | 58 | 0.529718 | false |
julien78910/CouchPotatoServer | libs/rtorrent/rpc/__init__.py | 158 | 10775 | # Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import inspect
import rtorrent
import re
from rtorrent.common import bool_to_int, convert_version_tuple_to_str,\
safe_repr
from rtorrent.err import MethodError
from rtorrent.compat import xmlrpclib
def get_varname(rpc_call):
"""Transform rpc method into variable name.
@newfield example: Example
@example: if the name of the rpc method is 'p.get_down_rate', the variable
name will be 'down_rate'
"""
# extract variable name from xmlrpc func name
r = re.search(
"([ptdf]\.|system\.|get\_|is\_|set\_)+([^=]*)", rpc_call, re.I)
if r:
return(r.groups()[-1])
else:
return(None)
def _handle_unavailable_rpc_method(method, rt_obj):
msg = "Method isn't available."
if rt_obj._get_client_version_tuple() < method.min_version:
msg = "This method is only available in " \
"RTorrent version v{0} or later".format(
convert_version_tuple_to_str(method.min_version))
raise MethodError(msg)
class DummyClass:
def __init__(self):
pass
class Method:
"""Represents an individual RPC method"""
def __init__(self, _class, method_name,
rpc_call, docstring=None, varname=None, **kwargs):
self._class = _class # : Class this method is associated with
self.class_name = _class.__name__
self.method_name = method_name # : name of public-facing method
self.rpc_call = rpc_call # : name of rpc method
self.docstring = docstring # : docstring for rpc method (optional)
self.varname = varname # : variable for the result of the method call, usually set to self.varname
self.min_version = kwargs.get("min_version", (
0, 0, 0)) # : Minimum version of rTorrent required
self.boolean = kwargs.get("boolean", False) # : returns boolean value?
self.post_process_func = kwargs.get(
"post_process_func", None) # : custom post process function
self.aliases = kwargs.get(
"aliases", []) # : aliases for method (optional)
self.required_args = []
#: Arguments required when calling the method (not utilized)
self.method_type = self._get_method_type()
if self.varname is None:
self.varname = get_varname(self.rpc_call)
assert self.varname is not None, "Couldn't get variable name."
def __repr__(self):
return safe_repr("Method(method_name='{0}', rpc_call='{1}')",
self.method_name, self.rpc_call)
def _get_method_type(self):
"""Determine whether method is a modifier or a retriever"""
if self.method_name[:4] == "set_": return('m') # modifier
else:
return('r') # retriever
def is_modifier(self):
if self.method_type == 'm':
return(True)
else:
return(False)
def is_retriever(self):
if self.method_type == 'r':
return(True)
else:
return(False)
def is_available(self, rt_obj):
if rt_obj._get_client_version_tuple() < self.min_version or \
self.rpc_call not in rt_obj._get_rpc_methods():
return(False)
else:
return(True)
class Multicall:
def __init__(self, class_obj, **kwargs):
self.class_obj = class_obj
if class_obj.__class__.__name__ == "RTorrent":
self.rt_obj = class_obj
else:
self.rt_obj = class_obj._rt_obj
self.calls = []
def add(self, method, *args):
"""Add call to multicall
@param method: L{Method} instance or name of raw RPC method
@type method: Method or str
@param args: call arguments
"""
# if a raw rpc method was given instead of a Method instance,
# try and find the instance for it. And if all else fails, create a
# dummy Method instance
if isinstance(method, str):
result = find_method(method)
# if result not found
if result == -1:
method = Method(DummyClass, method, method)
else:
method = result
# ensure method is available before adding
if not method.is_available(self.rt_obj):
_handle_unavailable_rpc_method(method, self.rt_obj)
self.calls.append((method, args))
def list_calls(self):
for c in self.calls:
print(c)
def call(self):
"""Execute added multicall calls
@return: the results (post-processed), in the order they were added
@rtype: tuple
"""
m = xmlrpclib.MultiCall(self.rt_obj._get_conn())
for call in self.calls:
method, args = call
rpc_call = getattr(method, "rpc_call")
getattr(m, rpc_call)(*args)
results = m()
results = tuple(results)
results_processed = []
for r, c in zip(results, self.calls):
method = c[0] # Method instance
result = process_result(method, r)
results_processed.append(result)
# assign result to class_obj
exists = hasattr(self.class_obj, method.varname)
if not exists or not inspect.ismethod(getattr(self.class_obj, method.varname)):
setattr(self.class_obj, method.varname, result)
return(tuple(results_processed))
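# Usage sketch for Multicall: batch several RPC calls for one object and get the
# post-processed results back in order. The "torrent" variable and the rtorrent
# method names used here are assumptions for illustration.
#
#     m = Multicall(torrent)
#     m.add("d.get_name", torrent.rpc_id)
#     m.add("d.get_down_rate", torrent.rpc_id)
#     name, down_rate = m.call()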
def call_method(class_obj, method, *args):
"""Handles single RPC calls
@param class_obj: Peer/File/Torrent/Tracker/RTorrent instance
@type class_obj: object
@param method: L{Method} instance or name of raw RPC method
@type method: Method or str
"""
if method.is_retriever():
args = args[:-1]
else:
assert args[-1] is not None, "No argument given."
if class_obj.__class__.__name__ == "RTorrent":
rt_obj = class_obj
else:
rt_obj = class_obj._rt_obj
# check if rpc method is even available
if not method.is_available(rt_obj):
_handle_unavailable_rpc_method(method, rt_obj)
m = Multicall(class_obj)
m.add(method, *args)
# only added one method, only getting one result back
ret_value = m.call()[0]
####### OBSOLETE ##########################################################
# if method.is_retriever():
# #value = process_result(method, ret_value)
# value = ret_value #MultiCall already processed the result
# else:
# # we're setting the user's input to method.varname
# # but we'll return the value that xmlrpc gives us
# value = process_result(method, args[-1])
##########################################################################
return(ret_value)
def find_method(rpc_call):
"""Return L{Method} instance associated with given RPC call"""
method_lists = [
rtorrent.methods,
rtorrent.file.methods,
rtorrent.tracker.methods,
rtorrent.peer.methods,
rtorrent.torrent.methods,
]
for l in method_lists:
for m in l:
if m.rpc_call.lower() == rpc_call.lower():
return(m)
return(-1)
def process_result(method, result):
"""Process given C{B{result}} based on flags set in C{B{method}}
@param method: L{Method} instance
@type method: Method
@param result: result to be processed (the result of given L{Method} instance)
@note: Supported Processing:
- boolean - convert ones and zeros returned by rTorrent and
convert to python boolean values
"""
# handle custom post processing function
if method.post_process_func is not None:
result = method.post_process_func(result)
# is boolean?
if method.boolean:
if result in [1, '1']:
result = True
elif result in [0, '0']:
result = False
return(result)
def _build_rpc_methods(class_, method_list):
"""Build glorified aliases to raw RPC methods"""
instance = None
if not inspect.isclass(class_):
instance = class_
class_ = instance.__class__
for m in method_list:
class_name = m.class_name
if class_name != class_.__name__:
continue
if class_name == "RTorrent":
caller = lambda self, arg = None, method = m:\
call_method(self, method, bool_to_int(arg))
elif class_name == "Torrent":
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name in ["Tracker", "File"]:
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name == "Peer":
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name == "Group":
caller = lambda arg = None, method = m: \
call_method(instance, method, bool_to_int(arg))
if m.docstring is None:
m.docstring = ""
# print(m)
docstring = """{0}
@note: Variable where the result for this method is stored: {1}.{2}""".format(
m.docstring,
class_name,
m.varname)
caller.__doc__ = docstring
for method_name in [m.method_name] + list(m.aliases):
if instance is None:
setattr(class_, method_name, caller)
else:
setattr(instance, method_name, caller)
| gpl-3.0 | -7,227,469,817,826,214,000 | 32.777429 | 107 | 0.587935 | false |
amir-qayyum-khan/edx-platform | common/djangoapps/request_cache/middleware.py | 9 | 3741 | """
An implementation of a RequestCache. This cache is reset at the beginning
and end of every request.
"""
import crum
import threading
class _RequestCache(threading.local):
"""
A thread-local for storing the per-request cache.
"""
def __init__(self):
super(_RequestCache, self).__init__()
self.data = {}
REQUEST_CACHE = _RequestCache()
class RequestCache(object):
@classmethod
def get_request_cache(cls, name=None):
"""
This method is deprecated. Please use :func:`request_cache.get_cache`.
"""
if name is None:
return REQUEST_CACHE
else:
return REQUEST_CACHE.data.setdefault(name, {})
@classmethod
def get_current_request(cls):
"""
This method is deprecated. Please use :func:`request_cache.get_request`.
"""
return crum.get_current_request()
@classmethod
def clear_request_cache(cls):
"""
Empty the request cache.
"""
REQUEST_CACHE.data = {}
def process_request(self, request):
self.clear_request_cache()
return None
def process_response(self, request, response):
self.clear_request_cache()
return response
def process_exception(self, request, exception): # pylint: disable=unused-argument
"""
Clear the RequestCache after a failed request.
"""
self.clear_request_cache()
return None
def request_cached(f):
"""
    A decorator that wraps a function and automatically caches its return value, returning
    that cached value for subsequent calls to the same function, with the same parameters, within a given request.
Notes:
- we convert arguments and keyword arguments to their string form to build the cache key, so if you have
args/kwargs that can't be converted to strings, you're gonna have a bad time (don't do it)
- cache key cardinality depends on the args/kwargs, so if you're caching a function that takes five arguments,
you might have deceptively low cache efficiency. prefer function with fewer arguments.
- we use the default request cache, not a named request cache (this shouldn't matter, but just mentioning it)
- benchmark, benchmark, benchmark! if you never measure, how will you know you've improved? or regressed?
Arguments:
f (func): the function to wrap
Returns:
func: a wrapper function which will call the wrapped function, passing in the same args/kwargs,
cache the value it returns, and return that cached value for subsequent calls with the
same args/kwargs within a single request
"""
def wrapper(*args, **kwargs):
"""
Wrapper function to decorate with.
"""
# Build our cache key based on the module the function belongs to, the functions name, and a stringified
# list of arguments and a query string-style stringified list of keyword arguments.
converted_args = map(str, args)
converted_kwargs = map(str, reduce(list.__add__, map(list, sorted(kwargs.iteritems())), []))
cache_keys = [f.__module__, f.func_name] + converted_args + converted_kwargs
cache_key = '.'.join(cache_keys)
# Check to see if we have a result in cache. If not, invoke our wrapped
# function. Cache and return the result to the caller.
rcache = RequestCache.get_request_cache()
if cache_key in rcache.data:
return rcache.data.get(cache_key)
else:
result = f(*args, **kwargs)
rcache.data[cache_key] = result
return result
return wrapper
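# Usage sketch for request_cached: memoise an expensive lookup for the lifetime
# of the current request. The function name and its body are made-up examples.
#
#     @request_cached
#     def get_course_stats(course_id):
#         return _expensive_query(course_id)
#
#     get_course_stats("demo")  # computed and cached
#     get_course_stats("demo")  # served from the request cache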
| agpl-3.0 | 6,397,353,112,441,866,000 | 33.638889 | 118 | 0.64047 | false |
pikeBishop/OMP_gpxReport | examples/geopy/geocoders/opencage.py | 13 | 7091 | """
:class:`.OpenCage` is the Opencagedata geocoder.
"""
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT, DEFAULT_SCHEME
from geopy.exc import (
GeocoderQueryError,
GeocoderQuotaExceeded,
)
from geopy.location import Location
from geopy.util import logger
__all__ = ("OpenCage", )
class OpenCage(Geocoder):
"""
Geocoder using the Open Cage Data API. Documentation at:
http://geocoder.opencagedata.com/api.html
..versionadded:: 1.1.0
"""
def __init__(
self,
api_key,
domain='api.opencagedata.com',
scheme=DEFAULT_SCHEME,
timeout=DEFAULT_TIMEOUT,
proxies=None,
): # pylint: disable=R0913
"""
Initialize a customized Open Cage Data geocoder.
:param string api_key: The API key required by Open Cage Data
to perform geocoding requests. You can get your key here:
https://developer.opencagedata.com/
:param string domain: Currently it is 'api.opencagedata.com', can
be changed for testing purposes.
:param string scheme: Use 'https' or 'http' as the API URL's scheme.
Default is https. Note that SSL connections' certificates are not
verified.
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
"""
super(OpenCage, self).__init__(
scheme=scheme, timeout=timeout, proxies=proxies
)
self.api_key = api_key
self.domain = domain.strip('/')
self.scheme = scheme
self.api = '%s://%s/geocode/v1/json' % (self.scheme, self.domain)
def geocode(
self,
query,
bounds=None,
country=None,
language=None,
exactly_one=True,
timeout=None,
): # pylint: disable=W0221,R0913
"""
Geocode a location query.
:param string query: The query string to be geocoded; this must
be URL encoded.
:param string language: an IETF format language code (such as `es`
for Spanish or pt-BR for Brazilian Portuguese); if this is
omitted a code of `en` (English) will be assumed by the remote
service.
:param string bounds: Provides the geocoder with a hint to the region
that the query resides in. This value will help the geocoder
but will not restrict the possible results to the supplied
region. The bounds parameter should be specified as 4
coordinate points forming the south-west and north-east
corners of a bounding box. For example,
`bounds=-0.563160,51.280430,0.278970,51.683979`.
:param string country: Provides the geocoder with a hint to the
country that the query resides in. This value will help the
geocoder but will not restrict the possible results to the
supplied country. The country code is a 3 character code as
defined by the ISO 3166-1 Alpha 3 standard.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {
'key': self.api_key,
'q': self.format_string % query,
}
        if bounds:
            params['bounds'] = bounds
        if language:
            params['language'] = language
        if country:
            params['country'] = country
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
def reverse(
self,
query,
language=None,
exactly_one=False,
timeout=None,
): # pylint: disable=W0221,R0913
"""
Given a point, find an address.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param string language: The language in which to return results.
:param boolean exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {
'key': self.api_key,
'q': self._coerce_point_to_string(query),
}
if language:
params['language'] = language
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
def _parse_json(self, page, exactly_one=True):
'''Returns location, (latitude, longitude) from json feed.'''
places = page.get('results', [])
if not len(places):
self._check_status(page.get('status'))
return None
def parse_place(place):
'''Get the location, lat, lng from a single json place.'''
location = place.get('formatted')
latitude = place['geometry']['lat']
longitude = place['geometry']['lng']
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_place(places[0])
else:
return [parse_place(place) for place in places]
@staticmethod
def _check_status(status):
"""
Validates error statuses.
"""
status_code = status['code']
if status_code == 429:
# Rate limit exceeded
raise GeocoderQuotaExceeded(
'The given key has gone over the requests limit in the 24'
' hour period or has submitted too many requests in too'
' short a period of time.'
)
if status_code == 200:
# When there are no results, just return.
return
if status_code == 403:
raise GeocoderQueryError(
'Your request was denied.'
)
else:
raise GeocoderQueryError('Unknown error.')
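# Usage sketch for the OpenCage geocoder: the API key below is a placeholder,
# not a real credential, and the query string is only an example.
#
#     geocoder = OpenCage(api_key="your-api-key")
#     location = geocoder.geocode("10 Downing Street, London")
#     print(location.latitude, location.longitude)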
| gpl-2.0 | -2,919,232,206,359,233,500 | 33.590244 | 79 | 0.5813 | false |
atiqueahmedziad/addons-server | src/olympia/legacy_discovery/views.py | 2 | 2723 | from django.db.transaction import non_atomic_requests
from django.forms.models import modelformset_factory
from django.shortcuts import redirect
from olympia import amo
from olympia.amo.utils import render
from olympia.zadmin.decorators import admin_required
from .forms import DiscoveryModuleForm
from .models import DiscoveryModule
from .modules import registry as module_registry
@non_atomic_requests
def promos(request, context, version, platform, compat_mode='strict'):
if platform:
platform = platform.lower()
platform = amo.PLATFORM_DICT.get(platform, amo.PLATFORM_ALL)
modules = get_modules(request, platform.api_name, version)
return render(request, 'addons/impala/homepage_promos.html',
{'modules': modules})
def get_modules(request, platform, version):
lang = request.LANG
qs = DiscoveryModule.objects.filter(app=request.APP.id)
# Remove any modules without a registered backend or an ordering.
modules = [m for m in qs
if m.module in module_registry and m.ordering is not None]
# Remove modules that specify a locales string we're not part of.
modules = [m for m in modules
if not m.locales or lang in m.locales.split()]
modules = sorted(modules, key=lambda x: x.ordering)
return [module_registry[m.module](request, platform, version)
for m in modules]
@admin_required
@non_atomic_requests
def module_admin(request):
APP = request.APP
# Custom sorting to drop ordering=NULL objects to the bottom.
qs = DiscoveryModule.objects.raw("""
SELECT * from discovery_modules WHERE app_id = %s
ORDER BY ordering IS NULL, ordering""", [APP.id])
qs.ordered = True # The formset looks for this.
_sync_db_and_registry(qs, APP.id)
Form = modelformset_factory(DiscoveryModule, form=DiscoveryModuleForm,
can_delete=True, extra=0)
formset = Form(request.POST or None, queryset=qs)
if request.method == 'POST' and formset.is_valid():
formset.save()
return redirect('discovery.module_admin')
return render(
request, 'legacy_discovery/module_admin.html', {'formset': formset})
def _sync_db_and_registry(qs, app_id):
"""Match up the module registry and DiscoveryModule rows in the db."""
existing = dict((m.module, m) for m in qs)
to_add = [m for m in module_registry if m not in existing]
to_delete = [m for m in existing if m not in module_registry]
for m in to_add:
DiscoveryModule.objects.get_or_create(module=m, app=app_id)
DiscoveryModule.objects.filter(module__in=to_delete, app=app_id).delete()
if to_add or to_delete:
qs._result_cache = None
| bsd-3-clause | 1,007,125,105,423,562,500 | 37.9 | 77 | 0.691149 | false |
TomBaxter/osf.io | osf/models/tag.py | 28 | 1187 | from django.db import models
from .base import BaseModel
class TagManager(models.Manager):
"""Manager that filters out system tags by default.
"""
def get_queryset(self):
return super(TagManager, self).get_queryset().filter(system=False)
class Tag(BaseModel):
name = models.CharField(db_index=True, max_length=1024)
system = models.BooleanField(default=False)
objects = TagManager()
all_tags = models.Manager()
def __unicode__(self):
if self.system:
return 'System Tag: {}'.format(self.name)
return u'{}'.format(self.name)
def _natural_key(self):
return hash(self.name + str(self.system))
@property
def _id(self):
return self.name.lower()
@classmethod
def load(cls, data, system=False):
"""For compatibility with v1: the tag name used to be the _id,
so we make Tag.load('tagname') work as if `name` were the primary key.
"""
try:
return cls.all_tags.get(system=system, name=data)
except cls.DoesNotExist:
return None
class Meta:
unique_together = ('name', 'system')
ordering = ('name', )
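# Minimal usage sketch (added for illustration; assumes a configured Django
# environment and is not part of the original module):
#
#     Tag.all_tags.get_or_create(name='science', system=False)
#     Tag.load('science')                # -> Tag instance, or None if absent
#     Tag.load('migrated', system=True)  # system tags must be requested explicitly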
| apache-2.0 | -1,173,475,066,900,773,400 | 25.977273 | 78 | 0.614153 | false |
rprata/boost | tools/build/src/util/set.py | 49 | 1240 | # (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
from utility import to_seq
def difference (b, a):
""" Returns the elements of B that are not in A.
"""
result = []
for element in b:
if not element in a:
result.append (element)
return result
def intersection (set1, set2):
""" Removes from set1 any items which don't appear in set2 and returns the result.
"""
result = []
for v in set1:
if v in set2:
result.append (v)
return result
def contains (small, large):
""" Returns true iff all elements of 'small' exist in 'large'.
"""
small = to_seq (small)
large = to_seq (large)
for s in small:
if not s in large:
return False
return True
def equal (a, b):
""" Returns True iff 'a' contains the same elements as 'b', irrespective of their order.
# TODO: Python 2.4 has a proper set class.
"""
return contains (a, b) and contains (b, a)
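# Minimal usage sketch (added for illustration; not part of the original file):
#
#     difference ([1, 2, 3], [2])        # -> [1, 3]
#     intersection ([1, 2, 3], [2, 4])   # -> [2]
#     contains ([1, 2], [1, 2, 3])       # -> True
#     equal ([1, 2], [2, 1])             # -> True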
| gpl-2.0 | 7,047,256,434,928,357,000 | 28.52381 | 92 | 0.631452 | false |
Lujeni/ansible | lib/ansible/modules/network/cloudengine/ce_interface_ospf.py | 8 | 30951 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_interface_ospf
version_added: "2.4"
short_description: Manages configuration of an OSPF interface instance on HUAWEI CloudEngine switches.
description:
    - Manages configuration of an OSPF interface instance on HUAWEI CloudEngine switches.
author: QijunPan (@QijunPan)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
interface:
description:
- Full name of interface, i.e. 40GE1/0/10.
required: true
process_id:
description:
- Specifies a process ID.
The value is an integer ranging from 1 to 4294967295.
required: true
area:
description:
- Ospf area associated with this ospf process.
Valid values are a string, formatted as an IP address
(i.e. "0.0.0.0") or as an integer between 1 and 4294967295.
required: true
cost:
description:
- The cost associated with this interface.
Valid values are an integer in the range from 1 to 65535.
hello_interval:
description:
- Time between sending successive hello packets.
Valid values are an integer in the range from 1 to 65535.
dead_interval:
description:
- Time interval an ospf neighbor waits for a hello
packet before tearing down adjacencies. Valid values are an
integer in the range from 1 to 235926000.
silent_interface:
description:
- Setting to true will prevent this interface from receiving
HELLO packets. Valid values are 'true' and 'false'.
type: bool
default: 'no'
auth_mode:
description:
- Specifies the authentication type.
choices: ['none', 'null', 'hmac-sha256', 'md5', 'hmac-md5', 'simple']
auth_text_simple:
description:
- Specifies a password for simple authentication.
The value is a string of 1 to 8 characters.
auth_key_id:
description:
            - Authentication key id when C(auth_mode) is 'hmac-sha256', 'md5' or 'hmac-md5'.
Valid value is an integer is in the range from 1 to 255.
auth_text_md5:
description:
- Specifies a password for MD5, HMAC-MD5, or HMAC-SHA256 authentication.
The value is a string of 1 to 255 case-sensitive characters, spaces not supported.
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present','absent']
"""
EXAMPLES = '''
- name: eth_trunk module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Enables OSPF and sets the cost on an interface
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
cost: 100
provider: '{{ cli }}'
- name: Sets the dead interval of the OSPF neighbor
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
dead_interval: 100
provider: '{{ cli }}'
- name: Sets the interval for sending Hello packets on an interface
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
hello_interval: 2
provider: '{{ cli }}'
- name: Disables an interface from receiving and sending OSPF packets
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
silent_interface: true
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"process_id": "1", "area": "0.0.0.100", "interface": "10GE1/0/30", "cost": "100"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"process_id": "1", "area": "0.0.0.100"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"process_id": "1", "area": "0.0.0.100", "interface": "10GE1/0/30",
"cost": "100", "dead_interval": "40", "hello_interval": "10",
"silent_interface": "false", "auth_mode": "none"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface 10GE1/0/30",
"ospf enable 1 area 0.0.0.100",
"ospf cost 100"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_OSPF = """
<filter type="subtree">
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite>
<processId>%s</processId>
<routerId></routerId>
<vrfName></vrfName>
<areas>
<area>
<areaId>%s</areaId>
<interfaces>
<interface>
<ifName>%s</ifName>
<networkType></networkType>
<helloInterval></helloInterval>
<deadInterval></deadInterval>
<silentEnable></silentEnable>
<configCost></configCost>
<authenticationMode></authenticationMode>
<authTextSimple></authTextSimple>
<keyId></keyId>
<authTextMd5></authTextMd5>
</interface>
</interfaces>
</area>
</areas>
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</filter>
"""
CE_NC_XML_BUILD_PROCESS = """
<config>
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite>
<processId>%s</processId>
<areas>
<area>
<areaId>%s</areaId>
%s
</area>
</areas>
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</config>
"""
CE_NC_XML_BUILD_MERGE_INTF = """
<interfaces>
<interface operation="merge">
%s
</interface>
</interfaces>
"""
CE_NC_XML_BUILD_DELETE_INTF = """
<interfaces>
<interface operation="delete">
%s
</interface>
</interfaces>
"""
CE_NC_XML_SET_IF_NAME = """
<ifName>%s</ifName>
"""
CE_NC_XML_SET_HELLO = """
<helloInterval>%s</helloInterval>
"""
CE_NC_XML_SET_DEAD = """
<deadInterval>%s</deadInterval>
"""
CE_NC_XML_SET_SILENT = """
<silentEnable>%s</silentEnable>
"""
CE_NC_XML_SET_COST = """
<configCost>%s</configCost>
"""
CE_NC_XML_SET_AUTH_MODE = """
<authenticationMode>%s</authenticationMode>
"""
CE_NC_XML_SET_AUTH_TEXT_SIMPLE = """
<authTextSimple>%s</authTextSimple>
"""
CE_NC_XML_SET_AUTH_MD5 = """
<keyId>%s</keyId>
<authTextMd5>%s</authTextMd5>
"""
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
if interface.upper().startswith('GE'):
return 'ge'
elif interface.upper().startswith('10GE'):
return '10ge'
elif interface.upper().startswith('25GE'):
return '25ge'
elif interface.upper().startswith('4X10GE'):
return '4x10ge'
elif interface.upper().startswith('40GE'):
return '40ge'
elif interface.upper().startswith('100GE'):
return '100ge'
elif interface.upper().startswith('VLANIF'):
return 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
return 'loopback'
elif interface.upper().startswith('METH'):
return 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
return 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
return 'vbdif'
elif interface.upper().startswith('NVE'):
return 'nve'
elif interface.upper().startswith('TUNNEL'):
return 'tunnel'
elif interface.upper().startswith('ETHERNET'):
return 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
return 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
return 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
return 'stack-port'
elif interface.upper().startswith('NULL'):
return 'null'
else:
return None
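# Illustration (added comment; not part of the original module):
# get_interface_type('10GE1/0/30') returns '10ge', get_interface_type('Vlanif100')
# returns 'vlanif', and an unrecognised name such as 'Foo0/1' returns None.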
def is_valid_v4addr(addr):
"""check is ipv4 addr is valid"""
if not addr:
return False
if addr.find('.') != -1:
addr_list = addr.split('.')
if len(addr_list) != 4:
return False
for each_num in addr_list:
if not each_num.isdigit():
return False
if int(each_num) > 255:
return False
return True
return False
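# Illustration (added comment; not part of the original module):
# is_valid_v4addr('10.1.1.1') returns True, while '256.1.1.1', '10.1.1' and
# 'abc' all return False.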
class InterfaceOSPF(object):
"""
Manages configuration of an OSPF interface instance.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.interface = self.module.params['interface']
self.process_id = self.module.params['process_id']
self.area = self.module.params['area']
self.cost = self.module.params['cost']
self.hello_interval = self.module.params['hello_interval']
self.dead_interval = self.module.params['dead_interval']
self.silent_interface = self.module.params['silent_interface']
self.auth_mode = self.module.params['auth_mode']
self.auth_text_simple = self.module.params['auth_text_simple']
self.auth_key_id = self.module.params['auth_key_id']
self.auth_text_md5 = self.module.params['auth_text_md5']
self.state = self.module.params['state']
# ospf info
self.ospf_info = dict()
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def netconf_set_config(self, xml_str, xml_name):
"""netconf set config"""
rcv_xml = set_nc_config(self.module, xml_str)
if "<ok/>" not in rcv_xml:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_area_ip(self):
"""convert integer to ip address"""
if not self.area.isdigit():
return self.area
addr_int = ['0'] * 4
addr_int[0] = str(((int(self.area) & 0xFF000000) >> 24) & 0xFF)
addr_int[1] = str(((int(self.area) & 0x00FF0000) >> 16) & 0xFF)
addr_int[2] = str(((int(self.area) & 0x0000FF00) >> 8) & 0XFF)
addr_int[3] = str(int(self.area) & 0xFF)
return '.'.join(addr_int)
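    # Illustration of the conversion above (added comment): an area of "100"
    # yields "0.0.0.100", "4294967295" yields "255.255.255.255", and dotted
    # input such as "0.0.0.10" is returned unchanged.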
def get_ospf_dict(self):
""" get one ospf attributes dict."""
ospf_info = dict()
conf_str = CE_NC_GET_OSPF % (
self.process_id, self.get_area_ip(), self.interface)
rcv_xml = get_nc_config(self.module, conf_str)
if "<data/>" in rcv_xml:
return ospf_info
xml_str = rcv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get process base info
root = ElementTree.fromstring(xml_str)
ospfsite = root.find("ospfv2/ospfv2comm/ospfSites/ospfSite")
if not ospfsite:
self.module.fail_json(msg="Error: ospf process does not exist.")
for site in ospfsite:
if site.tag in ["processId", "routerId", "vrfName"]:
ospf_info[site.tag] = site.text
# get areas info
ospf_info["areaId"] = ""
areas = root.find(
"ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area")
if areas:
for area in areas:
if area.tag == "areaId":
ospf_info["areaId"] = area.text
break
# get interface info
ospf_info["interface"] = dict()
intf = root.find(
"ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area/interfaces/interface")
if intf:
for attr in intf:
if attr.tag in ["ifName", "networkType",
"helloInterval", "deadInterval",
"silentEnable", "configCost",
"authenticationMode", "authTextSimple",
"keyId", "authTextMd5"]:
ospf_info["interface"][attr.tag] = attr.text
return ospf_info
def set_ospf_interface(self):
"""set interface ospf enable, and set its ospf attributes"""
xml_intf = CE_NC_XML_SET_IF_NAME % self.interface
# ospf view
self.updates_cmd.append("ospf %s" % self.process_id)
self.updates_cmd.append("area %s" % self.get_area_ip())
if self.silent_interface:
xml_intf += CE_NC_XML_SET_SILENT % str(self.silent_interface).lower()
if self.silent_interface:
self.updates_cmd.append("silent-interface %s" % self.interface)
else:
self.updates_cmd.append("undo silent-interface %s" % self.interface)
# interface view
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append("ospf enable %s area %s" % (
self.process_id, self.get_area_ip()))
if self.cost:
xml_intf += CE_NC_XML_SET_COST % self.cost
self.updates_cmd.append("ospf cost %s" % self.cost)
if self.hello_interval:
xml_intf += CE_NC_XML_SET_HELLO % self.hello_interval
self.updates_cmd.append("ospf timer hello %s" %
self.hello_interval)
if self.dead_interval:
xml_intf += CE_NC_XML_SET_DEAD % self.dead_interval
self.updates_cmd.append("ospf timer dead %s" % self.dead_interval)
if self.auth_mode:
xml_intf += CE_NC_XML_SET_AUTH_MODE % self.auth_mode
if self.auth_mode == "none":
self.updates_cmd.append("undo ospf authentication-mode")
else:
self.updates_cmd.append("ospf authentication-mode %s" % self.auth_mode)
if self.auth_mode == "simple" and self.auth_text_simple:
xml_intf += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple
self.updates_cmd.pop()
self.updates_cmd.append("ospf authentication-mode %s %s"
% (self.auth_mode, self.auth_text_simple))
elif self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"] and self.auth_key_id:
xml_intf += CE_NC_XML_SET_AUTH_MD5 % (
self.auth_key_id, self.auth_text_md5)
self.updates_cmd.pop()
self.updates_cmd.append("ospf authentication-mode %s %s %s"
% (self.auth_mode, self.auth_key_id, self.auth_text_md5))
else:
pass
xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id,
self.get_area_ip(),
(CE_NC_XML_BUILD_MERGE_INTF % xml_intf))
self.netconf_set_config(xml_str, "SET_INTERFACE_OSPF")
self.changed = True
def merge_ospf_interface(self):
"""merge interface ospf attributes"""
intf_dict = self.ospf_info["interface"]
# ospf view
xml_ospf = ""
if intf_dict.get("silentEnable") != str(self.silent_interface).lower():
xml_ospf += CE_NC_XML_SET_SILENT % str(self.silent_interface).lower()
self.updates_cmd.append("ospf %s" % self.process_id)
self.updates_cmd.append("area %s" % self.get_area_ip())
if self.silent_interface:
self.updates_cmd.append("silent-interface %s" % self.interface)
else:
self.updates_cmd.append("undo silent-interface %s" % self.interface)
# interface view
xml_intf = ""
self.updates_cmd.append("interface %s" % self.interface)
if self.cost and intf_dict.get("configCost") != self.cost:
xml_intf += CE_NC_XML_SET_COST % self.cost
self.updates_cmd.append("ospf cost %s" % self.cost)
if self.hello_interval and intf_dict.get("helloInterval") != self.hello_interval:
xml_intf += CE_NC_XML_SET_HELLO % self.hello_interval
self.updates_cmd.append("ospf timer hello %s" %
self.hello_interval)
if self.dead_interval and intf_dict.get("deadInterval") != self.dead_interval:
xml_intf += CE_NC_XML_SET_DEAD % self.dead_interval
self.updates_cmd.append("ospf timer dead %s" % self.dead_interval)
if self.auth_mode:
            # NOTE: for security, authentication config will always be updated
xml_intf += CE_NC_XML_SET_AUTH_MODE % self.auth_mode
if self.auth_mode == "none":
self.updates_cmd.append("undo ospf authentication-mode")
else:
self.updates_cmd.append("ospf authentication-mode %s" % self.auth_mode)
if self.auth_mode == "simple" and self.auth_text_simple:
xml_intf += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple
self.updates_cmd.pop()
self.updates_cmd.append("ospf authentication-mode %s %s"
% (self.auth_mode, self.auth_text_simple))
elif self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"] and self.auth_key_id:
xml_intf += CE_NC_XML_SET_AUTH_MD5 % (
self.auth_key_id, self.auth_text_md5)
self.updates_cmd.pop()
self.updates_cmd.append("ospf authentication-mode %s %s %s"
% (self.auth_mode, self.auth_key_id, self.auth_text_md5))
else:
pass
if not xml_intf:
self.updates_cmd.pop() # remove command: interface
if not xml_ospf and not xml_intf:
return
xml_sum = CE_NC_XML_SET_IF_NAME % self.interface
xml_sum += xml_ospf + xml_intf
xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id,
self.get_area_ip(),
(CE_NC_XML_BUILD_MERGE_INTF % xml_sum))
self.netconf_set_config(xml_str, "MERGE_INTERFACE_OSPF")
self.changed = True
def unset_ospf_interface(self):
"""set interface ospf disable, and all its ospf attributes will be removed"""
intf_dict = self.ospf_info["interface"]
xml_sum = ""
xml_intf = CE_NC_XML_SET_IF_NAME % self.interface
if intf_dict.get("silentEnable") == "true":
xml_sum += CE_NC_XML_BUILD_MERGE_INTF % (
xml_intf + (CE_NC_XML_SET_SILENT % "false"))
self.updates_cmd.append("ospf %s" % self.process_id)
self.updates_cmd.append("area %s" % self.get_area_ip())
self.updates_cmd.append(
"undo silent-interface %s" % self.interface)
xml_sum += CE_NC_XML_BUILD_DELETE_INTF % xml_intf
xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id,
self.get_area_ip(),
xml_sum)
self.netconf_set_config(xml_str, "DELETE_INTERFACE_OSPF")
self.updates_cmd.append("undo ospf cost")
self.updates_cmd.append("undo ospf timer hello")
self.updates_cmd.append("undo ospf timer dead")
self.updates_cmd.append("undo ospf authentication-mode")
self.updates_cmd.append("undo ospf enable %s area %s" % (
self.process_id, self.get_area_ip()))
self.changed = True
def check_params(self):
"""Check all input params"""
self.interface = self.interface.replace(" ", "").upper()
# interface check
if not get_interface_type(self.interface):
self.module.fail_json(msg="Error: interface is invalid.")
# process_id check
if not self.process_id.isdigit():
self.module.fail_json(msg="Error: process_id is not digit.")
if int(self.process_id) < 1 or int(self.process_id) > 4294967295:
self.module.fail_json(msg="Error: process_id must be an integer between 1 and 4294967295.")
# area check
if self.area.isdigit():
if int(self.area) < 0 or int(self.area) > 4294967295:
self.module.fail_json(msg="Error: area id (Integer) must be between 0 and 4294967295.")
else:
if not is_valid_v4addr(self.area):
self.module.fail_json(msg="Error: area id is invalid.")
# area authentication check
if self.state == "present":
if self.auth_mode:
if self.auth_mode == "simple":
if self.auth_text_simple and len(self.auth_text_simple) > 8:
self.module.fail_json(
msg="Error: auth_text_simple is not in the range from 1 to 8.")
                if self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"]:
if self.auth_key_id and not self.auth_text_md5:
self.module.fail_json(
msg='Error: auth_key_id and auth_text_md5 should be set at the same time.')
if not self.auth_key_id and self.auth_text_md5:
self.module.fail_json(
msg='Error: auth_key_id and auth_text_md5 should be set at the same time.')
if self.auth_key_id:
if not self.auth_key_id.isdigit():
self.module.fail_json(
msg="Error: auth_key_id is not digit.")
if int(self.auth_key_id) < 1 or int(self.auth_key_id) > 255:
self.module.fail_json(
msg="Error: auth_key_id is not in the range from 1 to 255.")
if self.auth_text_md5 and len(self.auth_text_md5) > 255:
self.module.fail_json(
msg="Error: auth_text_md5 is not in the range from 1 to 255.")
# cost check
if self.cost:
if not self.cost.isdigit():
self.module.fail_json(msg="Error: cost is not digit.")
if int(self.cost) < 1 or int(self.cost) > 65535:
self.module.fail_json(
msg="Error: cost is not in the range from 1 to 65535")
# hello_interval check
if self.hello_interval:
if not self.hello_interval.isdigit():
self.module.fail_json(
msg="Error: hello_interval is not digit.")
if int(self.hello_interval) < 1 or int(self.hello_interval) > 65535:
self.module.fail_json(
msg="Error: hello_interval is not in the range from 1 to 65535")
# dead_interval check
if self.dead_interval:
if not self.dead_interval.isdigit():
self.module.fail_json(msg="Error: dead_interval is not digit.")
if int(self.dead_interval) < 1 or int(self.dead_interval) > 235926000:
self.module.fail_json(
msg="Error: dead_interval is not in the range from 1 to 235926000")
def get_proposed(self):
"""get proposed info"""
self.proposed["interface"] = self.interface
self.proposed["process_id"] = self.process_id
self.proposed["area"] = self.get_area_ip()
self.proposed["cost"] = self.cost
self.proposed["hello_interval"] = self.hello_interval
self.proposed["dead_interval"] = self.dead_interval
self.proposed["silent_interface"] = self.silent_interface
if self.auth_mode:
self.proposed["auth_mode"] = self.auth_mode
if self.auth_mode == "simple":
self.proposed["auth_text_simple"] = self.auth_text_simple
            if self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"]:
self.proposed["auth_key_id"] = self.auth_key_id
self.proposed["auth_text_md5"] = self.auth_text_md5
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.ospf_info:
return
if self.ospf_info["interface"]:
self.existing["interface"] = self.interface
self.existing["cost"] = self.ospf_info["interface"].get("configCost")
self.existing["hello_interval"] = self.ospf_info["interface"].get("helloInterval")
self.existing["dead_interval"] = self.ospf_info["interface"].get("deadInterval")
self.existing["silent_interface"] = self.ospf_info["interface"].get("silentEnable")
self.existing["auth_mode"] = self.ospf_info["interface"].get("authenticationMode")
self.existing["auth_text_simple"] = self.ospf_info["interface"].get("authTextSimple")
self.existing["auth_key_id"] = self.ospf_info["interface"].get("keyId")
self.existing["auth_text_md5"] = self.ospf_info["interface"].get("authTextMd5")
self.existing["process_id"] = self.ospf_info["processId"]
self.existing["area"] = self.ospf_info["areaId"]
def get_end_state(self):
"""get end state info"""
ospf_info = self.get_ospf_dict()
if not ospf_info:
return
if ospf_info["interface"]:
self.end_state["interface"] = self.interface
self.end_state["cost"] = ospf_info["interface"].get("configCost")
self.end_state["hello_interval"] = ospf_info["interface"].get("helloInterval")
self.end_state["dead_interval"] = ospf_info["interface"].get("deadInterval")
self.end_state["silent_interface"] = ospf_info["interface"].get("silentEnable")
self.end_state["auth_mode"] = ospf_info["interface"].get("authenticationMode")
self.end_state["auth_text_simple"] = ospf_info["interface"].get("authTextSimple")
self.end_state["auth_key_id"] = ospf_info["interface"].get("keyId")
self.end_state["auth_text_md5"] = ospf_info["interface"].get("authTextMd5")
self.end_state["process_id"] = ospf_info["processId"]
self.end_state["area"] = ospf_info["areaId"]
def work(self):
"""worker"""
self.check_params()
self.ospf_info = self.get_ospf_dict()
self.get_existing()
self.get_proposed()
# deal present or absent
if self.state == "present":
if not self.ospf_info or not self.ospf_info["interface"]:
# create ospf area and set interface config
self.set_ospf_interface()
else:
# merge interface ospf area config
self.merge_ospf_interface()
else:
if self.ospf_info and self.ospf_info["interface"]:
# delete interface ospf area config
self.unset_ospf_interface()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
interface=dict(required=True, type='str'),
process_id=dict(required=True, type='str'),
area=dict(required=True, type='str'),
cost=dict(required=False, type='str'),
hello_interval=dict(required=False, type='str'),
dead_interval=dict(required=False, type='str'),
silent_interface=dict(required=False, default=False, type='bool'),
auth_mode=dict(required=False,
choices=['none', 'null', 'hmac-sha256', 'md5', 'hmac-md5', 'simple'], type='str'),
auth_text_simple=dict(required=False, type='str', no_log=True),
auth_key_id=dict(required=False, type='str'),
auth_text_md5=dict(required=False, type='str', no_log=True),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = InterfaceOSPF(argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 | -5,195,196,876,943,480,000 | 37.932075 | 105 | 0.5594 | false |
christoph-buente/phantomjs | src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_test_utils.py | 227 | 10685 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'gtest_source_dir': os.path.dirname(sys.argv[0]),
'gtest_build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('gtest_source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('gtest_build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --gtest_build_dir flag or the GTEST_BUILD_DIR\n'
'environment variable. For convenient use, invoke this script via\n'
'mk_test.py.\n'
# TODO([email protected]): change mk_test.py to test.py after renaming
# the file.
'Please run mk_test.py -h for help.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
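# Illustrative use (added comment; the command is an assumption):
#
#   ret = os.system('exit 3')   # run some command via the shell
#   GetExitStatus(ret)          # -> 3, or -1 if the child did not call exit()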
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
      signal               Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest:
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
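# Illustrative use of the Subprocess wrapper above (added comment; the command
# shown is an assumption):
#
#   p = Subprocess(['python', '--version'], capture_stderr=True)
#   if p.exited and p.exit_code == 0:
#     print p.output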
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| bsd-3-clause | -2,731,866,854,677,231,000 | 33.579288 | 79 | 0.672157 | false |
signed/intellij-community | python/helpers/py3only/docutils/languages/da.py | 50 | 1872 | # -*- coding: utf-8 -*-
# $Id: da.py 7678 2013-07-03 09:57:36Z milde $
# Author: E D
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Danish-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': 'Forfatter',
'authors': 'Forfattere',
'organization': 'Organisation',
'address': 'Adresse',
'contact': 'Kontakt',
'version': 'Version',
'revision': 'Revision',
'status': 'Status',
'date': 'Dato',
'copyright': 'Copyright',
'dedication': 'Dedikation',
'abstract': 'Resumé',
'attention': 'Giv agt!',
'caution': 'Pas på!',
'danger': '!FARE!',
'error': 'Fejl',
'hint': 'Vink',
'important': 'Vigtigt',
'note': 'Bemærk',
'tip': 'Tips',
'warning': 'Advarsel',
'contents': 'Indhold'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
'forfatter': 'author',
'forfattere': 'authors',
'organisation': 'organization',
'adresse': 'address',
'kontakt': 'contact',
'version': 'version',
'revision': 'revision',
'status': 'status',
'dato': 'date',
'copyright': 'copyright',
'dedikation': 'dedication',
'resume': 'abstract',
'resumé': 'abstract'}
"""Danish (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| apache-2.0 | -5,146,300,755,362,303,000 | 29.129032 | 76 | 0.601178 | false |
iamdankaufman/beets | beetsplug/info.py | 2 | 2210 | # This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Shows file metadata.
"""
import os
from beets.plugins import BeetsPlugin
from beets import ui
from beets import mediafile
from beets import util
def info(paths):
# Set up fields to output.
fields = list(mediafile.MediaFile.fields())
fields.remove('art')
fields.remove('images')
# Line format.
other_fields = ['album art']
maxwidth = max(len(name) for name in fields + other_fields)
lineformat = u'{{0:>{0}}}: {{1}}'.format(maxwidth)
first = True
for path in paths:
if not first:
ui.print_()
path = util.normpath(path)
if not os.path.isfile(path):
ui.print_(u'not a file: {0}'.format(
util.displayable_path(path)
))
continue
ui.print_(path)
try:
mf = mediafile.MediaFile(path)
except mediafile.UnreadableFileError:
ui.print_('cannot read file: {0}'.format(
util.displayable_path(path)
))
continue
# Basic fields.
for name in fields:
ui.print_(lineformat.format(name, getattr(mf, name)))
# Extra stuff.
ui.print_(lineformat.format('album art', mf.art is not None))
first = False
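# Illustrative call (added comment; the path is an assumption):
#
#     info(['/music/album/track.mp3'])   # prints each tag field and an album-art flag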
class InfoPlugin(BeetsPlugin):
def commands(self):
cmd = ui.Subcommand('info', help='show file metadata')
def func(lib, opts, args):
if not args:
raise ui.UserError('no file specified')
info(args)
cmd.func = func
return [cmd]
| mit | -334,954,068,923,745,860 | 28.078947 | 71 | 0.624887 | false |
anaruse/chainer | tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_nd.py | 1 | 12854 | import unittest
import functools
import math
import numpy
from operator import mul
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import conv
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
@testing.parameterize(*testing.product({
'dims': [(4,), (4, 3), (4, 3, 2), (1, 1, 1, 1)],
'cover_all': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestMaxPoolingND(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
        # Avoid instability of numerical gradient
x_shape = (2, 3) + self.dims
self.x = numpy.arange(
functools.reduce(mul, x_shape), dtype=self.dtype).reshape(x_shape)
self.x = 2 * self.x / self.x.size - 1
outs = tuple(conv.get_conv_outsize(d, k, s, p, self.cover_all)
for (d, k, s, p)
in six.moves.zip(
self.dims, self.ksize, self.stride, self.pad))
gy_shape = (2, 3) + outs
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, x_shape).astype(self.dtype)
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {
'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {
'atol': 1e-3, 'rtol': 1e-2}
else:
self.check_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
def check_forward(self, x_data, use_cudnn='always'):
dims = self.dims
ksize = self.ksize
stride = self.stride
pad = self.pad
x = chainer.Variable(x_data)
with chainer.using_config('use_cudnn', use_cudnn):
y = functions.max_pooling_nd(x, ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
patches = pooling_nd_helper.pooling_patches(
dims, ksize, stride, pad, self.cover_all)
for i in six.moves.range(2):
for c in six.moves.range(3):
x = self.x[i, c]
expect = numpy.array([x[idx].max() for idx in patches])
expect = expect.reshape(y_data.shape[2:])
testing.assert_allclose(expect, y_data[i, c])
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, use_cudnn='never')
def test_forward_cpu_wide(self): # see #120
ndim = self.ndim
x_shape = (2, 3) + (15,) * ndim
x_data = numpy.random.rand(*x_shape).astype(self.dtype)
x = chainer.Variable(x_data)
ksize = stride = int(math.ceil(pow(32, 1.0 / ndim)))
functions.max_pooling_nd(x, ksize, stride=stride, pad=0)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.cudnn
@condition.retry(3)
def test_forward_gpu_non_contiguous(self):
self.check_forward(cuda.cupy.asfortranarray(cuda.to_gpu(self.x)))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), 'never')
def check_forward_consistency_regression(self, x_data, use_cudnn='always'):
# Regression test to max_pooling_2d.
if len(self.dims) != 2:
return
ksize = self.ksize
stride = self.stride
pad = self.pad
with chainer.using_config('use_cudnn', use_cudnn):
y_nd = functions.max_pooling_nd(self.x, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
y_2d = functions.max_pooling_2d(self.x, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
testing.assert_allclose(y_nd.data, y_2d.data)
@condition.retry(3)
def test_forward_consistency_regression_cpu(self):
self.check_forward_consistency_regression(self.x)
@attr.cudnn
@condition.retry(3)
def test_forward_consistency_regression_gpu(self):
self.check_forward_consistency_regression(cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_forward_consistency_regression_no_cudnn(self):
self.check_forward_consistency_regression(cuda.to_gpu(self.x), 'never')
def check_backward(self, x_data, y_grad, use_cudnn='always'):
def f(x):
return functions.max_pooling_nd(
x, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_backward(
f, x_data, y_grad, dtype='d', **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.cudnn
@condition.retry(3)
def test_backward_gpu_non_contiguous(self):
self.check_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')
def check_backward_consistency_regression(self, x_data, gy_data,
use_cudnn='always'):
# Regression test to two-dimensional max pooling layer.
if len(self.dims) != 2:
return
ksize = self.ksize
stride = self.stride
pad = self.pad
xp = cuda.get_array_module(x_data)
# Backward computation for N-dimensional max pooling layer.
x_nd = chainer.Variable(xp.array(x_data))
with chainer.using_config('use_cudnn', use_cudnn):
func_nd = functions.MaxPoolingND(self.ndim, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
y_nd = func_nd.apply((x_nd,))[0]
y_nd.grad = gy_data
y_nd.backward()
# Backward computation for two-dimensional max pooling layer.
x_2d = chainer.Variable(xp.array(x_data))
with chainer.using_config('use_cudnn', use_cudnn):
func_2d = functions.MaxPooling2D(ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
y_2d = func_2d.apply((x_2d,))[0]
y_2d.grad = gy_data
y_2d.backward()
# Test that the two result gradients are close enough.
testing.assert_allclose(x_nd.grad, x_2d.grad)
@condition.retry(3)
def test_backward_consistency_regression_cpu(self):
self.check_backward_consistency_regression(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_consistency_regression_gpu(self):
self.check_backward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_consistency_regression_no_cudnn(self):
self.check_backward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), use_cudnn='never')
def test_backward_cpu_more_than_once(self):
func = functions.MaxPoolingND(
self.ndim, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
func.apply((self.x,))
func.backward((self.x,), (self.gy,))
func.backward((self.x,), (self.gy,))
def check_double_backward(self, x_data, y_grad, x_grad_grad,
use_cudnn='always'):
def f(x):
y = functions.max_pooling_nd(
x, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
return y * y
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad,
dtype='d',
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx, 'never')
@attr.cudnn
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
@attr.cudnn
def test_double_backward_gpu_non_contiguous(self):
self.check_double_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))
@attr.gpu
def test_double_backward_gpu_no_cudnn(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
'never')
@testing.parameterize(*testing.product({
'dims': [(4, 3, 2), (3, 2), (2,)],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestMaxPoolingNDCudnnCall(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
x_shape = (2, 3) + self.dims
self.x = cuda.cupy.arange(functools.reduce(mul, x_shape),
dtype=self.dtype).reshape(x_shape)
gy_shape = (2, 3) + tuple(
conv.get_conv_outsize(d, k, s, p)
for (d, k, s, p)
in six.moves.zip(self.dims, self.ksize, self.stride, self.pad))
self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.max_pooling_nd(
x, self.ksize, self.stride, self.pad, cover_all=False)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cuda.cudnn.poolingForward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto') and
self.ndim > 1)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
expect = chainer.should_use_cudnn('>=auto') and self.ndim > 1
y = self.forward()
# should be consistent to forward regardless of use_cudnn config
y.grad = self.gy
with testing.patch('cupy.cuda.cudnn.poolingBackward') as func:
y.backward()
self.assertEqual(func.called, expect)
class TestMaxPoolingNDIndices(unittest.TestCase):
def setUp(self):
self.x = numpy.arange(
2 * 3 * 4 * 4, dtype=numpy.float32).reshape(2, 3, 4, 4)
def _check(self, x):
out, indices = functions.max_pooling_nd(
x, 2, cover_all=False, return_indices=True)
assert isinstance(out, chainer.Variable)
assert isinstance(out.array, type(x))
assert isinstance(indices, type(x))
assert indices.shape == out.array.shape
# Calculate expected indices.
expect = numpy.zeros(indices.shape, dtype=indices.dtype)
for i in six.moves.range(2):
for c in six.moves.range(3):
xx = x[i, c]
expect[i, c] = numpy.array([
[xx[0:2, 0:2].ravel().argmax(),
xx[0:2, 2:4].ravel().argmax()],
[xx[2:4, 0:2].ravel().argmax(),
xx[2:4, 2:4].ravel().argmax()],
])
if out.xp is not numpy:
expect = cuda.to_gpu(expect)
assert (expect == indices).all()
def test_cpu(self):
self._check(self.x)
@attr.gpu
@attr.cudnn
def test_gpu(self):
x = cuda.to_gpu(self.x)
with chainer.using_config('use_cudnn', 'never'):
self._check(x)
with chainer.using_config('use_cudnn', 'always'):
self._check(x)
testing.run_module(__name__, __file__)
| mit | 4,564,800,891,666,489,000 | 35.830946 | 79 | 0.576085 | false |
brennanblue/svgplotlib | svgplotlib/Bar.py | 2 | 6406 | #!python -u
# -*- coding: utf-8 -*-
import sys
import itertools
from svgplotlib import Base
class Bar(Base):
"""
Simple vertical bar plot
Example::
graph = Bar(
(10,50,100),
width = 1000, height = 500,
titleColor = 'blue',
title = 'Simple bar plot',
xlabel = 'X axis',
ylabel = 'Y axis',
grid = True,
)
"""
def __init__(self, values, labels = None, colors = None, **kwargs):
super(Bar,self).__init__(**kwargs)
if labels is None:
labels = [str(i) for i in range(len(values))]
if colors is None:
colors = self.COLORS
grid = kwargs.get('grid', False)
titleColor = kwargs.get('titleColor', 'black')
titleScale = kwargs.get('titleScale', 1.25)
labelColor = kwargs.get('labelColor', 'black')
xlabelColor = kwargs.get('xlabelColor', 'black')
ylabelColor = kwargs.get('ylabelColor', 'black')
style = self.style = {
'stroke' : 'black',
'stroke-width' : '1',
'fill' : 'black',
}
textStyle = self.textStyle = {
'stroke' : 'none',
}
# plot area width and height
width = kwargs.get('width', 500)
height = kwargs.get('height', 500)
assert width > 0 and height > 0, 'width and height must be larger than 0'
aspect = float(width)/height
assert aspect > .2 and aspect < 5., 'aspect must be between .2 and 5'
self.plotWidth = width
self.plotHeight = height
# build yticks
miny = min(values)
maxy = max(values)
if miny == maxy:
miny -= 1
maxy += 1
maxNumSteps = kwargs.get('maxNumSteps', 5)
maxMinSteps = kwargs.get('maxMinSteps', 5)
y1, y2 = self.buildTicks(miny, maxy, maxNumSteps = maxNumSteps, maxMinSteps = maxMinSteps)
self.ymajorTicks, self.yminorTicks = y1, y2
# calculate scale
miny = self.miny = min(min(y1), min(y2 or (sys.maxint,)))
maxy = self.maxy = max(max(y1), max(y2 or (-sys.maxint,)))
self.yscale = self.plotHeight/(maxy - miny)
# main group
g = self.Group(**style)
# label size
delta = self.fontSize + 2*self.PAD
# find height
dy = .5*self.fontSize
title = unicode(kwargs.get('title', ''))
titleSize = None
if title:
titleSize = self.textSize(title)
dy += titleScale*(titleSize.height + titleSize.descent) + self.PAD
h = dy # Top line space
h += self.plotHeight # Plot area
h += delta # xaxis labels
xlabel = unicode(kwargs.get('xlabel', ''))
xlabelSize = None
if xlabel:
xlabelSize = self.textSize(xlabel)
h += xlabelSize.height + xlabelSize.descent + self.PAD
# find width
w = 0
dx = 0
ylabel = unicode(kwargs.get('ylabel', ''))
ylabelSize = None
if ylabel:
ylabelSize = self.textSize(ylabel)
dx += ylabelSize.height + ylabelSize.descent + 2*self.PAD
# yaxis labels
maxSize = 0
for y in self.ymajorTicks:
s = u"%g" % y
size = self.textSize(s)
maxSize = max(maxSize, size.width)
dx += maxSize + self.PAD
w += dx # side space
w += self.plotWidth # Plot area
w += delta + self.PAD
# set total size
self.set('width', w)
self.set('height', h)
# plot title and labels
if title:
xpos = .5*w - .5*titleScale*titleSize.width
ypos = .5*dy + .5*titleScale*titleSize.height - titleScale*titleSize.descent
g.EText(self.font, title, x = xpos, y = ypos, scale = titleScale,
fill = titleColor, **textStyle)
if xlabel:
xpos = .5*w - .5*xlabelSize.width
ypos = h - self.PAD
g.EText(self.font, xlabel, x = xpos, y = ypos, fill = xlabelColor, **textStyle)
if ylabel:
xpos = ylabelSize.height + ylabelSize.descent + self.PAD
ypos = dy + .5*self.plotHeight + .5*ylabelSize.width
g.EText(self.font, ylabel, x = xpos, y = ypos, rotation = -90,
fill = ylabelColor, **textStyle)
# create plot area
plotArea = self.plotArea = g.Group(transform="translate(%g,%g)" % (dx, dy))
plotArea.Rect(x = 0, y = 0, width = self.plotWidth, height = self.plotHeight, fill = 'none')
self.yaxis(0, flip = False)
self.yaxis(self.plotWidth, flip = True, text = False)
if grid:
self.grid()
# plot bars
barPAD = 4*self.PAD
barWidth = (self.plotWidth - 2*(max(1, len(values) - 1))*barPAD) / len(values)
color = itertools.cycle(colors)
x = barPAD
for idx, value in enumerate(values):
barHeight = (value - miny)*self.yscale
y = self.plotHeight - barHeight
plotArea.Rect(x = x, y = y, width = barWidth, height = barHeight, fill = color.next())
s = unicode(labels[idx])
size = self.textSize(s)
xpos = x + .5*barWidth - .5*size.width
ypos = self.plotHeight + 2*self.PAD + .5*size.height
self.plotArea.EText(self.font, s, x = xpos, y = ypos,
fill = labelColor, **self.textStyle)
x += barWidth + barPAD
if __name__ == '__main__':
from svgplotlib.SVG import show
graph = Bar(
(10,50,100),
width = 1000, height = 500,
titleColor = 'blue',
title = 'Simple bar plot',
xlabel = 'X axis',
ylabel = 'Y axis',
grid = True,
)
show(graph, graph.width, graph.height)
| bsd-3-clause | -993,610,570,614,839,600 | 30.55665 | 100 | 0.482516 | false |
fiji-flo/servo | tests/wpt/web-platform-tests/webdriver/tests/contexts/maximize_window.py | 11 | 8104 | # META: timeout=long
from tests.support.asserts import assert_error, assert_dialog_handled, assert_success
from tests.support.fixtures import create_dialog
from tests.support.inline import inline
alert_doc = inline("<script>window.alert()</script>")
def maximize(session):
return session.transport.send("POST", "session/%s/window/maximize" % session.session_id)
# 10.7.3 Maximize Window
def test_no_browsing_context(session, create_window):
"""
2. If the current top-level browsing context is no longer open,
return error with error code no such window.
"""
session.window_handle = create_window()
session.close()
response = maximize(session)
assert_error(response, "no such window")
def test_handle_prompt_dismiss_and_notify():
"""TODO"""
def test_handle_prompt_accept_and_notify():
"""TODO"""
def test_handle_prompt_ignore():
"""TODO"""
def test_handle_prompt_accept(new_session, add_browser_capabilites):
"""
3. Handle any user prompts and return its value if it is an error.
[...]
In order to handle any user prompts a remote end must take the
following steps:
[...]
2. Perform the following substeps based on the current session's
user prompt handler:
[...]
- accept state
Accept the current user prompt.
"""
_, session = new_session({"capabilities": {"alwaysMatch": add_browser_capabilites({"unhandledPromptBehavior": "accept"})}})
session.url = inline("<title>WD doc title</title>")
create_dialog(session)("alert", text="dismiss #1", result_var="dismiss1")
response = maximize(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #1")
create_dialog(session)("confirm", text="dismiss #2", result_var="dismiss2")
response = maximize(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #2")
create_dialog(session)("prompt", text="dismiss #3", result_var="dismiss3")
response = maximize(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #3")
def test_handle_prompt_missing_value(session, create_dialog):
"""
3. Handle any user prompts and return its value if it is an error.
[...]
In order to handle any user prompts a remote end must take the
following steps:
[...]
2. Perform the following substeps based on the current session's
user prompt handler:
[...]
- missing value default state
1. Dismiss the current user prompt.
2. Return error with error code unexpected alert open.
"""
session.url = inline("<title>WD doc title</title>")
create_dialog("alert", text="dismiss #1", result_var="dismiss1")
response = maximize(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #1")
create_dialog("confirm", text="dismiss #2", result_var="dismiss2")
response = maximize(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #2")
create_dialog("prompt", text="dismiss #3", result_var="dismiss3")
response = maximize(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #3")
def test_fully_exit_fullscreen(session):
"""
4. Fully exit fullscreen.
[...]
To fully exit fullscreen a document document, run these steps:
1. If document's fullscreen element is null, terminate these steps.
2. Unfullscreen elements whose fullscreen flag is set, within
document's top layer, except for document's fullscreen element.
3. Exit fullscreen document.
"""
session.window.fullscreen()
assert session.execute_script("return window.fullScreen") is True
response = maximize(session)
assert_success(response)
assert session.execute_script("return window.fullScreen") is False
def test_restore_the_window(session):
"""
5. Restore the window.
[...]
To restore the window, given an operating system level window with
an associated top-level browsing context, run implementation-specific
steps to restore or unhide the window to the visible screen. Do not
return from this operation until the visibility state of the top-level
browsing context's active document has reached the visible state,
or until the operation times out.
"""
session.window.minimize()
assert session.execute_script("return document.hidden") is True
response = maximize(session)
assert_success(response)
def test_maximize(session):
"""
6. Maximize the window of the current browsing context.
[...]
To maximize the window, given an operating system level window with an
associated top-level browsing context, run the implementation-specific
steps to transition the operating system level window into the
maximized window state. If the window manager supports window
    resizing but does not have a concept of window maximization, the window
dimensions must be increased to the maximum available size permitted
by the window manager for the current screen. Return when the window
has completed the transition, or within an implementation-defined
timeout.
"""
before_size = session.window.size
response = maximize(session)
assert_success(response)
assert before_size != session.window.size
def test_payload(session):
"""
7. Return success with the JSON serialization of the current top-level
browsing context's window rect.
[...]
A top-level browsing context's window rect is defined as a
dictionary of the screenX, screenY, width and height attributes of
the WindowProxy. Its JSON representation is the following:
"x"
WindowProxy's screenX attribute.
"y"
WindowProxy's screenY attribute.
"width"
Width of the top-level browsing context's outer dimensions,
including any browser chrome and externally drawn window
decorations in CSS reference pixels.
"height"
Height of the top-level browsing context's outer dimensions,
including any browser chrome and externally drawn window
decorations in CSS reference pixels.
"""
before_size = session.window.size
response = maximize(session)
# step 5
assert response.status == 200
assert isinstance(response.body["value"], dict)
value = response.body["value"]
assert "width" in value
assert "height" in value
assert "x" in value
assert "y" in value
assert isinstance(value["width"], int)
assert isinstance(value["height"], int)
assert isinstance(value["x"], int)
assert isinstance(value["y"], int)
assert before_size != session.window.size
def test_maximize_twice_is_idempotent(session):
first_response = maximize(session)
assert_success(first_response)
max_size = session.window.size
second_response = maximize(session)
assert_success(second_response)
assert session.window.size == max_size
"""
TODO(ato): Implicit session start does not use configuration passed on
from wptrunner. This causes an exception.
See https://bugzil.la/1398459.
def test_maximize_when_resized_to_max_size(session):
# Determine the largest available window size by first maximising
# the window and getting the window rect dimensions.
#
# Then resize the window to the maximum available size.
session.end()
available = session.window.maximize()
session.end()
session.window.size = available
# In certain window managers a window extending to the full available
# dimensions of the screen may not imply that the window is maximised,
# since this is often a special state. If a remote end expects a DOM
# resize event, this may not fire if the window has already reached
# its expected dimensions.
before = session.window.size
session.window.maximize()
assert session.window.size == before
"""
| mpl-2.0 | -8,871,542,050,775,035,000 | 28.256318 | 127 | 0.692991 | false |
code4futuredotorg/reeborg_tw | src/libraries/Brython3.2.3/Lib/encodings/iso8859_10.py | 272 | 13589 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
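# Illustrative sketch (not part of the generated codec): round-tripping a
# couple of bytes through the charmap tables defined below. 0xC6 decodes to
# LATIN CAPITAL LETTER AE and 0xB1 to LATIN SMALL LETTER A WITH OGONEK.
def _example_roundtrip():
    text, _ = codecs.charmap_decode(b'\xc6\xb1', 'strict', decoding_table)
    assert text == '\xc6\u0105'
    raw, _ = codecs.charmap_encode(text, 'strict', encoding_table)
    assert raw == b'\xc6\xb1'
    return text, raw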
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\xa7' # 0xA7 -> SECTION SIGN
'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
'\xad' # 0xAD -> SOFT HYPHEN
'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
'\xb7' # 0xB7 -> MIDDLE DOT
'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
'\u2015' # 0xBD -> HORIZONTAL BAR
'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| agpl-3.0 | -3,722,531,130,212,565,500 | 43.263844 | 109 | 0.530871 | false |
xolox/python-deb-pkg-tools | deb_pkg_tools/config.py | 1 | 2091 | # Debian packaging tools: Configuration defaults.
#
# Author: Peter Odding <[email protected]>
# Last Change: February 6, 2020
# URL: https://github.com/xolox/python-deb-pkg-tools
"""Configuration defaults for the `deb-pkg-tools` package."""
# Standard library modules.
import os
# External dependencies.
from humanfriendly import parse_path
# Public identifiers that require documentation.
__all__ = (
"package_cache_directory",
"repo_config_file",
"system_cache_directory",
"system_config_directory",
"user_cache_directory",
"user_config_directory",
)
system_config_directory = '/etc/deb-pkg-tools'
"""The pathname of the global (system wide) configuration directory used by `deb-pkg-tools` (a string)."""
system_cache_directory = '/var/cache/deb-pkg-tools'
"""The pathname of the global (system wide) package cache directory (a string)."""
user_config_directory = parse_path('~/.deb-pkg-tools')
"""
The pathname of the current user's configuration directory used by `deb-pkg-tools` (a string).
:default: The expanded value of ``~/.deb-pkg-tools``.
"""
user_cache_directory = parse_path('~/.cache/deb-pkg-tools')
"""
The pathname of the current user's package cache directory (a string).
:default: The expanded value of ``~/.cache/deb-pkg-tools``.
"""
# The location of the package cache. If we're running as root we have write
# access to the system wide package cache so we'll pick that; the more users
# sharing this cache the more effective it is.
package_cache_directory = system_cache_directory if os.getuid() == 0 else user_cache_directory
"""
The pathname of the selected package cache directory (a string).
:default: The value of :data:`system_cache_directory` when running as ``root``,
the value of :data:`user_cache_directory` otherwise.
"""
repo_config_file = 'repos.ini'
"""
The base name of the configuration file with user-defined Debian package repositories (a string).
This configuration file is loaded from :data:`system_config_directory` and/or
:data:`user_config_directory`.
:default: The string ``repos.ini``.
"""
| mit | 166,451,441,080,455,400 | 31.169231 | 106 | 0.724055 | false |
40223114/2015_g4 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/sprite.py | 603 | 55779 | ## pygame - Python Game Library
## Copyright (C) 2000-2003, 2007 Pete Shinners
## (C) 2004 Joe Wreschnig
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
"""pygame module with basic game object classes
This module contains several simple classes to be used within games. There
are the main Sprite class and several Group classes that contain Sprites.
The use of these classes is entirely optional when using Pygame. The classes
are fairly lightweight and only provide a starting place for the code
that is common to most games.
The Sprite class is intended to be used as a base class for the different
types of objects in the game. There is also a base Group class that simply
stores sprites. A game could create new types of Group classes that operate
on specially customized Sprite instances they contain.
The basic Sprite class can draw the Sprites it contains to a Surface. The
Group.draw() method requires that each Sprite have a Surface.image attribute
and a Surface.rect. The Group.clear() method requires these same attributes
and can be used to erase all the Sprites with background. There are also
more advanced Groups: pygame.sprite.RenderUpdates() and
pygame.sprite.OrderedUpdates().
Lastly, this module contains several collision functions. These help find
sprites inside multiple groups that have intersecting bounding rectangles.
To find the collisions, the Sprites are required to have a Surface.rect
attribute assigned.
The groups are designed for high efficiency in removing and adding Sprites
to them. They also allow cheap testing to see if a Sprite already exists in
a Group. A given Sprite can exist in any number of groups. A game could use
some groups to control object rendering, and a completely separate set of
groups to control interaction or player movement. Instead of adding type
attributes or bools to a derived Sprite class, consider keeping the
Sprites inside organized Groups. This will allow for easier lookup later
in the game.
Sprites and Groups manage their relationships with the add() and remove()
methods. These methods can accept a single or multiple group arguments for
membership. The default initializers for these classes also take a
single group or list of groups as arguments for initial membership. It is safe
to repeatedly add and remove the same Sprite from a Group.
While it is possible to design sprite and group classes that don't derive
from the Sprite and AbstractGroup classes below, it is strongly recommended
that you extend those when you create a new Sprite or Group class.
Sprites are not thread safe, so lock them yourself if using threads.
"""
##todo
## a group that holds only the 'n' most recent elements.
## sort of like the GroupSingle class, but holding more
## than one sprite
##
## drawing groups that can 'automatically' store the area
## underneath so they can "clear" without needing a background
## function. obviously a little slower than normal, but nice
## to use in many situations. (also remember it must "clear"
## in the reverse order that it draws :])
##
## the drawing groups should also be able to take a background
## function, instead of just a background surface. the function
## would take a surface and a rectangle on that surface to erase.
##
## perhaps more types of collision functions? the current two
## should handle just about every need, but perhaps more optimized
## specific ones that aren't quite so general but fit into common
## specialized cases.
import pygame
from pygame.rect import Rect
from pygame.time import get_ticks
from operator import truth
# Python 3 does not have the callable function, but an equivalent can be made
# with the hasattr function.
#if 'callable' not in dir(__builtins__):
callable = lambda obj: hasattr(obj, '__call__')
# Don't depend on pygame.mask if it's not there...
try:
from pygame.mask import from_surface
except:
pass
class Sprite(object):
"""simple base class for visible game objects
pygame.sprite.Sprite(*groups): return Sprite
The base class for visible game objects. Derived classes will want to
override the Sprite.update() method and assign Sprite.image and Sprite.rect
attributes. The initializer can accept any number of Group instances that
the Sprite will become a member of.
When subclassing the Sprite class, be sure to call the base initializer
before adding the Sprite to Groups.
"""
def __init__(self, *groups):
self.__g = {} # The groups the sprite is in
if groups:
self.add(*groups)
def add(self, *groups):
"""add the sprite to groups
Sprite.add(*groups): return None
Any number of Group instances can be passed as arguments. The
Sprite will be added to the Groups it is not already a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if not has(group):
group.add_internal(self)
self.add_internal(group)
else:
self.add(*group)
def remove(self, *groups):
"""remove the sprite from groups
Sprite.remove(*groups): return None
Any number of Group instances can be passed as arguments. The Sprite
will be removed from the Groups it is currently a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if has(group):
group.remove_internal(self)
self.remove_internal(group)
else:
self.remove(*group)
def add_internal(self, group):
self.__g[group] = 0
def remove_internal(self, group):
del self.__g[group]
def update(self, *args):
"""method to control sprite behavior
Sprite.update(*args):
The default implementation of this method does nothing; it's just a
convenient "hook" that you can override. This method is called by
Group.update() with whatever arguments you give it.
There is no need to use this method if not using the convenience
method by the same name in the Group class.
"""
pass
def kill(self):
"""remove the Sprite from all Groups
Sprite.kill(): return None
The Sprite is removed from all the Groups that contain it. This won't
change anything about the state of the Sprite. It is possible to
continue to use the Sprite after this method has been called, including
adding it to Groups.
"""
for c in self.__g:
c.remove_internal(self)
self.__g.clear()
def groups(self):
"""list of Groups that contain this Sprite
Sprite.groups(): return group_list
Returns a list of all the Groups that contain this Sprite.
"""
return list(self.__g)
def alive(self):
"""does the sprite belong to any groups
Sprite.alive(): return bool
Returns True when the Sprite belongs to one or more Groups.
"""
return truth(self.__g)
def __repr__(self):
return "<%s sprite(in %d groups)>" % (self.__class__.__name__, len(self.__g))
class DirtySprite(Sprite):
"""a more featureful subclass of Sprite with more attributes
pygame.sprite.DirtySprite(*groups): return DirtySprite
Extra DirtySprite attributes with their default values:
dirty = 1
If set to 1, it is repainted and then set to 0 again.
If set to 2, it is always dirty (repainted each frame;
flag is not reset).
If set to 0, it is not dirty and therefore not repainted again.
blendmode = 0
It's the special_flags argument of Surface.blit; see the blendmodes in
the Surface.blit documentation
source_rect = None
This is the source rect to use. Remember that it is relative to the top
left corner (0, 0) of self.image.
visible = 1
Normally this is 1. If set to 0, it will not be repainted. (If you
change visible to 1, you must set dirty to 1 for it to be erased from
the screen.)
_layer = 0
A READ ONLY value, it is read when adding it to the LayeredUpdates
group. For details see documentation of sprite.LayeredUpdates.
"""
def __init__(self, *groups):
self.dirty = 1
self.blendmode = 0 # pygame 1.8, referred to as special_flags in
# the documentation of Surface.blit
self._visible = 1
self._layer = 0 # READ ONLY by LayeredUpdates or LayeredDirty
self.source_rect = None
Sprite.__init__(self, *groups)
def _set_visible(self, val):
"""set the visible value (0 or 1) and makes the sprite dirty"""
self._visible = val
if self.dirty < 2:
self.dirty = 1
def _get_visible(self):
"""return the visible value of that sprite"""
return self._visible
visible = property(lambda self: self._get_visible(),
lambda self, value: self._set_visible(value),
doc="you can make this sprite disappear without "
"removing it from the group,\n"
"assign 0 for invisible and 1 for visible")
def __repr__(self):
return "<%s DirtySprite(in %d groups)>" % \
(self.__class__.__name__, len(self.groups()))
class AbstractGroup(object):
"""base class for containers of sprites
AbstractGroup does everything needed to behave as a normal group. You can
easily subclass a new group class from this or the other groups below if
you want to add more features.
Any AbstractGroup-derived sprite groups act like sequences and support
iteration, len, and so on.
"""
# dummy val to identify sprite groups, and avoid infinite recursion
_spritegroup = True
def __init__(self):
self.spritedict = {}
self.lostsprites = []
def sprites(self):
"""get a list of sprites in the group
        Group.sprites(): return list
Returns an object that can be looped over with a 'for' loop. (For now,
it is always a list, but this could change in a future version of
pygame.) Alternatively, you can get the same information by iterating
directly over the sprite group, e.g. 'for sprite in group'.
"""
return list(self.spritedict)
def add_internal(self, sprite):
self.spritedict[sprite] = 0
def remove_internal(self, sprite):
r = self.spritedict[sprite]
if r:
self.lostsprites.append(r)
del self.spritedict[sprite]
def has_internal(self, sprite):
return sprite in self.spritedict
def copy(self):
"""copy a group with all the same sprites
Group.copy(): return Group
Returns a copy of the group that is an instance of the same class
and has the same sprites in it.
"""
return self.__class__(self.sprites())
def __iter__(self):
return iter(self.sprites())
def __contains__(self, sprite):
return self.has(sprite)
def add(self, *sprites):
"""add sprite(s) to group
Group.add(sprite, list, group, ...): return None
Adds a sprite or sequence of sprites to a group.
"""
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
def remove(self, *sprites):
"""remove sprite(s) from group
Group.remove(sprite, list, or group, ...): return None
Removes a sprite or sequence of sprites from a group.
"""
# This function behaves essentially the same as Group.add. It first
# tries to handle each argument as an instance of the Sprite class. If
        # that fails, then it tries to handle the argument as an iterable
        # object. If that fails, then it tries to handle the argument as an
# old-style sprite group. Lastly, if that fails, it assumes that the
# normal Sprite methods should be used.
for sprite in sprites:
if isinstance(sprite, Sprite):
if self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
else:
try:
self.remove(*sprite)
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
self.remove_internal(spr)
spr.remove_internal(self)
elif self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
def has(self, *sprites):
"""ask if group has a sprite or sprites
Group.has(sprite or group, ...): return bool
Returns True if the given sprite or sprites are contained in the
group. Alternatively, you can get the same information using the
'in' operator, e.g. 'sprite in group', 'subgroup in group'.
"""
return_value = False
for sprite in sprites:
if isinstance(sprite, Sprite):
# Check for Sprite instance's membership in this group
if self.has_internal(sprite):
return_value = True
else:
return False
else:
try:
if self.has(*sprite):
return_value = True
else:
return False
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
return_value = True
else:
return False
else:
if self.has_internal(sprite):
return_value = True
else:
return False
return return_value
def update(self, *args):
"""call the update method of every member sprite
Group.update(*args): return None
Calls the update method of every member sprite. All arguments that
were passed to this method are passed to the Sprite update function.
"""
for s in self.sprites():
s.update(*args)
def draw(self, surface):
"""draw all sprites onto the surface
Group.draw(surface): return None
Draws all of the member sprites onto the given surface.
"""
#from javascript import console
sprites = self.sprites()
surface_blit = surface.blit
for spr in sprites:
#console.log(spr.image, spr.rect)
#console.log(spr.image._canvas.width, spr.image._canvas.height)
self.spritedict[spr] = surface_blit(spr.image, spr.rect)
self.lostsprites = []
def clear(self, surface, bgd):
"""erase the previous position of all sprites
Group.clear(surface, bgd): return None
Clears the area under every drawn sprite in the group. The bgd
argument should be Surface which is the same dimensions as the
screen surface. The bgd could also be a function which accepts
the given surface and the area to be cleared as arguments.
"""
if callable(bgd):
for r in self.lostsprites:
bgd(surface, r)
for r in self.spritedict.values():
if r:
bgd(surface, r)
else:
surface_blit = surface.blit
for r in self.lostsprites:
surface_blit(bgd, r, r)
for r in self.spritedict.values():
if r:
surface_blit(bgd, r, r)
def empty(self):
"""remove all sprites
Group.empty(): return None
Removes all the sprites from the group.
"""
for s in self.sprites():
self.remove_internal(s)
s.remove_internal(self)
def __nonzero__(self):
return truth(self.sprites())
def __len__(self):
"""return number of sprites in group
Group.len(group): return int
Returns the number of sprites contained in the group.
"""
return len(self.sprites())
def __repr__(self):
return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
"""container class for many Sprites
pygame.sprite.Group(*sprites): return Group
A simple container for Sprite objects. This class can be subclassed to
create containers with more specific behaviors. The constructor takes any
number of Sprite arguments to add to the Group. The group supports the
following standard Python operations:
in test if a Sprite is contained
len the number of Sprites contained
bool test if any Sprites are contained
iter iterate through all the Sprites
The Sprites in the Group are not ordered, so the Sprites are drawn and
iterated over in no particular order.
"""
def __init__(self, *sprites):
AbstractGroup.__init__(self)
self.add(*sprites)
RenderPlain = Group
RenderClear = Group
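# Illustrative sketch (not part of the original module): the minimal
# image/rect contract that Group.update(), Group.clear() and Group.draw()
# rely on. The screen and background surfaces are assumed to be created by
# the caller (e.g. via pygame.display.set_mode).
def _example_group_usage(screen, background):
    class Block(Sprite):
        def __init__(self, color, width, height):
            Sprite.__init__(self)
            self.image = pygame.Surface((width, height))
            self.image.fill(color)
            self.rect = self.image.get_rect()
    group = Group(Block((255, 0, 0), 16, 16), Block((0, 255, 0), 32, 32))
    group.clear(screen, background)   # erase the previous positions
    group.update()                    # no-op here; Block defines no update()
    group.draw(screen)                # blit each sprite.image at sprite.rect
    return group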
class RenderUpdates(Group):
"""Group class that tracks dirty updates
pygame.sprite.RenderUpdates(*sprites): return RenderUpdates
This class is derived from pygame.sprite.Group(). It has an enhanced draw
method that tracks the changed areas of the screen.
"""
def draw(self, surface):
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
for s in self.sprites():
r = spritedict[s]
newrect = surface_blit(s.image, s.rect)
if r:
if newrect.colliderect(r):
dirty_append(newrect.union(r))
else:
dirty_append(newrect)
dirty_append(r)
else:
dirty_append(newrect)
spritedict[s] = newrect
return dirty
class OrderedUpdates(RenderUpdates):
"""RenderUpdates class that draws Sprites in order of addition
    pygame.sprite.OrderedUpdates(*sprites): return OrderedUpdates
This class derives from pygame.sprite.RenderUpdates(). It maintains
the order in which the Sprites were added to the Group for rendering.
This makes adding and removing Sprites from the Group a little
slower than regular Groups.
"""
def __init__(self, *sprites):
self._spritelist = []
RenderUpdates.__init__(self, *sprites)
def sprites(self):
return list(self._spritelist)
def add_internal(self, sprite):
RenderUpdates.add_internal(self, sprite)
self._spritelist.append(sprite)
def remove_internal(self, sprite):
RenderUpdates.remove_internal(self, sprite)
self._spritelist.remove(sprite)
class LayeredUpdates(AbstractGroup):
"""LayeredUpdates Group handles layers, which are drawn like OrderedUpdates
    pygame.sprite.LayeredUpdates(*sprites, **kwargs): return LayeredUpdates
This group is fully compatible with pygame.sprite.Sprite.
New in pygame 1.8.0
"""
_init_rect = Rect(0, 0, 0, 0)
def __init__(self, *sprites, **kwargs):
"""initialize an instance of LayeredUpdates with the given attributes
You can set the default layer through kwargs using 'default_layer'
and an integer for the layer. The default layer is 0.
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwarg contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwarg has a 'layer', then the default layer is
used to add the sprites.
"""
self._spritelayers = {}
self._spritelist = []
AbstractGroup.__init__(self)
self._default_layer = kwargs.get('default_layer', 0)
self.add(*sprites, **kwargs)
def add_internal(self, sprite, layer=None):
"""Do not use this method directly.
It is used by the group to add a sprite internally.
"""
self.spritedict[sprite] = self._init_rect
if layer is None:
try:
layer = sprite._layer
except AttributeError:
layer = sprite._layer = self._default_layer
elif hasattr(sprite, '_layer'):
sprite._layer = layer
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers
sprites_layers[sprite] = layer
# add the sprite at the right position
        # bisect algorithm
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= layer:
mid += 1
sprites.insert(mid, sprite)
def add(self, *sprites, **kwargs):
"""add a sprite or sequence of sprites to a group
LayeredUpdates.add(*sprites, **kwargs): return None
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwarg contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwarg has a 'layer', then the default layer is
used to add the sprites.
"""
if not sprites:
return
if 'layer' in kwargs:
layer = kwargs['layer']
else:
layer = None
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite, **kwargs)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr, layer)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
def remove_internal(self, sprite):
"""Do not use this method directly.
The group uses it to add a sprite.
"""
self._spritelist.remove(sprite)
# these dirty rects are suboptimal for one frame
r = self.spritedict[sprite]
if r is not self._init_rect:
self.lostsprites.append(r) # dirty rect
if hasattr(sprite, 'rect'):
self.lostsprites.append(sprite.rect) # dirty rect
del self.spritedict[sprite]
del self._spritelayers[sprite]
def sprites(self):
"""return a ordered list of sprites (first back, last top).
LayeredUpdates.sprites(): return sprites
"""
return list(self._spritelist)
def draw(self, surface):
"""draw all sprites in the right order onto the passed surface
LayeredUpdates.draw(surface): return Rect_list
"""
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
init_rect = self._init_rect
for spr in self.sprites():
rec = spritedict[spr]
newrect = surface_blit(spr.image, spr.rect)
if rec is init_rect:
dirty_append(newrect)
else:
if newrect.colliderect(rec):
dirty_append(newrect.union(rec))
else:
dirty_append(newrect)
dirty_append(rec)
spritedict[spr] = newrect
return dirty
def get_sprites_at(self, pos):
"""return a list with all sprites at that position
LayeredUpdates.get_sprites_at(pos): return colliding_sprites
Bottom sprites are listed first; the top ones are listed last.
"""
_sprites = self._spritelist
rect = Rect(pos, (0, 0))
colliding_idx = rect.collidelistall(_sprites)
colliding = [_sprites[i] for i in colliding_idx]
return colliding
def get_sprite(self, idx):
"""return the sprite at the index idx from the groups sprites
LayeredUpdates.get_sprite(idx): return sprite
Raises IndexOutOfBounds if the idx is not within range.
"""
return self._spritelist[idx]
def remove_sprites_of_layer(self, layer_nr):
"""remove all sprites from a layer and return them as a list
LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites
"""
sprites = self.get_sprites_from_layer(layer_nr)
self.remove(*sprites)
return sprites
#---# layer methods
def layers(self):
"""return a list of unique defined layers defined.
LayeredUpdates.layers(): return layers
"""
return sorted(set(self._spritelayers.values()))
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredUpdates.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers # speedup
sprites.remove(sprite)
sprites_layers.pop(sprite)
# add the sprite at the right position
        # bisect algorithm
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= new_layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= new_layer:
mid += 1
sprites.insert(mid, sprite)
if hasattr(sprite, 'layer'):
sprite.layer = new_layer
# add layer info
sprites_layers[sprite] = new_layer
def get_layer_of_sprite(self, sprite):
"""return the layer that sprite is currently in
If the sprite is not found, then it will return the default layer.
"""
return self._spritelayers.get(sprite, self._default_layer)
def get_top_layer(self):
"""return the top layer
LayeredUpdates.get_top_layer(): return layer
"""
return self._spritelayers[self._spritelist[-1]]
def get_bottom_layer(self):
"""return the bottom layer
LayeredUpdates.get_bottom_layer(): return layer
"""
return self._spritelayers[self._spritelist[0]]
def move_to_front(self, sprite):
"""bring the sprite to front layer
LayeredUpdates.move_to_front(sprite): return None
Brings the sprite to front by changing the sprite layer to the top-most
layer. The sprite is added at the end of the list of sprites in that
top-most layer.
"""
self.change_layer(sprite, self.get_top_layer())
def move_to_back(self, sprite):
"""move the sprite to the bottom layer
LayeredUpdates.move_to_back(sprite): return None
Moves the sprite to the bottom layer by moving it to a new layer below
the current bottom layer.
"""
self.change_layer(sprite, self.get_bottom_layer() - 1)
def get_top_sprite(self):
"""return the topmost sprite
LayeredUpdates.get_top_sprite(): return Sprite
"""
return self._spritelist[-1]
def get_sprites_from_layer(self, layer):
"""return all sprites from a layer ordered as they where added
LayeredUpdates.get_sprites_from_layer(layer): return sprites
Returns all sprites from a layer. The sprites are ordered in the
        sequence that they were added. (The sprites are not removed from the
        layer.)
"""
sprites = []
sprites_append = sprites.append
sprite_layers = self._spritelayers
for spr in self._spritelist:
if sprite_layers[spr] == layer:
sprites_append(spr)
elif sprite_layers[spr] > layer:# break after because no other will
# follow with same layer
break
return sprites
def switch_layer(self, layer1_nr, layer2_nr):
"""switch the sprites from layer1_nr to layer2_nr
LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None
        The layer numbers must exist. This method does not check for the
existence of the given layers.
"""
sprites1 = self.remove_sprites_of_layer(layer1_nr)
for spr in self.get_sprites_from_layer(layer2_nr):
self.change_layer(spr, layer1_nr)
self.add(layer=layer2_nr, *sprites1)
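# Illustrative sketch (not part of the original module): explicit layer
# assignment with LayeredUpdates. The three arguments are assumed to be
# pygame.sprite.Sprite instances following the usual image/rect contract;
# lower layers are drawn first, so higher layers end up on top.
def _example_layered_usage(background_sprite, player_sprite, hud_sprite):
    group = LayeredUpdates()
    group.add(background_sprite, layer=0)
    group.add(player_sprite, layer=1)
    group.add(hud_sprite, layer=2)
    assert group.get_top_sprite() is hud_sprite
    group.change_layer(player_sprite, 3)   # the player is now drawn above the HUD
    return group.layers()                  # [0, 2, 3]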
class LayeredDirty(LayeredUpdates):
"""LayeredDirty Group is for DirtySprites; subclasses LayeredUpdates
    pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
This group requires pygame.sprite.DirtySprite or any sprite that
has the following attributes:
image, rect, dirty, visible, blendmode (see doc of DirtySprite).
It uses the dirty flag technique and is therefore faster than
pygame.sprite.RenderUpdates if you have many static sprites. It
also switches automatically between dirty rect updating and full
    screen drawing, so you do not have to worry which would be faster.
As with the pygame.sprite.Group, you can specify some additional attributes
through kwargs:
_use_update: True/False (default is False)
_default_layer: default layer where the sprites without a layer are
added
        _time_threshold: threshold time for switching between dirty rect mode
and fullscreen mode; defaults to updating at 80 frames per second,
which is equal to 1000.0 / 80.0
New in pygame 1.8.0
"""
def __init__(self, *sprites, **kwargs):
"""initialize group.
        pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
You can specify some additional attributes through kwargs:
_use_update: True/False (default is False)
_default_layer: default layer where the sprites without a layer are
added
            _time_threshold: threshold time for switching between dirty rect
mode and fullscreen mode; defaults to updating at 80 frames per
second, which is equal to 1000.0 / 80.0
"""
LayeredUpdates.__init__(self, *sprites, **kwargs)
self._clip = None
self._use_update = False
self._time_threshold = 1000.0 / 80.0 # 1000.0 / fps
self._bgd = None
for key, val in kwargs.items():
if key in ['_use_update', '_time_threshold', '_default_layer']:
if hasattr(self, key):
setattr(self, key, val)
def add_internal(self, sprite, layer=None):
"""Do not use this method directly.
It is used by the group to add a sprite internally.
"""
# check if all needed attributes are set
if not hasattr(sprite, 'dirty'):
raise AttributeError()
if not hasattr(sprite, 'visible'):
raise AttributeError()
if not hasattr(sprite, 'blendmode'):
raise AttributeError()
if not isinstance(sprite, DirtySprite):
raise TypeError()
if sprite.dirty == 0: # set it dirty if it is not
sprite.dirty = 1
LayeredUpdates.add_internal(self, sprite, layer)
def draw(self, surface, bgd=None):
"""draw all sprites in the right order onto the given surface
LayeredDirty.draw(surface, bgd=None): return Rect_list
You can pass the background too. If a self.bgd is already set to some
value that is not None, then the bgd argument has no effect.
"""
# speedups
_orig_clip = surface.get_clip()
_clip = self._clip
if _clip is None:
_clip = _orig_clip
_surf = surface
_sprites = self._spritelist
_old_rect = self.spritedict
_update = self.lostsprites
_update_append = _update.append
_ret = None
_surf_blit = _surf.blit
_rect = Rect
if bgd is not None:
self._bgd = bgd
_bgd = self._bgd
init_rect = self._init_rect
_surf.set_clip(_clip)
# -------
# 0. decide whether to render with update or flip
start_time = get_ticks()
if self._use_update: # dirty rects mode
# 1. find dirty area on screen and put the rects into _update
# still not happy with that part
for spr in _sprites:
if 0 < spr.dirty:
# chose the right rect
if spr.source_rect:
_union_rect = _rect(spr.rect.topleft,
spr.source_rect.size)
else:
_union_rect = _rect(spr.rect)
_union_rect_collidelist = _union_rect.collidelist
_union_rect_union_ip = _union_rect.union_ip
i = _union_rect_collidelist(_update)
while -1 < i:
_union_rect_union_ip(_update[i])
del _update[i]
i = _union_rect_collidelist(_update)
_update_append(_union_rect.clip(_clip))
if _old_rect[spr] is not init_rect:
_union_rect = _rect(_old_rect[spr])
_union_rect_collidelist = _union_rect.collidelist
_union_rect_union_ip = _union_rect.union_ip
i = _union_rect_collidelist(_update)
while -1 < i:
_union_rect_union_ip(_update[i])
del _update[i]
i = _union_rect_collidelist(_update)
_update_append(_union_rect.clip(_clip))
# can it be done better? because that is an O(n**2) algorithm in
# worst case
# clear using background
if _bgd is not None:
for rec in _update:
_surf_blit(_bgd, rec, rec)
# 2. draw
for spr in _sprites:
if 1 > spr.dirty:
if spr._visible:
# sprite not dirty; blit only the intersecting part
_spr_rect = spr.rect
if spr.source_rect is not None:
_spr_rect = Rect(spr.rect.topleft,
spr.source_rect.size)
_spr_rect_clip = _spr_rect.clip
for idx in _spr_rect.collidelistall(_update):
# clip
clip = _spr_rect_clip(_update[idx])
_surf_blit(spr.image,
clip,
(clip[0] - _spr_rect[0],
clip[1] - _spr_rect[1],
clip[2],
clip[3]),
spr.blendmode)
else: # dirty sprite
if spr._visible:
_old_rect[spr] = _surf_blit(spr.image,
spr.rect,
spr.source_rect,
spr.blendmode)
if spr.dirty == 1:
spr.dirty = 0
_ret = list(_update)
else: # flip, full screen mode
if _bgd is not None:
_surf_blit(_bgd, (0, 0))
for spr in _sprites:
if spr._visible:
_old_rect[spr] = _surf_blit(spr.image,
spr.rect,
spr.source_rect,
spr.blendmode)
_ret = [_rect(_clip)] # return only the part of the screen changed
# timing for switching modes
# How may a good threshold be found? It depends on the hardware.
end_time = get_ticks()
if end_time-start_time > self._time_threshold:
self._use_update = False
else:
self._use_update = True
## # debug
## print " check: using dirty rects:", self._use_update
        # empty dirty rects list
_update[:] = []
# -------
# restore original clip
_surf.set_clip(_orig_clip)
return _ret
def clear(self, surface, bgd):
"""use to set background
Group.clear(surface, bgd): return None
"""
self._bgd = bgd
def repaint_rect(self, screen_rect):
"""repaint the given area
LayeredDirty.repaint_rect(screen_rect): return None
screen_rect is in screen coordinates.
"""
if self._clip:
self.lostsprites.append(screen_rect.clip(self._clip))
else:
self.lostsprites.append(Rect(screen_rect))
def set_clip(self, screen_rect=None):
"""clip the area where to draw; pass None (default) to reset the clip
LayeredDirty.set_clip(screen_rect=None): return None
"""
if screen_rect is None:
self._clip = pygame.display.get_surface().get_rect()
else:
self._clip = screen_rect
self._use_update = False
def get_clip(self):
"""get the area where drawing will occur
LayeredDirty.get_clip(): return Rect
"""
return self._clip
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredUpdates.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
LayeredUpdates.change_layer(self, sprite, new_layer)
if sprite.dirty == 0:
sprite.dirty = 1
def set_timing_treshold(self, time_ms):
"""set the treshold in milliseconds
set_timing_treshold(time_ms): return None
Defaults to 1000.0 / 80.0. This means that the screen will be painted
using the flip method rather than the update method if the update
method is taking so long to update the screen that the frame rate falls
below 80 frames per second.
"""
self._time_threshold = time_ms
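# Illustrative sketch (not part of the original module): the per-frame
# dirty-rect workflow LayeredDirty is designed for. The group is assumed to
# contain DirtySprite instances set up elsewhere; only sprites whose dirty
# flag is non-zero are repainted, and only the changed rects are pushed.
def _example_layered_dirty_frame(group, screen, background):
    group.clear(screen, background)   # only records the background surface
    group.update()                    # game logic sets sprite.dirty = 1 as needed
    changed = group.draw(screen)      # list of rects that actually changed
    pygame.display.update(changed)    # push just those areas to the display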
class GroupSingle(AbstractGroup):
"""A group container that holds a single most recent item.
This class works just like a regular group, but it only keeps a single
sprite in the group. Whatever sprite has been added to the group last will
be the only sprite in the group.
You can access its one sprite as the .sprite attribute. Assigning to this
attribute will properly remove the old sprite and then add the new one.
"""
def __init__(self, sprite=None):
AbstractGroup.__init__(self)
self.__sprite = None
if sprite is not None:
self.add(sprite)
def copy(self):
return GroupSingle(self.__sprite)
def sprites(self):
if self.__sprite is not None:
return [self.__sprite]
else:
return []
def add_internal(self, sprite):
if self.__sprite is not None:
self.__sprite.remove_internal(self)
self.remove_internal(self.__sprite)
self.__sprite = sprite
def __nonzero__(self):
return self.__sprite is not None
def _get_sprite(self):
return self.__sprite
def _set_sprite(self, sprite):
self.add_internal(sprite)
sprite.add_internal(self)
return sprite
sprite = property(_get_sprite,
_set_sprite,
None,
"The sprite contained in this group")
def remove_internal(self, sprite):
if sprite is self.__sprite:
self.__sprite = None
if sprite in self.spritedict:
AbstractGroup.remove_internal(self, sprite)
def has_internal(self, sprite):
return self.__sprite is sprite
# Optimizations...
def __contains__(self, sprite):
return self.__sprite is sprite
# Some different collision detection functions that could be used.
def collide_rect(left, right):
"""collision detection between two sprites, using rects.
pygame.sprite.collide_rect(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect colliderect
function to calculate the collision. It is intended to be passed as a
collided callback function to the *collide functions. Sprites must have
"rect" attributes.
New in pygame 1.8.0
"""
return left.rect.colliderect(right.rect)
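# Illustrative sketch (not part of the original module): passing collide_rect
# explicitly as the collided callback of spritecollide (defined further down
# in this module). Plain rect collision is also what spritecollide uses when
# no callback is given, so this mainly documents the callback signature.
def _example_rect_collision(player, enemies):
    # kill nothing; just list the enemies whose rects overlap the player's rect
    return spritecollide(player, enemies, False, collided=collide_rect)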
class collide_rect_ratio:
"""A callable class that checks for collisions using scaled rects
The class checks for collisions between two sprites using a scaled version
    of the sprites' rects. It is created with a ratio; the instance is then
intended to be passed as a collided callback function to the *collide
functions.
New in pygame 1.8.1
"""
def __init__(self, ratio):
"""create a new collide_rect_ratio callable
Ratio is expected to be a floating point value used to scale
the underlying sprite rect before checking for collisions.
"""
self.ratio = ratio
def __call__(self, left, right):
"""detect collision between two sprites using scaled rects
pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect
colliderect function to calculate the collision after scaling the rects
by the stored ratio. Sprites must have "rect" attributes.
"""
ratio = self.ratio
leftrect = left.rect
width = leftrect.width
height = leftrect.height
leftrect = leftrect.inflate(width * ratio - width,
height * ratio - height)
rightrect = right.rect
width = rightrect.width
height = rightrect.height
rightrect = rightrect.inflate(width * ratio - width,
height * ratio - height)
return leftrect.colliderect(rightrect)
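# Illustrative sketch (not part of the original module): collide_rect_ratio is
# intended to be passed as the 'collided' callback of the collision helpers.
# 'player' and 'enemies' are hypothetical sprite/group objects assumed only for
# this example.
#
#     tighter = collide_rect_ratio(0.75)   # shrink both rects to 75% before testing
#     hits = spritecollide(player, enemies, dokill=False, collided=tighter)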
def collide_circle(left, right):
"""detect collision between two sprites using circles
pygame.sprite.collide_circle(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap. If the sprites have a "radius" attribute,
then that radius is used to create the circle; otherwise, a circle is
created that is big enough to completely enclose the sprite's rect as
given by the "rect" attribute. This function is intended to be passed as
a collided callback function to the *collide functions. Sprites must have a
"rect" and an optional "radius" attribute.
New in pygame 1.8.0
"""
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, 'radius'):
leftradius = left.radius
else:
leftrect = left.rect
        # approximating the radius of a rect by half of its diagonal
        # may give false positives (especially if it's a long, thin rect)
leftradius = 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, 'radius'):
rightradius = right.radius
else:
rightrect = right.rect
        # approximating the radius of a rect by half of its diagonal
        # may give false positives (especially if it's a long, thin rect)
rightradius = 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
class collide_circle_ratio(object):
"""detect collision between two sprites using scaled circles
This callable class checks for collisions between two sprites using a
scaled version of a sprite's radius. It is created with a ratio as the
argument to the constructor. The instance is then intended to be passed as
a collided callback function to the *collide functions.
New in pygame 1.8.1
"""
def __init__(self, ratio):
"""creates a new collide_circle_ratio callable instance
The given ratio is expected to be a floating point value used to scale
the underlying sprite radius before checking for collisions.
When the ratio is ratio=1.0, then it behaves exactly like the
collide_circle method.
"""
self.ratio = ratio
def __call__(self, left, right):
"""detect collision between two sprites using scaled circles
        pygame.sprite.collide_circle_ratio(ratio)(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap after scaling the circle's radius by
the stored ratio. If the sprites have a "radius" attribute, that is
used to create the circle; otherwise, a circle is created that is big
enough to completely enclose the sprite's rect as given by the "rect"
attribute. Intended to be passed as a collided callback function to the
*collide functions. Sprites must have a "rect" and an optional "radius"
attribute.
"""
ratio = self.ratio
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, "radius"):
leftradius = left.radius * ratio
else:
leftrect = left.rect
leftradius = ratio * 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, "radius"):
rightradius = right.radius * ratio
else:
rightrect = right.rect
rightradius = ratio * 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
def collide_mask(left, right):
"""collision detection between two sprites, using masks.
pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool
Tests for collision between two sprites by testing if their bitmasks
overlap. If the sprites have a "mask" attribute, that is used as the mask;
otherwise, a mask is created from the sprite image. Intended to be passed
as a collided callback function to the *collide functions. Sprites must
have a "rect" and an optional "mask" attribute.
New in pygame 1.8.0
"""
xoffset = right.rect[0] - left.rect[0]
yoffset = right.rect[1] - left.rect[1]
try:
leftmask = left.mask
except AttributeError:
leftmask = from_surface(left.image)
try:
rightmask = right.mask
except AttributeError:
rightmask = from_surface(right.image)
return leftmask.overlap(rightmask, (xoffset, yoffset))
def spritecollide(sprite, group, dokill, collided=None):
"""find Sprites in a Group that intersect another Sprite
pygame.sprite.spritecollide(sprite, group, dokill, collided=None):
return Sprite_list
Return a list containing all Sprites in a Group that intersect with another
Sprite. Intersection is determined by comparing the Sprite.rect attribute
of each Sprite.
The dokill argument is a bool. If set to True, all Sprites that collide
will be removed from the Group.
The collided argument is a callback function used to calculate if two
    sprites are colliding. It should take two sprites as values, and return a
bool value indicating if they are colliding. If collided is not passed, all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if dokill:
crashed = []
append = crashed.append
if collided:
for s in group.sprites():
if collided(sprite, s):
s.kill()
append(s)
else:
spritecollide = sprite.rect.colliderect
for s in group.sprites():
if spritecollide(s.rect):
s.kill()
append(s)
return crashed
elif collided:
return [s for s in group if collided(sprite, s)]
else:
spritecollide = sprite.rect.colliderect
return [s for s in group if spritecollide(s.rect)]
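# Illustrative sketch (not part of the original module): typical spritecollide
# call sites. 'player', 'coins' and 'walls' are hypothetical sprites/groups
# assumed only for this example.
#
#     picked_up = spritecollide(player, coins, dokill=True)   # rect test, kills hits
#     blocked = spritecollide(player, walls, dokill=False, collided=collide_mask)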
def groupcollide(groupa, groupb, dokilla, dokillb, collided=None):
"""detect collision between a group and another group
pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb):
return dict
Given two groups, this will find the intersections between all sprites in
each group. It returns a dictionary of all sprites in the first group that
collide. The value for each item in the dictionary is a list of the sprites
in the second group it collides with. The two dokill arguments control if
the sprites from either group will be automatically removed from all
groups. Collided is a callback function used to calculate if two sprites
    are colliding. It should take two sprites as values, and return a bool
value indicating if they are colliding. If collided is not passed, all
sprites must have a "rect" value, which is a rectangle of the sprite area
that will be used to calculate the collision.
"""
crashed = {}
SC = spritecollide
if dokilla:
for s in groupa.sprites():
c = SC(s, groupb, dokillb, collided)
if c:
crashed[s] = c
s.kill()
else:
for s in groupa:
c = SC(s, groupb, dokillb, collided)
if c:
crashed[s] = c
return crashed
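# Illustrative sketch (not part of the original module): groupcollide returns a
# dict mapping each colliding sprite of the first group to the list of sprites
# it hit in the second group. 'bullets', 'aliens' and 'score' are hypothetical.
#
#     for bullet, hit_aliens in groupcollide(bullets, aliens, True, True).items():
#         score += 10 * len(hit_aliens)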
def spritecollideany(sprite, group, collided=None):
"""finds any sprites in a group that collide with the given sprite
pygame.sprite.spritecollideany(sprite, group): return sprite
    Given a sprite and a group of sprites, this will return any single
    sprite that collides with the given sprite. If there are no
collisions, then this returns None.
If you don't need all the features of the spritecollide function, this
function will be a bit quicker.
Collided is a callback function used to calculate if two sprites are
colliding. It should take two sprites as values and return a bool value
indicating if they are colliding. If collided is not passed, then all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if collided:
for s in group:
if collided(sprite, s):
return s
else:
# Special case old behaviour for speed.
spritecollide = sprite.rect.colliderect
for s in group:
if spritecollide(s.rect):
return s
return None
| gpl-3.0 | -909,583,546,123,830,800 | 34.015066 | 95 | 0.597895 | false |
charukiewicz/beer-manager | venv/lib/python3.4/site-packages/pip/commands/__init__.py | 476 | 2236 | """
Package containing all pip commands
"""
from pip.commands.bundle import BundleCommand
from pip.commands.completion import CompletionCommand
from pip.commands.freeze import FreezeCommand
from pip.commands.help import HelpCommand
from pip.commands.list import ListCommand
from pip.commands.search import SearchCommand
from pip.commands.show import ShowCommand
from pip.commands.install import InstallCommand
from pip.commands.uninstall import UninstallCommand
from pip.commands.unzip import UnzipCommand
from pip.commands.zip import ZipCommand
from pip.commands.wheel import WheelCommand
commands = {
BundleCommand.name: BundleCommand,
CompletionCommand.name: CompletionCommand,
FreezeCommand.name: FreezeCommand,
HelpCommand.name: HelpCommand,
SearchCommand.name: SearchCommand,
ShowCommand.name: ShowCommand,
InstallCommand.name: InstallCommand,
UninstallCommand.name: UninstallCommand,
UnzipCommand.name: UnzipCommand,
ZipCommand.name: ZipCommand,
ListCommand.name: ListCommand,
WheelCommand.name: WheelCommand,
}
commands_order = [
InstallCommand,
UninstallCommand,
FreezeCommand,
ListCommand,
ShowCommand,
SearchCommand,
WheelCommand,
ZipCommand,
UnzipCommand,
BundleCommand,
HelpCommand,
]
def get_summaries(ignore_hidden=True, ordered=True):
"""Yields sorted (command name, command summary) tuples."""
if ordered:
cmditems = _sort_commands(commands, commands_order)
else:
cmditems = commands.items()
for name, command_class in cmditems:
if ignore_hidden and command_class.hidden:
continue
yield (name, command_class.summary)
def get_similar_commands(name):
"""Command name auto-correct."""
from difflib import get_close_matches
close_commands = get_close_matches(name, commands.keys())
if close_commands:
guess = close_commands[0]
else:
guess = False
return guess
def _sort_commands(cmddict, order):
def keyfn(key):
try:
return order.index(key[1])
except ValueError:
# unordered items should come last
return 0xff
return sorted(cmddict.items(), key=keyfn)
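# Illustrative sketch (not part of the original module): how the helpers above
# are typically consumed, e.g. to print help output or to suggest a command
# after a typo. The sample input/output is indicative only.
#
#     for name, summary in get_summaries():
#         print('%-12s %s' % (name, summary))
#     get_similar_commands('instal')   # -> 'install' (or False if nothing is close)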
| mit | 5,329,352,263,952,085,000 | 24.409091 | 63 | 0.7178 | false |
jpirko/lnst | lnst/Recipes/ENRT/VirtualOvsBridgeVlansOverBondRecipe.py | 1 | 7385 | import logging
from itertools import product
from lnst.Common.Parameters import Param, IntParam, StrParam
from lnst.Common.IpAddress import ipaddress
from lnst.Controller import HostReq, DeviceReq, RecipeParam
from lnst.Recipes.ENRT.BaseEnrtRecipe import BaseEnrtRecipe
from lnst.Recipes.ENRT.ConfigMixins.OffloadSubConfigMixin import (
OffloadSubConfigMixin)
from lnst.Recipes.ENRT.ConfigMixins.CommonHWSubConfigMixin import (
CommonHWSubConfigMixin)
from lnst.Recipes.ENRT.PingMixins import VlanPingEvaluatorMixin
from lnst.RecipeCommon.Ping.PingEndpoints import PingEndpoints
from lnst.Devices import OvsBridgeDevice
class VirtualOvsBridgeVlansOverBondRecipe(VlanPingEvaluatorMixin,
CommonHWSubConfigMixin, OffloadSubConfigMixin, BaseEnrtRecipe):
host1 = HostReq()
host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
host1.eth1 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
host1.tap0 = DeviceReq(label="to_guest1")
host1.tap1 = DeviceReq(label="to_guest2")
host2 = HostReq()
host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
host2.eth1 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
host2.tap0 = DeviceReq(label="to_guest3")
host2.tap1 = DeviceReq(label="to_guest4")
guest1 = HostReq()
guest1.eth0 = DeviceReq(label="to_guest1")
guest2 = HostReq()
guest2.eth0 = DeviceReq(label="to_guest2")
guest3 = HostReq()
guest3.eth0 = DeviceReq(label="to_guest3")
guest4 = HostReq()
guest4.eth0 = DeviceReq(label="to_guest4")
offload_combinations = Param(default=(
dict(gro="on", gso="on", tso="on", tx="on"),
dict(gro="off", gso="on", tso="on", tx="on"),
dict(gro="on", gso="off", tso="off", tx="on"),
dict(gro="on", gso="on", tso="off", tx="off")))
bonding_mode = StrParam(mandatory = True)
miimon_value = IntParam(mandatory = True)
def test_wide_configuration(self):
host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
self.matched.host2, self.matched.guest1, self.matched.guest2,
self.matched.guest3, self.matched.guest4)
for host, port_name in [(host1, "bond_port1"),
(host2, "bond_port2")]:
for dev in [host.eth0, host.eth1, host.tap0, host.tap1]:
dev.down()
host.br0 = OvsBridgeDevice()
for dev, tag in [(host.tap0, "10"), (host.tap1, "20")]:
host.br0.port_add(device=dev, port_options={'tag': tag})
#miimon cannot be set due to colon in argument name -->
#other_config:bond-miimon-interval
host.br0.bond_add(port_name, (host.eth0, host.eth1),
bond_mode=self.params.bonding_mode)
guest1.eth0.down()
guest2.eth0.down()
guest3.eth0.down()
guest4.eth0.down()
configuration = super().test_wide_configuration()
configuration.test_wide_devices = [guest1.eth0, guest2.eth0,
guest3.eth0, guest4.eth0]
net_addr_1 = "192.168.10"
net_addr6_1 = "fc00:0:0:1"
net_addr_2 = "192.168.20"
net_addr6_2 = "fc00:0:0:2"
for i, guest in enumerate([guest1, guest3]):
guest.eth0.ip_add(ipaddress(net_addr_1 + "." + str(i+1) +
"/24"))
guest.eth0.ip_add(ipaddress(net_addr6_1 + "::" + str(i+1) +
"/64"))
for i, guest in enumerate([guest2, guest4]):
guest.eth0.ip_add(ipaddress(net_addr_2 + "." + str(i+1) +
"/24"))
guest.eth0.ip_add(ipaddress(net_addr6_2 + "::" + str(i+1) +
"/64"))
for host in [host1, host2]:
for dev in [host.eth0, host.eth1, host.tap0, host.tap1,
host.br0]:
dev.up()
for guest in [guest1, guest2, guest3, guest4]:
guest.eth0.up()
if "perf_tool_cpu" in self.params:
logging.info("'perf_tool_cpu' param (%d) to be set to None" %
self.params.perf_tool_cpu)
self.params.perf_tool_cpu = None
self.wait_tentative_ips(configuration.test_wide_devices)
return configuration
def generate_test_wide_description(self, config):
host1, host2 = self.matched.host1, self.matched.host2
desc = super().generate_test_wide_description(config)
desc += [
"\n".join([
"Configured {}.{}.ips = {}".format(
dev.host.hostid, dev.name, dev.ips
)
for dev in config.test_wide_devices
]),
"\n".join([
"Configured {}.{}.ports = {}".format(
dev.host.hostid, dev.name, dev.ports
)
for dev in [host1.br0, host2.br0]
]),
"\n".join([
"Configured {}.{}.bonds = {}".format(
dev.host.hostid, dev.name, dev.bonds
)
for dev in [host1.br0, host2.br0]
])
]
return desc
def test_wide_deconfiguration(self, config):
del config.test_wide_devices
super().test_wide_deconfiguration(config)
def generate_ping_endpoints(self, config):
guest1, guest2, guest3, guest4 = (self.matched.guest1,
self.matched.guest2, self.matched.guest3, self.matched.guest4)
dev_combinations = product(
[guest1.eth0, guest2.eth0],
[guest3.eth0, guest4.eth0]
)
return [
PingEndpoints(
comb[0], comb[1],
reachable=((comb[0].host, comb[1].host) in [
(guest1, guest3),
(guest2, guest4)
])
)
for comb in dev_combinations
]
def generate_perf_endpoints(self, config):
return [(self.matched.guest1.eth0, self.matched.guest3.eth0)]
@property
def offload_nics(self):
host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
self.matched.host2, self.matched.guest1, self.matched.guest2,
self.matched.guest3, self.matched.guest4)
result = []
for machine in host1, host2, guest1, guest2, guest3, guest4:
result.append(machine.eth0)
result.extend([host1.eth1, host2.eth1])
return result
@property
def mtu_hw_config_dev_list(self):
host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
self.matched.host2, self.matched.guest1, self.matched.guest2,
self.matched.guest3, self.matched.guest4)
result = []
for host in [host1, host2]:
for dev in [host.eth0, host.eth1, host.tap0, host.tap1,
host.br0]:
result.append(dev)
for guest in [guest1, guest2, guest3, guest4]:
result.append(guest.eth0)
return result
@property
def dev_interrupt_hw_config_dev_list(self):
return [self.matched.host1.eth0, self.matched.host1.eth1,
self.matched.host2.eth0, self.matched.host2.eth1]
@property
def parallel_stream_qdisc_hw_config_dev_list(self):
return [self.matched.host1.eth0, self.matched.host1.eth1,
self.matched.host2.eth0, self.matched.host2.eth1]
| gpl-2.0 | -5,219,096,291,925,064,000 | 37.264249 | 75 | 0.585511 | false |
JonDoNym/peinjector | peinjector/connectors/python/libPePatch.py | 34 | 4600 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Provides de-serialization and in-stream patch applying capabilities for PE Files
"""
__author__ = 'A.A.'
# Unpack binary data
from struct import unpack_from
# Holds a single patch part
class PePatchPart(object):
# Constructor
def __init__(self, mem, position, insert):
self.mem = mem
self.next = None
self.position = position
self.insert = insert
self.finished = False
# Deserializes and applies patches on PE files
class PePatch(object):
# Sentinel size
    pepatch_sentinelsize = 9
# First Patch part
first = None
# Constructor
def __init__(self, serialized_memory):
serialized_mem_size = len(serialized_memory)
current_position = 0
current = None
patch = None
# Deserialize data
while (serialized_mem_size - current_position) >= self.pepatch_sentinelsize:
mem_size, position, insert = unpack_from("<II?", serialized_memory, current_position)
# 2*sizeof(uint32_t) + sizeof(uint8_t)
current_position += 9
# Length Error
if (serialized_mem_size - current_position) < mem_size:
return
# Extract Data
patch_data = serialized_memory[current_position:current_position + mem_size]
# Change Position
current_position += mem_size
# Add Patch
if mem_size > 0:
patch = PePatchPart(patch_data, position, insert)
else:
patch = None
# Build chain
if current is not None:
current.next = patch
if self.first is None:
self.first = patch
current = patch
# Length Error
if (serialized_mem_size - current_position) > 0:
self.first = None
return
# Patch is ok
def patch_ok(self):
return self.first is not None
# Apply patch on stream data
def apply_patch(self, mem, position):
all_finished = True
# Nothing to patch
if self.first is None:
return mem
# Apply each patch part
current = self.first
while current is not None:
# Finished, no need to check
if current.finished:
current = current.next
continue
# Patch starts inside memory
if position <= current.position < (position + len(mem)):
delta_position = current.position - position
# Insert
if current.insert:
mem = mem[:delta_position] + current.mem + mem[delta_position:]
# Patch part finished
current.finished = True
# Overwrite
else:
mem = mem[:delta_position] + current.mem[:len(mem)-delta_position] \
+ mem[delta_position+len(current.mem):]
# Patch applied
all_finished = False
# Append after current mem part (important if current part is the last part)
elif current.insert and (current.position == (position + len(mem))):
# Append patch
mem = mem + current.mem
# Patch part finished
current.finished = True
# Patch applied
all_finished = False
# Patch starts before memory
elif (not current.insert) and ((current.position + len(current.mem)) > position)\
and (current.position < position):
delta_position = position - current.position
mem = current.mem[delta_position:delta_position+len(mem)] + mem[len(current.mem)-delta_position:]
# Patch applied
all_finished = False
# Patch finished
elif (current.position + len(current.mem)) < position:
current.finished = True
# Reset total finished
else:
# Patch waiting
all_finished = False
# Next patch part
current = current.next
# Patch finished
if all_finished:
self.first = None
# Return patched memory
return mem
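# Illustrative sketch (not part of this module): applying a deserialized patch
# to a PE file streamed in chunks. File names and the chunk size are
# hypothetical, assumed only for this example.
#
#     patch = PePatch(serialized_patch_bytes)
#     if patch.patch_ok():
#         position = 0
#         with open("original.exe", "rb") as src, open("patched.exe", "wb") as dst:
#             while True:
#                 chunk = src.read(4096)
#                 if not chunk:
#                     break
#                 dst.write(patch.apply_patch(chunk, position))
#                 position += len(chunk)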
| unlicense | -4,594,839,702,750,307,300 | 29.666667 | 113 | 0.510652 | false |
fsimkovic/cptbx | conkit/io/tests/test_pdb.py | 2 | 10444 | """Testing facility for conkit.io.PdbIO"""
__author__ = "Felix Simkovic"
__date__ = "26 Oct 2016"
import os
import unittest
from conkit.io.pdb import PdbParser
from conkit.io.tests.helpers import ParserTestCase
class TestPdbIO(ParserTestCase):
def test_read_1(self):
content = """ATOM 1 N TYR A 36 39.107 51.628 3.103 0.50 43.13 N
ATOM 2 CA TYR A 36 38.300 50.814 2.204 0.50 41.80 C
ATOM 3 O TYR A 36 38.712 48.587 1.405 0.50 41.03 O
ATOM 4 CB TYR A 36 37.586 51.694 1.175 0.50 41.61 C
ATOM 5 N PHE A 86 32.465 47.498 5.487 0.50 25.81 N
ATOM 6 CA PHE A 86 32.670 48.303 4.288 0.50 26.45 C
ATOM 7 O PHE A 86 31.469 50.326 3.758 0.50 28.47 O
ATOM 8 CB PHE A 86 32.977 47.392 3.090 0.50 25.35 C
ATOM 9 N TRP A 171 23.397 37.507 -1.161 0.50 18.04 N
ATOM 10 CA TRP A 171 23.458 36.846 0.143 0.50 20.46 C
ATOM 11 O TRP A 171 22.235 34.954 0.951 0.50 22.45 O
ATOM 12 CB TRP A 171 23.647 37.866 1.275 0.50 18.83 C
ATOM 13 N PHE A 208 32.221 42.624 -5.829 0.50 19.96 N
ATOM 14 CA PHE A 208 31.905 43.710 -4.909 0.50 20.31 C
ATOM 15 O PHE A 208 32.852 45.936 -5.051 0.50 17.69 O
ATOM 16 CB PHE A 208 31.726 43.102 -3.518 0.50 19.90 C
END
"""
f_name = self.tempfile(content=content)
with open(f_name, "r") as f_in:
contact_file = PdbParser().read(f_in, distance_cutoff=8, atom_type="CB")
contact_map1 = contact_file.top_map
self.assertEqual(1, len(contact_file))
self.assertEqual(2, len(contact_map1))
self.assertEqual([36, 86], [c.res1_seq for c in contact_map1 if c.true_positive])
self.assertEqual([86, 208], [c.res2_seq for c in contact_map1 if c.true_positive])
self.assertEqual([0.934108, 0.920229], [c.raw_score for c in contact_map1 if c.true_positive])
def test_read_2(self):
content = """ATOM 1 N TYR A 36 39.107 51.628 3.103 0.50 43.13 N
ATOM 2 CA TYR A 36 38.300 50.814 2.204 0.50 41.80 C
ATOM 3 O TYR A 36 38.712 48.587 1.405 0.50 41.03 O
ATOM 4 CB TYR A 36 37.586 51.694 1.175 0.50 41.61 C
ATOM 5 N PHE A 86 32.465 47.498 5.487 0.50 25.81 N
ATOM 6 CA PHE A 86 32.670 48.303 4.288 0.50 26.45 C
ATOM 7 O PHE A 86 31.469 50.326 3.758 0.50 28.47 O
ATOM 8 CB PHE A 86 32.977 47.392 3.090 0.50 25.35 C
ATOM 9 N TRP A 171 23.397 37.507 -1.161 0.50 18.04 N
ATOM 10 CA TRP A 171 23.458 36.846 0.143 0.50 20.46 C
ATOM 11 O TRP A 171 22.235 34.954 0.951 0.50 22.45 O
ATOM 12 CB TRP A 171 23.647 37.866 1.275 0.50 18.83 C
ATOM 13 N PHE A 208 32.221 42.624 -5.829 0.50 19.96 N
ATOM 14 CA PHE A 208 31.905 43.710 -4.909 0.50 20.31 C
ATOM 15 O PHE A 208 32.852 45.936 -5.051 0.50 17.69 O
ATOM 16 CB PHE A 208 31.726 43.102 -3.518 0.50 19.90 C
END
"""
f_name = self.tempfile(content=content)
with open(f_name, "r") as f_in:
contact_file = PdbParser().read(f_in, distance_cutoff=8, atom_type="CA")
contact_map1 = contact_file.top_map
self.assertEqual(1, len(contact_file))
self.assertEqual(1, len(contact_map1))
self.assertEqual([36], [c.res1_seq for c in contact_map1 if c.true_positive])
self.assertEqual([86], [c.res2_seq for c in contact_map1 if c.true_positive])
self.assertEqual([0.934927], [c.raw_score for c in contact_map1 if c.true_positive])
def test_read_3(self):
content = """ATOM 1 N TYR A 36 39.107 51.628 3.103 0.50 43.13 N
ATOM 2 CA TYR A 36 38.300 50.814 2.204 0.50 41.80 C
ATOM 3 O TYR A 36 38.712 48.587 1.405 0.50 41.03 O
ATOM 4 CB TYR A 36 37.586 51.694 1.175 0.50 41.61 C
ATOM 5 N PHE A 86 32.465 47.498 5.487 0.50 25.81 N
ATOM 6 CA PHE A 86 32.670 48.303 4.288 0.50 26.45 C
ATOM 7 O PHE A 86 31.469 50.326 3.758 0.50 28.47 O
ATOM 8 CB PHE A 86 32.977 47.392 3.090 0.50 25.35 C
ATOM 9 N TRP A 171 23.397 37.507 -1.161 0.50 18.04 N
ATOM 10 CA TRP A 171 23.458 36.846 0.143 0.50 20.46 C
ATOM 11 O TRP A 171 22.235 34.954 0.951 0.50 22.45 O
ATOM 12 CB TRP A 171 23.647 37.866 1.275 0.50 18.83 C
ATOM 13 N PHE A 208 32.221 42.624 -5.829 0.50 19.96 N
ATOM 14 CA PHE A 208 31.905 43.710 -4.909 0.50 20.31 C
ATOM 15 O PHE A 208 32.852 45.936 -5.051 0.50 17.69 O
ATOM 16 CB PHE A 208 31.726 43.102 -3.518 0.50 19.90 C
END
"""
f_name = self.tempfile(content=content)
with open(f_name, "r") as f_in:
contact_file = PdbParser().read(f_in, distance_cutoff=7, atom_type="CB")
contact_map1 = contact_file.top_map
self.assertEqual(1, len(contact_file))
self.assertEqual(1, len(contact_map1))
self.assertEqual([36], [c.res1_seq for c in contact_map1 if c.true_positive])
self.assertEqual([86], [c.res2_seq for c in contact_map1 if c.true_positive])
self.assertEqual([0.934108], [c.raw_score for c in contact_map1 if c.true_positive])
def test_read_4(self):
content = """ATOM 1 N TYR A 36 39.107 51.628 3.103 0.50 43.13 N
ATOM 2 CA TYR A 36 38.300 50.814 2.204 0.50 41.80 C
ATOM 3 O TYR A 36 38.712 48.587 1.405 0.50 41.03 O
ATOM 4 CB TYR A 36 37.586 51.694 1.175 0.50 41.61 C
ATOM 5 N PHE A 86 32.465 47.498 5.487 0.50 25.81 N
ATOM 6 CA PHE A 86 32.670 48.303 4.288 0.50 26.45 C
ATOM 7 O PHE A 86 31.469 50.326 3.758 0.50 28.47 O
ATOM 8 CB PHE A 86 32.977 47.392 3.090 0.50 25.35 C
TER
ATOM 9 N TRP B 171 23.397 37.507 -1.161 0.50 18.04 N
ATOM 10 CA TRP B 171 23.458 36.846 0.143 0.50 20.46 C
ATOM 11 O TRP B 171 22.235 34.954 0.951 0.50 22.45 O
ATOM 12 CB TRP B 171 23.647 37.866 1.275 0.50 18.83 C
ATOM 13 N PHE B 208 32.221 42.624 -5.829 0.50 19.96 N
ATOM 14 CA PHE B 208 31.905 43.710 -4.909 0.50 20.31 C
ATOM 15 O PHE B 208 32.852 45.936 -5.051 0.50 17.69 O
ATOM 16 CB PHE B 208 31.726 43.102 -3.518 0.50 19.90 C
END
"""
f_name = self.tempfile(content=content)
with open(f_name, "r") as f_in:
contact_file = PdbParser().read(f_in, distance_cutoff=8, atom_type="CB")
# Two maps because no contacts in B
contact_map1 = contact_file["A"] # chain A
contact_map2 = contact_file["AB"] # chain AB
contact_map3 = contact_file["BA"] # chain BA
self.assertEqual(3, len(contact_file))
self.assertEqual(1, len(contact_map1))
self.assertEqual(["A", "A"], [contact_map1.top_contact.res1_chain, contact_map1.top_contact.res2_chain])
self.assertEqual([36, 86], [contact_map1.top_contact.res1_seq, contact_map1.top_contact.res2_seq])
self.assertEqual(1, len(contact_map2))
self.assertEqual(["A", "B"], [contact_map2.top_contact.res1_chain, contact_map2.top_contact.res2_chain])
self.assertEqual([86, 208], [contact_map2.top_contact.res1_seq, contact_map2.top_contact.res2_seq])
self.assertEqual(1, len(contact_map3))
self.assertEqual(["B", "A"], [contact_map3.top_contact.res1_chain, contact_map3.top_contact.res2_chain])
self.assertEqual([208, 86], [contact_map3.top_contact.res1_seq, contact_map3.top_contact.res2_seq])
def test_read_5(self):
content = """ATOM 1 N TYR A 36 39.107 51.628 3.103 0.50 43.13 N
ATOM 2 CA TYR A 36 38.300 50.814 2.204 0.50 41.80 C
ATOM 3 O TYR A 36 38.712 48.587 1.405 0.50 41.03 O
ATOM 4 CB TYR A 36 37.586 51.694 1.175 0.50 41.61 C
ATOM 5 N PHE A 86 32.465 47.498 5.487 0.50 25.81 N
ATOM 6 CA PHE A 86 32.670 48.303 4.288 0.50 26.45 C
ATOM 7 O PHE A 86 31.469 50.326 3.758 0.50 28.47 O
ATOM 8 CB PHE A 86 32.977 47.392 3.090 0.50 25.35 C
ATOM 9 N TRP A 171 23.397 37.507 -1.161 0.50 18.04 N
ATOM 10 CA TRP A 171 23.458 36.846 0.143 0.50 20.46 C
ATOM 11 O TRP A 171 22.235 34.954 0.951 0.50 22.45 O
ATOM 12 CB TRP A 171 23.647 37.866 1.275 0.50 18.83 C
ATOM 13 N PHE A 208 32.221 42.624 -5.829 0.50 19.96 N
ATOM 14 CA PHE A 208 31.905 43.710 -4.909 0.50 20.31 C
ATOM 15 O PHE A 208 32.852 45.936 -5.051 0.50 17.69 O
ATOM 16 CB PHE A 208 31.726 43.102 -3.518 0.50 19.90 C
END
"""
f_name = self.tempfile(content=content)
with open(f_name, "r") as f_in:
contact_file = PdbParser().read(f_in, distance_cutoff=0, atom_type="CB")
contact_map1 = contact_file.top_map
self.assertEqual(1, len(contact_file))
self.assertEqual(6, len(contact_map1))
self.assertEqual([36, 36, 36, 86, 86, 171], [c.res1_seq for c in contact_map1 if c.true_positive])
self.assertEqual([86, 171, 208, 171, 208, 208], [c.res2_seq for c in contact_map1 if c.true_positive])
if __name__ == "__main__":
unittest.main(verbosity=2)
| gpl-3.0 | 5,277,282,713,027,556,000 | 60.798817 | 112 | 0.519724 | false |
ncdesouza/bookworm | env/lib/python2.7/site-packages/jinja2/testsuite/core_tags.py | 412 | 11858 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.core_tags
~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the core tags like for and if.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, TemplateSyntaxError, UndefinedError, \
DictLoader
env = Environment()
class ForLoopTestCase(JinjaTestCase):
def test_simple(self):
tmpl = env.from_string('{% for item in seq %}{{ item }}{% endfor %}')
assert tmpl.render(seq=list(range(10))) == '0123456789'
def test_else(self):
tmpl = env.from_string('{% for item in seq %}XXX{% else %}...{% endfor %}')
assert tmpl.render() == '...'
def test_empty_blocks(self):
tmpl = env.from_string('<{% for item in seq %}{% else %}{% endfor %}>')
assert tmpl.render() == '<>'
def test_context_vars(self):
tmpl = env.from_string('''{% for item in seq -%}
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.length }}###{% endfor %}''')
one, two, _ = tmpl.render(seq=[0, 1]).split('###')
(one_index, one_index0, one_revindex, one_revindex0, one_first,
one_last, one_length) = one.split('|')
(two_index, two_index0, two_revindex, two_revindex0, two_first,
two_last, two_length) = two.split('|')
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == 'True' and two_first == 'False'
assert one_last == 'False' and two_last == 'True'
assert one_length == two_length == '2'
def test_cycling(self):
tmpl = env.from_string('''{% for item in seq %}{{
loop.cycle('<1>', '<2>') }}{% endfor %}{%
for item in seq %}{{ loop.cycle(*through) }}{% endfor %}''')
output = tmpl.render(seq=list(range(4)), through=('<1>', '<2>'))
assert output == '<1><2>' * 4
def test_scope(self):
tmpl = env.from_string('{% for item in seq %}{% endfor %}{{ item }}')
output = tmpl.render(seq=list(range(10)))
assert not output
def test_varlen(self):
def inner():
for item in range(5):
yield item
tmpl = env.from_string('{% for item in iter %}{{ item }}{% endfor %}')
output = tmpl.render(iter=inner())
assert output == '01234'
def test_noniter(self):
tmpl = env.from_string('{% for item in none %}...{% endfor %}')
self.assert_raises(TypeError, tmpl.render)
def test_recursive(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
assert tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]) == '[1<[1][2]>][2<[1][2]>][3<[a]>]'
def test_recursive_depth0(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth0 }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]')
def test_recursive_depth(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]')
def test_looploop(self):
tmpl = env.from_string('''{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}''')
assert tmpl.render(table=['ab', 'cd']) == '[1|1][1|2][2|1][2|2]'
def test_reversed_bug(self):
tmpl = env.from_string('{% for i in items %}{{ i }}'
'{% if not loop.last %}'
',{% endif %}{% endfor %}')
assert tmpl.render(items=reversed([3, 2, 1])) == '1,2,3'
def test_loop_errors(self):
tmpl = env.from_string('''{% for item in [1] if loop.index
== 0 %}...{% endfor %}''')
self.assert_raises(UndefinedError, tmpl.render)
tmpl = env.from_string('''{% for item in [] %}...{% else
%}{{ loop }}{% endfor %}''')
assert tmpl.render() == ''
def test_loop_filter(self):
tmpl = env.from_string('{% for item in range(10) if item '
'is even %}[{{ item }}]{% endfor %}')
assert tmpl.render() == '[0][2][4][6][8]'
tmpl = env.from_string('''
{%- for item in range(10) if item is even %}[{{
loop.index }}:{{ item }}]{% endfor %}''')
assert tmpl.render() == '[1:0][2:2][3:4][4:6][5:8]'
def test_loop_unassignable(self):
self.assert_raises(TemplateSyntaxError, env.from_string,
'{% for loop in seq %}...{% endfor %}')
def test_scoped_special_var(self):
t = env.from_string('{% for s in seq %}[{{ loop.first }}{% for c in s %}'
'|{{ loop.first }}{% endfor %}]{% endfor %}')
assert t.render(seq=('ab', 'cd')) == '[True|True|False][False|True|False]'
def test_scoped_loop_var(self):
t = env.from_string('{% for x in seq %}{{ loop.first }}'
'{% for y in seq %}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalse'
t = env.from_string('{% for x in seq %}{% for y in seq %}'
'{{ loop.first }}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalseTrueFalse'
def test_recursive_empty_loop_iter(self):
t = env.from_string('''
{%- for item in foo recursive -%}{%- endfor -%}
''')
assert t.render(dict(foo=[])) == ''
def test_call_in_loop(self):
t = env.from_string('''
{%- macro do_something() -%}
[{{ caller() }}]
{%- endmacro %}
{%- for i in [1, 2, 3] %}
{%- call do_something() -%}
{{ i }}
{%- endcall %}
{%- endfor -%}
''')
assert t.render() == '[1][2][3]'
def test_scoping_bug(self):
t = env.from_string('''
{%- for item in foo %}...{{ item }}...{% endfor %}
{%- macro item(a) %}...{{ a }}...{% endmacro %}
{{- item(2) -}}
''')
assert t.render(foo=(1,)) == '...1......2...'
def test_unpacking(self):
tmpl = env.from_string('{% for a, b, c in [[1, 2, 3]] %}'
'{{ a }}|{{ b }}|{{ c }}{% endfor %}')
assert tmpl.render() == '1|2|3'
class IfConditionTestCase(JinjaTestCase):
def test_simple(self):
tmpl = env.from_string('''{% if true %}...{% endif %}''')
assert tmpl.render() == '...'
def test_elif(self):
tmpl = env.from_string('''{% if false %}XXX{% elif true
%}...{% else %}XXX{% endif %}''')
assert tmpl.render() == '...'
def test_else(self):
tmpl = env.from_string('{% if false %}XXX{% else %}...{% endif %}')
assert tmpl.render() == '...'
def test_empty(self):
tmpl = env.from_string('[{% if true %}{% else %}{% endif %}]')
assert tmpl.render() == '[]'
def test_complete(self):
tmpl = env.from_string('{% if a %}A{% elif b %}B{% elif c == d %}'
'C{% else %}D{% endif %}')
assert tmpl.render(a=0, b=False, c=42, d=42.0) == 'C'
def test_no_scope(self):
tmpl = env.from_string('{% if a %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render(a=True) == '1'
tmpl = env.from_string('{% if true %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render() == '1'
class MacrosTestCase(JinjaTestCase):
env = Environment(trim_blocks=True)
def test_simple(self):
tmpl = self.env.from_string('''\
{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}''')
assert tmpl.render() == 'Hello Peter!'
def test_scoping(self):
tmpl = self.env.from_string('''\
{% macro level1(data1) %}
{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}''')
assert tmpl.render() == 'foo|bar'
def test_arguments(self):
tmpl = self.env.from_string('''\
{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}''')
assert tmpl.render() == '||c|d|a||c|d|a|b|c|d|1|2|3|d'
def test_varargs(self):
tmpl = self.env.from_string('''\
{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}''')
assert tmpl.render() == '1|2|3'
def test_simple_call(self):
tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_complex_call(self):
tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller('data') }}]]{% endmacro %}\
{% call(data) test() %}{{ data }}{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_caller_undefined(self):
tmpl = self.env.from_string('''\
{% set caller = 42 %}\
{% macro test() %}{{ caller is not defined }}{% endmacro %}\
{{ test() }}''')
assert tmpl.render() == 'True'
def test_include(self):
self.env = Environment(loader=DictLoader({'include':
'{% macro test(foo) %}[{{ foo }}]{% endmacro %}'}))
tmpl = self.env.from_string('{% from "include" import test %}{{ test("foo") }}')
assert tmpl.render() == '[foo]'
def test_macro_api(self):
tmpl = self.env.from_string('{% macro foo(a, b) %}{% endmacro %}'
'{% macro bar() %}{{ varargs }}{{ kwargs }}{% endmacro %}'
'{% macro baz() %}{{ caller() }}{% endmacro %}')
assert tmpl.module.foo.arguments == ('a', 'b')
assert tmpl.module.foo.defaults == ()
assert tmpl.module.foo.name == 'foo'
assert not tmpl.module.foo.caller
assert not tmpl.module.foo.catch_kwargs
assert not tmpl.module.foo.catch_varargs
assert tmpl.module.bar.arguments == ()
assert tmpl.module.bar.defaults == ()
assert not tmpl.module.bar.caller
assert tmpl.module.bar.catch_kwargs
assert tmpl.module.bar.catch_varargs
assert tmpl.module.baz.caller
def test_callself(self):
tmpl = self.env.from_string('{% macro foo(x) %}{{ x }}{% if x > 1 %}|'
'{{ foo(x - 1) }}{% endif %}{% endmacro %}'
'{{ foo(5) }}')
assert tmpl.render() == '5|4|3|2|1'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ForLoopTestCase))
suite.addTest(unittest.makeSuite(IfConditionTestCase))
suite.addTest(unittest.makeSuite(MacrosTestCase))
return suite
| gpl-3.0 | 4,455,206,967,161,460,700 | 37.878689 | 90 | 0.48676 | false |
iamroot12C/linux | tools/perf/scripts/python/net_dropmonitor.py | 1812 | 1749 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
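# Illustrative sketch (not part of the original script): with kallsyms sorted by
# address, get_sym resolves a raw instruction address to the nearest preceding
# symbol plus an offset, e.g. get_sym("18446744071578845200") might return
# ('kfree_skb', 0x10). The address and symbol shown are hypothetical.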
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 | 6,164,876,250,087,427,000 | 22.32 | 90 | 0.641509 | false |
noroutine/ansible | lib/ansible/utils/module_docs_fragments/openstack.py | 133 | 3961 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard openstack documentation fragment
DOCUMENTATION = '''
options:
cloud:
description:
- Named cloud to operate against. Provides default values for I(auth) and
I(auth_type). This parameter is not needed if I(auth) is provided or if
OpenStack OS_* environment variables are present.
required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains if the cloud supports them. For other plugins,
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided or
OpenStack OS_* environment variables are present.
required: false
auth_type:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
required: false
default: password
region_name:
description:
- Name of the region.
required: false
wait:
description:
- Should ansible wait until the requested resource is complete.
required: false
default: "yes"
choices: ["yes", "no"]
timeout:
description:
- How long should ansible wait for the requested resource.
required: false
default: 180
api_timeout:
description:
- How long should the socket layer wait before timing out for API calls.
If this is omitted, nothing will be passed to the requests library.
required: false
default: None
validate_certs:
description:
- Whether or not SSL API requests should be verified. Before 2.3 this defaulted to True.
required: false
default: null
aliases: ['verify']
cacert:
description:
- A path to a CA Cert bundle that can be used as part of verifying
SSL API requests.
required: false
default: None
cert:
description:
- A path to a client certificate to use as part of the SSL transaction.
required: false
default: None
key:
description:
- A path to a client key to use as part of the SSL transaction.
required: false
default: None
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
choices: [public, internal, admin]
required: false
default: public
requirements:
- python >= 2.7
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be used instead of providing explicit values.
- Auth information is driven by os-client-config, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
plays. More information can be found at
U(http://docs.openstack.org/developer/os-client-config)
'''
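# Illustrative sketch (not part of the original fragment): a playbook task that
# relies on these shared options. The module name, cloud entry and values are
# hypothetical and shown only to illustrate how the documented parameters are
# supplied.
#
#     - os_server:
#         cloud: mycloud
#         state: present
#         name: web-01
#         wait: yes
#         timeout: 300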
| gpl-3.0 | -2,224,290,216,266,020,600 | 35.675926 | 94 | 0.70664 | false |
crakensio/django_training | lib/python2.7/site-packages/pip/vcs/bazaar.py | 393 | 4943 | import os
import tempfile
import re
from pip.backwardcompat import urlparse
from pip.log import logger
from pip.util import rmtree, display_path, call_subprocess
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
bundle_file = 'bzr-branch.txt'
schemes = ('bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', 'bzr+lp')
guide = ('# This was a Bazaar branch; to make it a branch again run:\n'
'bzr branch -r %(rev)s %(url)s .\n')
def __init__(self, url=None, *args, **kwargs):
super(Bazaar, self).__init__(url, *args, **kwargs)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urlparse, 'uses_fragment', None):
urlparse.uses_fragment.extend(['lp'])
urlparse.non_hierarchical.extend(['lp'])
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
match = re.search(r'^bzr\s*branch\s*-r\s*(\d*)', line)
if match:
rev = match.group(1).strip()
url = line[match.end():].strip().split(None, 1)[0]
if url and rev:
return url, rev
return None, None
def export(self, location):
"""Export the Bazaar repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
call_subprocess([self.cmd, 'export', location], cwd=temp_dir,
filter_stdout=self._filter, show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
call_subprocess([self.cmd, 'switch', url], cwd=dest)
def update(self, dest, rev_options):
call_subprocess(
[self.cmd, 'pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Checking out %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess(
[self.cmd, 'branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
        # hotfix the URL scheme after removing bzr+ from bzr+ssh://, then re-add it
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
def get_url(self, location):
urls = call_subprocess(
[self.cmd, 'info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = call_subprocess(
[self.cmd, 'revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_tag_revs(self, location):
tags = call_subprocess(
[self.cmd, 'tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
tag_revs = self.get_tag_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
else:
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
vcs.register(Bazaar)
| cc0-1.0 | -1,570,173,747,630,039,300 | 36.732824 | 90 | 0.544609 | false |
agoravoting/agora-results | agora_results/pipes/pdf.py | 1 | 18370 | # -*- coding:utf-8 -*-
# This file is part of agora-results.
# Copyright (C) 2016-2021 Agora Voting SL <[email protected]>
# agora-results is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License.
# agora-results is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with agora-results. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import json
import requests
from datetime import datetime, timedelta
from reportlab.lib import colors
from reportlab.platypus import (
SimpleDocTemplate,
Paragraph,
Spacer,
Table,
TableStyle,
Image
)
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.lib.enums import TA_RIGHT, TA_LEFT, TA_CENTER
from reportlab.pdfgen import canvas
from reportlab.lib.units import mm
import gettext
import os
def configure_pdf(
data_list,
title=None,
first_description_paragraph=None,
last_description_paragraph=None,
languages=None
):
data = data_list[0]
data['pdf'] = {}
if title:
assert(isinstance(title, str))
data['pdf']['title'] = title
if first_description_paragraph:
assert(isinstance(first_description_paragraph, str))
data['pdf']['first_description_paragraph'] = first_description_paragraph
if last_description_paragraph:
assert(isinstance(last_description_paragraph, str))
data['pdf']['last_description_paragraph'] = last_description_paragraph
if languages:
assert(isinstance(languages, list))
for language in languages:
assert(isinstance(language, str))
data['pdf']['languages'] = languages
def gen_text(
text,
size=None,
bold=False,
align=None,
color='black',
fontName=None
):
if not isinstance(text, str):
text = text.__str__()
p = ParagraphStyle('test')
if fontName:
p.fontName = fontName
if size:
p.fontSize = size
p.leading = size * 1.2
if bold:
text = '<b>%s</b>' % text
p.textColor = color
if align:
p.alignment = align
return Paragraph(text, p)
def get_election_cfg(election_id):
headers = {'content-type': 'application/json'}
base_url = 'http://localhost:9000/api'
url = '%s/election/%d' % (base_url, election_id)
try:
r = requests.get(url, headers=headers, timeout=5)
except requests.exceptions.Timeout:
raise Exception(
'Timeout when requesting election_id = %s' % election_id
)
if r.status_code != 200:
print(r.status_code, r.text)
raise Exception(
'Invalid status code: %d for election_id = %s' % (
r.status_code,
election_id
)
)
return r.json()
class NumberedCanvas(canvas.Canvas):
def __init__(self, *args, **kwargs):
canvas.Canvas.__init__(self, *args, **kwargs)
self._saved_page_states = []
def showPage(self):
self._saved_page_states.append(dict(self.__dict__))
self._startPage()
def save(self):
"""add page info to each page (page x of y)"""
num_pages = len(self._saved_page_states)
for state in self._saved_page_states:
self.__dict__.update(state)
self.draw_page_number(num_pages)
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
def draw_page_number(self, page_count):
self.setFont("Helvetica", 7)
self.drawRightString(200*mm, 20*mm,
"Page %d of %d" % (self._pageNumber, page_count))
def _header_footer(canvas, doc):
# Save the state of our canvas so we can draw on it
canvas.saveState()
styles = getSampleStyleSheet()
# Header
header = Image(
'/home/agoraelections/agora-results/img/nvotes_logo.jpg',
height=20,
width=80
)
header.hAlign = 'RIGHT'
w, h = header.wrap(doc.width, doc.topMargin)
header.drawOn(
canvas,
doc.width - w + doc.rightMargin,
doc.height + h + doc.bottomMargin - doc.topMargin
)
# Release the canvas
canvas.restoreState()
def pdf_print(election_results, config_folder, election_id):
localedir = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'locale'
)
translate = gettext.translation(
'pipes',
localedir,
languages=election_results.get('pdf', dict()).get('languages', None),
fallback=True
)
_ = translate.gettext
try:
jsonconfig = get_election_cfg(election_id)
election_title = jsonconfig['payload']['configuration']['title']
except:
election_title = ""
tx_description = _(
'Detailed and question by question results of the election ' +
'{election_id} titled <u>"{election_title}"</u>.'
).format(
election_id=election_id,
election_title=election_title
)
tx_title = _(
'Results of the election tally {election_id} - {election_title}'
).format(
election_id=election_id,
election_title=election_title
)
pdf_path = os.path.join(config_folder, "%s.results.pdf" % election_id)
styleSheet = getSampleStyleSheet()
doc = SimpleDocTemplate(
pdf_path,
rightMargin=50,
leftMargin=50,
topMargin=35,
bottomMargin=80
)
elements = []
the_title = tx_title
if 'pdf' in election_results and 'title' in election_results['pdf']:
the_title = election_results['pdf']['title']
elements.append(Spacer(0, 15))
elements.append(gen_text(the_title, size=20, bold=True, align = TA_LEFT))
elements.append(Spacer(0, 15))
if (
'pdf' in election_results and
'first_description_paragraph' in election_results['pdf']
):
elements.append(
gen_text(
election_results['pdf']['first_description_paragraph'],
size=12,
align=TA_LEFT
)
)
elements.append(Spacer(0, 15))
elements.append(gen_text(tx_description, size=12, align = TA_LEFT))
elements.append(Spacer(0, 15))
if (
'pdf' in election_results and
'last_description_paragraph' in election_results['pdf']
):
elements.append(
gen_text(
election_results['pdf']['last_description_paragraph'],
size=12,
align=TA_LEFT
)
)
elements.append(Spacer(0, 15))
doc.title = tx_title
'''
Returns the percentage points, ensuring it works with base=0
'''
def get_percentage(num, base):
if base == 0:
return 0
else:
return num/base
counts = election_results['results']['questions']
for question, i in zip(counts, range(len(counts))):
blank_votes = question['totals']['blank_votes']
null_votes = question['totals']['null_votes']
valid_votes = question['totals']['valid_votes']
total_votes = blank_votes + null_votes + valid_votes
percent_base = question['answer_total_votes_percentage']
if percent_base == "over-total-votes":
base_num = total_votes
elif percent_base == "over-total-valid-votes":
base_num = question['totals']['valid_votes']
elif (
"over-total-valid-points" == percent_base and
"valid_points" in question['totals']
):
base_num = question['totals']['valid_points']
elements.append(
gen_text(
_('Question {question_index}: {question_title}').format(
question_index=i+1,
question_title=question['title']
),
size=15,
bold=True,
align=TA_LEFT
)
)
elements.append(Spacer(0, 15))
t = Table([[
gen_text(
_('Configuration Data'),
align=TA_CENTER
)
]])
table_style = TableStyle(
[
('BACKGROUND',(0,0),(-1,-1),'#b6d7a8'),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
t.setStyle(table_style)
elements.append(t)
tally_type = {
"plurality-at-large": _(
"First past the post, Plurality or Plurality at Large"
),
"cumulative": _("Cumulative voting"),
"borda-nauru": _("Borda Nauru or Borda Dowdall (1/n)"),
"borda": "Borda Count (traditional)",
"pairwise-beta": _("Pairwise comparison (beta distribution)"),
"desborda3": _("Desborda3"),
"desborda2": _("Desborda2"),
"desborda": _("Desborda")
}
data = [
[
gen_text(
_('Tally system'),
align=TA_RIGHT
),
gen_text(tally_type[question['tally_type']], align=TA_LEFT)
],
[
gen_text(
_('Minimum number of options a voter can select'),
align=TA_RIGHT
),
gen_text(str(question['min']), align=TA_LEFT)
],
[
gen_text(
_('Maximum number of options a voter can select'),
align=TA_RIGHT
),
gen_text(str(question['max']), align=TA_LEFT)
],
[
gen_text(
_('Number of winning options'),
align=TA_RIGHT
),
gen_text(str(question['num_winners']), align=TA_LEFT)
],
[
gen_text(
_('Options appear in the voting booth in random order'),
align=TA_RIGHT
),
gen_text(
_('Yes')
if (
'shuffle_all_options' in question['extra_options'] and
question['extra_options']['shuffle_all_options']
)
else _('No'),
align=TA_LEFT
)
]
]
table_style = TableStyle(
[
('BACKGROUND',(0,0),(0,-1),'#efefef'),
('INNERGRID', (0,0), (-1,-1), 0.5, colors.grey),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
t = Table(data)
t.setStyle(table_style)
elements.append(t)
elements.append(Spacer(0, 15))
t = Table(
[
[
gen_text(
_('Participation in question {question_index}').format(
question_index=i + 1
),
align=TA_CENTER
)
]
]
)
table_style = TableStyle(
[
('BACKGROUND',(0,0),(-1,-1),'#b6d7a8'),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
t.setStyle(table_style)
elements.append(t)
data = [
[
gen_text(_('Total number of votes cast'), align=TA_RIGHT),
gen_text(str(total_votes), align=TA_LEFT)
],
[
gen_text(_('Blank votes'), align=TA_RIGHT),
gen_text(
_(
"{blank_votes} ({percentage:.2%} over the total " +
"number of votes)"
).format(
blank_votes=blank_votes,
percentage=get_percentage(blank_votes, total_votes)
),
align=TA_LEFT
)
],
[
gen_text(_('Null votes'), align=TA_RIGHT),
gen_text(
_(
"{null_votes} ({percentage:.2%} over the total " +
"number of votes)"
).format(
null_votes=null_votes,
percentage=get_percentage(null_votes, total_votes)
),
align=TA_LEFT
)
],
[
gen_text(
_('Total number of votes for options'),
align=TA_RIGHT
),
gen_text(
_(
"{valid_votes} ({percentage:.2%} over the total " +
"number of votes)"
).format(
valid_votes=valid_votes,
percentage=get_percentage(valid_votes, total_votes)
),
align=TA_LEFT
)
],
[
gen_text(
_('Voting period start date'),
align=TA_RIGHT
),
gen_text(
str(
datetime.strptime(
jsonconfig['payload']['startDate'],
'%Y-%m-%dT%H:%M:%S.%f'
)
),
align=TA_LEFT
)
],
[
gen_text(
_('Voting period end date'),
align=TA_RIGHT
),
gen_text(
str(
datetime.strptime(
jsonconfig['payload']['endDate'],
'%Y-%m-%dT%H:%M:%S.%f'
)
),
align=TA_LEFT
)
],
[
gen_text(_('Tally end date'), align=TA_RIGHT),
gen_text(
str(
datetime.strptime(
jsonconfig['date'],
'%Y-%m-%d %H:%M:%S.%f'
)
),
align=TA_LEFT
)
]
]
table_style = TableStyle(
[
('BACKGROUND',(0,0),(0,-1),'#efefef'),
('INNERGRID', (0,0), (-1,-1), 0.5, colors.grey),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
        t = Table(data)
t.setStyle(table_style)
elements.append(t)
elements.append(Spacer(0, 15))
t = Table([[
gen_text(
_('Candidate results'),
align=TA_CENTER
)
]])
table_style = TableStyle(
[
('BACKGROUND',(0,0),(-1,-1),'#b6d7a8'),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
t.setStyle(table_style)
elements.append(t)
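        # Winning candidates are listed first, ordered by winning position;
        # the remaining candidates follow, ordered by descending vote count
        # (ties broken alphabetically by candidate text).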
winners = sorted(
[
answer
for answer in question['answers']
if answer['winner_position'] is not None
],
key=lambda a: a['winner_position']
)
losers_by_name = sorted(
[
answer for answer in question['answers']
if answer['winner_position'] is None
],
key=lambda a: a['text']
)
losers = sorted(
losers_by_name,
key=lambda a: float(a['total_count']),
reverse=True
)
data = [
[
gen_text(
_('Name'),
align=TA_RIGHT
),
gen_text(
_('Points'),
align=TA_CENTER
),
gen_text(
_('Winning position'),
align=TA_LEFT
)
]
]
table_style = TableStyle(
[
('BACKGROUND',(0,0),(-1,0),'#cccccc'),
('BACKGROUND',(0,1),(0,-1),'#efefef'),
('BACKGROUND',(-1,1),(-1,-1),'#efefef'),
('INNERGRID', (0,0), (-1,-1), 0.5, colors.grey),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
for answer in winners:
answer_text = answer['text']
if dict(title='isWriteInResult', url='true') in answer.get('urls', []):
answer_text = _('{candidate_text} (Write-in)').format(
candidate_text=answer['text']
)
data.append(
[
                gen_text(answer_text, bold=True, align=TA_RIGHT),
gen_text(
'%d' % answer['total_count'],
bold=True,
align=TA_CENTER
),
gen_text(
'%dº' % (answer['winner_position'] + 1),
bold=True,
align=TA_LEFT
)
]
)
for loser in losers:
loser_text = loser['text']
if dict(title='isWriteInResult', url='true') in loser.get('urls', []):
loser_text = _('{candidate_text} (Write-in)').format(
candidate_text=loser['text']
)
data.append(
[
gen_text(loser_text, align=TA_RIGHT),
gen_text(
'%d' % loser['total_count'],
align=TA_CENTER
),
gen_text('-', align=TA_LEFT)
]
)
t = Table(data)
t.setStyle(table_style)
elements.append(t)
elements.append(Spacer(0, 15))
doc.build(
elements,
onFirstPage=_header_footer,
onLaterPages=_header_footer,
canvasmaker=NumberedCanvas
)
| agpl-3.0 | -5,501,727,474,739,686,000 | 30.507719 | 83 | 0.463008 | false |
PyLearner/tp-qemu | generic/tests/ioquit.py | 9 | 1206 | import logging
import time
import random
from autotest.client.shared import error
@error.context_aware
def run(test, params, env):
"""
Emulate the poweroff under IO workload(dd so far) with signal SIGKILL.
1) Boot a VM
2) Add IO workload for guest OS
3) Sleep for a random time
4) Kill the VM
:param test: Kvm test object
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
login_timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=login_timeout)
session2 = vm.wait_for_login(timeout=login_timeout)
bg_cmd = params.get("background_cmd")
error.context("Add IO workload for guest OS.", logging.info)
session.cmd_output(bg_cmd, timeout=60)
error.context("Verify the background process is running")
check_cmd = params.get("check_cmd")
session2.cmd(check_cmd, timeout=60)
error.context("Sleep for a random time", logging.info)
time.sleep(random.randrange(30, 100))
session2.cmd(check_cmd, timeout=60)
error.context("Kill the VM", logging.info)
vm.process.close()
| gpl-2.0 | 7,649,905,783,876,988,000 | 28.414634 | 74 | 0.687396 | false |
scripteed/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-strip-default.py | 232 | 2448 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the default STRIP_STYLEs match between different generators.
"""
import TestGyp
import re
import subprocess
import sys
import time
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR='strip'
test.run_gyp('test-defaults.gyp', chdir=CHDIR)
test.build('test-defaults.gyp', test.ALL, chdir=CHDIR)
# Lightweight check if stripping was done.
def OutPath(s):
return test.built_file_path(s, chdir=CHDIR)
def CheckNsyms(p, o_expected):
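    # Run `nm` on the built binary and compare its normalized symbol listing
    # against the expected output, failing the test on any mismatch.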
proc = subprocess.Popen(['nm', '-aU', p], stdout=subprocess.PIPE)
o = proc.communicate()[0]
# Filter out mysterious "00 0000 OPT radr://5614542" symbol which
# is apparently only printed on the bots (older toolchain?).
# Yes, "radr", not "rdar".
o = ''.join(filter(lambda s: 'radr://5614542' not in s, o.splitlines(True)))
o = o.replace('A', 'T')
o = re.sub(r'^[a-fA-F0-9]+', 'XXXXXXXX', o, flags=re.MULTILINE)
assert not proc.returncode
if o != o_expected:
print 'Stripping: Expected symbols """\n%s""", got """\n%s"""' % (
o_expected, o)
test.fail_test()
CheckNsyms(OutPath('libsingle_dylib.dylib'),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(OutPath('single_so.so'),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(OutPath('single_exe'),
"""\
XXXXXXXX T __mh_execute_header
""")
CheckNsyms(test.built_file_path(
'bundle_dylib.framework/Versions/A/bundle_dylib', chdir=CHDIR),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(test.built_file_path(
'bundle_so.bundle/Contents/MacOS/bundle_so', chdir=CHDIR),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(test.built_file_path(
'bundle_exe.app/Contents/MacOS/bundle_exe', chdir=CHDIR),
"""\
XXXXXXXX T __mh_execute_header
""")
test.pass_test()
| gpl-3.0 | 4,680,655,455,321,747,000 | 24.768421 | 80 | 0.684641 | false |
saimn/astropy | astropy/wcs/wcsapi/tests/test_utils.py | 11 | 1548 | import numpy as np
from numpy.testing import assert_allclose
import pytest
from pytest import raises
from astropy import units as u
from astropy.wcs import WCS
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs.wcsapi.utils import deserialize_class, wcs_info_str
def test_construct():
result = deserialize_class(('astropy.units.Quantity', (10,), {'unit': 'deg'}))
assert_quantity_allclose(result, 10 * u.deg)
def test_noconstruct():
result = deserialize_class(('astropy.units.Quantity', (), {'unit': 'deg'}), construct=False)
assert result == (u.Quantity, (), {'unit': 'deg'})
def test_invalid():
with raises(ValueError) as exc:
deserialize_class(('astropy.units.Quantity', (), {'unit': 'deg'}, ()))
assert exc.value.args[0] == 'Expected a tuple of three values'
DEFAULT_1D_STR = """
WCS Transformation
This transformation has 1 pixel and 1 world dimensions
Array shape (Numpy order): None
Pixel Dim Axis Name Data size Bounds
0 None None None
World Dim Axis Name Physical Type Units
0 None None unknown
Correlation between pixel and world axes:
Pixel Dim
World Dim 0
0 yes
"""
def test_wcs_info_str():
# The tests in test_sliced_low_level_wcs.py excercise wcs_info_str
# extensively. This test is to ensure that the function exists and the
# API of the function works as expected.
wcs_empty = WCS(naxis=1)
assert wcs_info_str(wcs_empty).strip() == DEFAULT_1D_STR.strip()
| bsd-3-clause | -1,165,886,237,608,343,800 | 23.967742 | 96 | 0.674419 | false |
arnaudsj/suds | suds/servicedefinition.py | 200 | 8478 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{service definition} provides a textual representation of a service.
"""
from logging import getLogger
from suds import *
import suds.metrics as metrics
from suds.sax import Namespace
log = getLogger(__name__)
class ServiceDefinition:
"""
A service definition provides an object used to generate a textual description
of a service.
@ivar wsdl: A wsdl.
@type wsdl: L{wsdl.Definitions}
@ivar service: The service object.
@type service: L{suds.wsdl.Service}
@ivar ports: A list of port-tuple: (port, [(method-name, pdef)])
@type ports: [port-tuple,..]
@ivar prefixes: A list of remapped prefixes.
@type prefixes: [(prefix,uri),..]
@ivar types: A list of type definitions
@type types: [I{Type},..]
"""
def __init__(self, wsdl, service):
"""
@param wsdl: A wsdl object
@type wsdl: L{Definitions}
@param service: A service B{name}.
@type service: str
"""
self.wsdl = wsdl
self.service = service
self.ports = []
self.params = []
self.types = []
self.prefixes = []
self.addports()
self.paramtypes()
self.publictypes()
self.getprefixes()
self.pushprefixes()
def pushprefixes(self):
"""
Add our prefixes to the wsdl so that when users invoke methods
and reference the prefixes, the will resolve properly.
"""
for ns in self.prefixes:
self.wsdl.root.addPrefix(ns[0], ns[1])
def addports(self):
"""
Look through the list of service ports and construct a list of tuples where
each tuple is used to describe a port and it's list of methods as:
(port, [method]). Each method is tuple: (name, [pdef,..] where each pdef is
a tuple: (param-name, type).
"""
timer = metrics.Timer()
timer.start()
for port in self.service.ports:
p = self.findport(port)
for op in port.binding.operations.values():
m = p[0].method(op.name)
binding = m.binding.input
method = (m.name, binding.param_defs(m))
p[1].append(method)
metrics.log.debug("method '%s' created: %s", m.name, timer)
p[1].sort()
timer.stop()
def findport(self, port):
"""
Find and return a port tuple for the specified port.
Created and added when not found.
@param port: A port.
@type port: I{service.Port}
@return: A port tuple.
@rtype: (port, [method])
"""
        for p in self.ports:
            if p[0] == port: return p
p = (port, [])
self.ports.append(p)
return p
def getprefixes(self):
"""
Add prefixes foreach namespace referenced by parameter types.
"""
namespaces = []
for l in (self.params, self.types):
for t,r in l:
ns = r.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
if Namespace.xs(ns) or Namespace.xsd(ns):
continue
namespaces.append(ns[1])
if t == r: continue
ns = t.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
namespaces.append(ns[1])
i = 0
namespaces.sort()
for u in namespaces:
p = self.nextprefix()
ns = (p, u)
self.prefixes.append(ns)
def paramtypes(self):
""" get all parameter types """
for m in [p[1] for p in self.ports]:
for p in [p[1] for p in m]:
for pd in p:
if pd[1] in self.params: continue
item = (pd[1], pd[1].resolve())
self.params.append(item)
def publictypes(self):
""" get all public types """
for t in self.wsdl.schema.types.values():
if t in self.params: continue
if t in self.types: continue
item = (t, t)
self.types.append(item)
tc = lambda x,y: cmp(x[0].name, y[0].name)
self.types.sort(cmp=tc)
def nextprefix(self):
"""
Get the next available prefix. This means a prefix starting with 'ns' with
a number appended as (ns0, ns1, ..) that is not already defined on the
wsdl document.
"""
used = [ns[0] for ns in self.prefixes]
used += [ns[0] for ns in self.wsdl.root.nsprefixes.items()]
for n in range(0,1024):
p = 'ns%d'%n
if p not in used:
return p
raise Exception('prefixes exhausted')
def getprefix(self, u):
"""
Get the prefix for the specified namespace (uri)
@param u: A namespace uri.
@type u: str
@return: The namspace.
@rtype: (prefix, uri).
"""
for ns in Namespace.all:
if u == ns[1]: return ns[0]
for ns in self.prefixes:
if u == ns[1]: return ns[0]
raise Exception('ns (%s) not mapped' % u)
def xlate(self, type):
"""
Get a (namespace) translated I{qualified} name for specified type.
@param type: A schema type.
@type type: I{suds.xsd.sxbasic.SchemaObject}
@return: A translated I{qualified} name.
@rtype: str
"""
resolved = type.resolve()
name = resolved.name
if type.unbounded():
name += '[]'
ns = resolved.namespace()
if ns[1] == self.wsdl.tns[1]:
return name
prefix = self.getprefix(ns[1])
return ':'.join((prefix, name))
def description(self):
"""
Get a textual description of the service for which this object represents.
@return: A textual description.
@rtype: str
"""
s = []
indent = (lambda n : '\n%*s'%(n*3,' '))
s.append('Service ( %s ) tns="%s"' % (self.service.name, self.wsdl.tns[1]))
s.append(indent(1))
s.append('Prefixes (%d)' % len(self.prefixes))
for p in self.prefixes:
s.append(indent(2))
s.append('%s = "%s"' % p)
s.append(indent(1))
s.append('Ports (%d):' % len(self.ports))
for p in self.ports:
s.append(indent(2))
s.append('(%s)' % p[0].name)
s.append(indent(3))
s.append('Methods (%d):' % len(p[1]))
for m in p[1]:
sig = []
s.append(indent(4))
sig.append(m[0])
sig.append('(')
for p in m[1]:
sig.append(self.xlate(p[1]))
sig.append(' ')
sig.append(p[0])
sig.append(', ')
sig.append(')')
try:
s.append(''.join(sig))
except:
pass
s.append(indent(3))
s.append('Types (%d):' % len(self.types))
for t in self.types:
s.append(indent(4))
s.append(self.xlate(t[0]))
s.append('\n\n')
return ''.join(s)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
try:
return self.description()
except Exception, e:
log.exception(e)
return tostr(e) | lgpl-3.0 | -1,232,254,896,157,656,600 | 33.189516 | 84 | 0.523355 | false |
openstack/ironic | ironic/hacking/checks.py | 1 | 1950 | # Copyright 2018 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from hacking import core
# N323: Found use of _() without explicit import of _!
UNDERSCORE_IMPORT_FILES = []
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
r"\(\s*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
@core.flake8ext
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit test to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line)
or custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line)
or string_translation.match(logical_line)):
yield(0, "N323: Found use of _() without explicit import of _!")
| apache-2.0 | -2,989,298,579,154,447,400 | 35.111111 | 75 | 0.698462 | false |
divio/django | django/test/runner.py | 148 | 14807 | import logging
import os
import unittest
from importlib import import_module
from unittest import TestSuite, defaultTestLoader
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase, TestCase
from django.test.utils import setup_test_environment, teardown_test_environment
from django.utils.datastructures import OrderedSet
from django.utils.six import StringIO
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger('django.db.backends')
self.logger.setLevel(logging.DEBUG)
super(DebugSQLTextTestResult, self).__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super(DebugSQLTextTestResult, self).startTest(test)
def stopTest(self, test):
super(DebugSQLTextTestResult, self).stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super(DebugSQLTextTestResult, self).addError(test, err)
self.debug_sql_stream.seek(0)
self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
def addFailure(self, test, err):
super(DebugSQLTextTestResult, self).addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % sql_debug)
class DiscoverRunner(object):
"""
A Django test runner that uses unittest2 test discovery.
"""
test_suite = TestSuite
test_runner = unittest.TextTestRunner
test_loader = defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(self, pattern=None, top_level=None, verbosity=1,
interactive=True, failfast=False, keepdb=False,
reverse=False, debug_sql=False, **kwargs):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_sql = debug_sql
@classmethod
def add_arguments(cls, parser):
parser.add_argument('-t', '--top-level-directory',
action='store', dest='top_level', default=None,
help='Top level of project for unittest discovery.')
parser.add_argument('-p', '--pattern', action='store', dest='pattern',
default="test*.py",
help='The test matching pattern. Defaults to test*.py.')
parser.add_argument('-k', '--keepdb', action='store_true', dest='keepdb',
default=False,
help='Preserves the test DB between runs.')
parser.add_argument('-r', '--reverse', action='store_true', dest='reverse',
default=False,
help='Reverses test cases order.')
parser.add_argument('-d', '--debug-sql', action='store_true', dest='debug_sql',
default=False,
help='Prints logged SQL queries on failure.')
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
suite = self.test_suite()
test_labels = test_labels or ['.']
extra_tests = extra_tests or []
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs['pattern'] = self.pattern
if self.top_level is not None:
discover_kwargs['top_level_dir'] = self.top_level
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = self.test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path) and not self.top_level:
# Try to be a bit smarter than unittest about finding the
# default top-level for a given directory path, to avoid
# breaking relative imports. (Unittest's default is to set
# top-level equal to the path, which means relative imports
# will result in "Attempted relative import in non-package.").
# We'd be happy to skip this and require dotted module paths
# (which don't cause this problem) instead of file paths (which
# do), but in the case of a directory in the cwd, which would
# be equally valid if considered as a top-level module or as a
# directory path, unittest unfortunately prefers the latter.
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
suite.addTests(tests)
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, self.reorder_by, self.reverse)
def setup_databases(self, **kwargs):
return setup_databases(
self.verbosity, self.interactive, self.keepdb, self.debug_sql,
**kwargs
)
def get_resultclass(self):
return DebugSQLTextTestResult if self.debug_sql else None
def run_suite(self, suite, **kwargs):
resultclass = self.get_resultclass()
return self.test_runner(
verbosity=self.verbosity,
failfast=self.failfast,
resultclass=resultclass,
).run(suite)
def teardown_databases(self, old_config, **kwargs):
"""
Destroys all the non-mirror databases.
"""
old_names, mirrors = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity, self.keepdb)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
def is_discoverable(label):
"""
Check if a test label points to a python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST[DEPENDENCIES].
"""
ordered_test_databases = []
resolved_databases = set()
# Maps db signature to dependencies of all it's aliases
dependencies_map = {}
# sanity check - no DB can depend on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
# Try to find a DB that has all it's dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured(
"Circular dependency in TEST[DEPENDENCIES]")
test_databases = deferred
return ordered_test_databases
def reorder_suite(suite, classes, reverse=False):
"""
Reorders a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, tests within classes are sorted in opposite order,
but test classes are not reversed.
"""
class_count = len(classes)
suite_class = type(suite)
bins = [OrderedSet() for i in range(class_count + 1)]
partition_suite(suite, classes, bins, reverse=reverse)
reordered_suite = suite_class()
for i in range(class_count + 1):
reordered_suite.addTests(bins[i])
return reordered_suite
def partition_suite(suite, classes, bins, reverse=False):
"""
Partitions a test suite by test type. Also prevents duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are place in bins[-1]
"""
suite_class = type(suite)
if reverse:
suite = reversed(tuple(suite))
for test in suite:
if isinstance(test, suite_class):
partition_suite(test, classes, bins, reverse=reverse)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].add(test)
break
else:
bins[-1].add(test)
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
test_settings = connection.settings_dict['TEST']
if test_settings['MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = test_settings['MIRROR']
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'DEPENDENCIES' in test_settings:
dependencies[alias] = test_settings['DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(
test_databases.items(), dependencies):
test_db_name = None
# Actually create the database for the first connection
for alias in aliases:
connection = connections[alias]
if test_db_name is None:
test_db_name = connection.creation.create_test_db(
verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=connection.settings_dict.get("TEST", {}).get("SERIALIZE", True),
)
destroy = True
else:
connection.settings_dict['NAME'] = test_db_name
destroy = False
old_names.append((connection, db_name, destroy))
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].creation.set_as_test_mirror(
connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names, mirrors
| bsd-3-clause | 1,929,785,634,139,747,800 | 37.360104 | 104 | 0.612075 | false |
tiborsimko/invenio-pidstore | invenio_pidstore/providers/base.py | 1 | 3776 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Module storing implementations of PID providers."""
from __future__ import absolute_import, print_function
from ..models import PersistentIdentifier, PIDStatus
class BaseProvider(object):
"""Abstract class for persistent identifier provider classes."""
pid_type = None
"""Default persistent identifier type."""
pid_provider = None
"""Persistent identifier provider name."""
default_status = PIDStatus.NEW
"""Default status for newly created PIDs by this provider."""
@classmethod
def create(cls, pid_type=None, pid_value=None, object_type=None,
object_uuid=None, status=None, **kwargs):
"""Create a new instance for the given type and pid.
:param pid_type: Persistent identifier type. (Default: None).
:param pid_value: Persistent identifier value. (Default: None).
:param status: Current PID status.
(Default: :attr:`invenio_pidstore.models.PIDStatus.NEW`)
:param object_type: The object type is a string that identify its type.
(Default: None).
:param object_uuid: The object UUID. (Default: None).
:returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
instance.
"""
assert pid_value
assert pid_type or cls.pid_type
pid = PersistentIdentifier.create(
pid_type or cls.pid_type,
pid_value,
pid_provider=cls.pid_provider,
object_type=object_type,
object_uuid=object_uuid,
status=status or cls.default_status,
)
return cls(pid, **kwargs)
@classmethod
def get(cls, pid_value, pid_type=None, **kwargs):
"""Get a persistent identifier for this provider.
:param pid_type: Persistent identifier type. (Default: configured
:attr:`invenio_pidstore.providers.base.BaseProvider.pid_type`)
:param pid_value: Persistent identifier value.
:param kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider` required
initialization properties.
:returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
instance.
"""
return cls(
PersistentIdentifier.get(pid_type or cls.pid_type, pid_value,
pid_provider=cls.pid_provider),
**kwargs)
def __init__(self, pid):
"""Initialize provider using persistent identifier.
:param pid: A :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
"""
self.pid = pid
assert pid.pid_provider == self.pid_provider
def reserve(self):
"""Reserve a persistent identifier.
This might or might not be useful depending on the service of the
provider.
See: :meth:`invenio_pidstore.models.PersistentIdentifier.reserve`.
"""
return self.pid.reserve()
def register(self):
"""Register a persistent identifier.
See: :meth:`invenio_pidstore.models.PersistentIdentifier.register`.
"""
return self.pid.register()
def update(self):
"""Update information about the persistent identifier."""
pass
def delete(self):
"""Delete a persistent identifier.
See: :meth:`invenio_pidstore.models.PersistentIdentifier.delete`.
"""
return self.pid.delete()
def sync_status(self):
"""Synchronize PIDstatus with remote service provider."""
pass
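# Minimal usage sketch (hypothetical subclass; names are illustrative and not
# part of this module):
#
#     class RecidProvider(BaseProvider):
#         pid_type = 'recid'
#         pid_provider = 'recid'
#
#     provider = RecidProvider.create(pid_value='12345')
#     provider.reserve()
#     provider.register()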
| mit | -7,757,997,432,618,151,000 | 32.415929 | 79 | 0.628178 | false |
djw8605/htcondor | src/condor_contrib/campus_factory/python-lib/campus_factory/Parsers.py | 7 | 3591 | import logging
import xml.sax.handler
import os
from select import select
from campus_factory.util.ExternalCommands import RunExternal
class AvailableGlideins(xml.sax.handler.ContentHandler, object):
# Command to query the collector for available glideins
command = "condor_status -avail -const '(IsUndefined(Offline) == True) || (Offline == false)' -format '<glidein name=\"%s\"/>' 'Name'"
def __init__(self):
self.owner_idle = {}
pass
def GetIdle(self):
self.idle = 0
self.found = False
# Get the xml from the collector
to_parse, stderr = RunExternal(self.command)
formatted_to_parse = "<doc>%s</doc>" % to_parse
# Parse the data
try:
xml.sax.parseString(formatted_to_parse, self)
except xml.sax._exceptions.SAXParseException, inst:
logging.error("Error parsing:")
logging.error("command = %s" % self.command)
logging.error("stderr = %s" % stderr)
logging.error("stdout = %s" % to_parse)
logging.error("Error: %s - %s" % ( str(inst), inst.args ))
if not self.found and (len(stderr) != 0):
logging.error("No valid output received from command: %s"% self.command)
logging.error("stderr = %s" % stderr)
logging.error("stdout = %s" % to_parse)
return None
return self.idle
def Run(self):
"""
Generic function for when this class is inherited
"""
return self.GetIdle()
    def startElement(self, name, attributes):
        if name == "glidein":
            self.idle += 1
            self.found = True
            # Only queries that format an 'owner' attribute report per-owner
            # counts; other queries (e.g. condor_status by Name) do not.
            if attributes.has_key('owner'):
                if not self.owner_idle.has_key(attributes['owner']):
                    self.owner_idle[attributes['owner']] = 0
                self.owner_idle[attributes['owner']] += 1
def GetOwnerIdle(self):
return self.owner_idle
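# Illustrative use (assumes a working HTCondor pool and the condor command
# line tools on PATH):
#
#     avail = AvailableGlideins()
#     idle = avail.GetIdle()   # number of available glideins, or None on error
#
# The subclasses below reuse the same parsing logic with different condor queries.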
class IdleGlideins(AvailableGlideins):
command = "condor_q -const '(GlideinJob == true) && (JobStatus == 1)' -format '<glidein owner=\"%s\"/>' 'Owner'"
class IdleJobs(AvailableGlideins):
command = "condor_q -name %s -const '(GlideinJob =!= true) && (JobStatus == 1) && (JobUniverse == 5)' -format '<glidein owner=\"%%s\"/>' 'Owner'"
def __init__(self, schedd):
super(IdleJobs, self).__init__()
self.command = self.command % schedd
class IdleLocalJobs(AvailableGlideins):
command = "condor_q -const '(GlideinJob =!= true) && (JobStatus == 1) && (JobUniverse == 5)' -format '<glidein owner=\"%s\"/>' 'Owner'"
class FactoryID(AvailableGlideins):
command = "condor_q -const '(IsUndefined(IsFactory) == FALSE)' -format '<factory id=\"%s\"/>' 'ClusterId'"
def startElement(self, name, attributes):
if name == "factory":
self.factory_id = attributes.getValue("id")
self.found = True
def GetId(self):
self.GetIdle()
return self.factory_id
class RunningGlideinsJobs(AvailableGlideins):
"""
Gets the number of running glidein jobs (condor_q)
"""
command = "condor_q -const '(GlideinJob == true) && (JobStatus == 2)' -format '<glidein owner=\"%s\"/>' 'Owner'"
class RunningGlideins(AvailableGlideins):
"""
Returns the number of startd's reporting to the collector (condor_status)
"""
command = "condor_status -const '(IsUndefined(IS_GLIDEIN) == FALSE) && (IS_GLIDEIN == TRUE) && (IsUndefined(Offline))' -format '<glidein name=\"%s\"/>' 'Name'"
| apache-2.0 | -8,766,301,105,742,628,000 | 31.351351 | 163 | 0.584238 | false |
linuxlewis/channels | channels/asgi.py | 4 | 3137 | from __future__ import unicode_literals
import django
from django.conf import settings
from django.utils.module_loading import import_string
from .routing import Router
from .utils import name_that_thing
class InvalidChannelLayerError(ValueError):
pass
class ChannelLayerManager(object):
"""
Takes a settings dictionary of backends and initialises them on request.
"""
def __init__(self):
self.backends = {}
@property
def configs(self):
# Lazy load settings so we can be imported
return getattr(settings, "CHANNEL_LAYERS", {})
def make_backend(self, name):
# Load the backend class
try:
backend_class = import_string(self.configs[name]['BACKEND'])
except KeyError:
raise InvalidChannelLayerError("No BACKEND specified for %s" % name)
except ImportError:
raise InvalidChannelLayerError(
"Cannot import BACKEND %r specified for %s" % (self.configs[name]['BACKEND'], name)
)
# Get routing
try:
routing = self.configs[name]['ROUTING']
except KeyError:
raise InvalidChannelLayerError("No ROUTING specified for %s" % name)
# Initialise and pass config
asgi_layer = backend_class(**self.configs[name].get("CONFIG", {}))
return ChannelLayerWrapper(
channel_layer=asgi_layer,
alias=name,
routing=routing,
)
def __getitem__(self, key):
if key not in self.backends:
self.backends[key] = self.make_backend(key)
return self.backends[key]
def __contains__(self, key):
return key in self.configs
def set(self, key, layer):
"""
Sets an alias to point to a new ChannelLayerWrapper instance, and
returns the old one that it replaced. Useful for swapping out the
backend during tests.
"""
old = self.backends.get(key, None)
self.backends[key] = layer
return old
class ChannelLayerWrapper(object):
"""
Top level channel layer wrapper, which contains both the ASGI channel
layer object as well as alias and routing information specific to Django.
"""
def __init__(self, channel_layer, alias, routing):
self.channel_layer = channel_layer
self.alias = alias
self.routing = routing
self.router = Router(self.routing)
def __getattr__(self, name):
return getattr(self.channel_layer, name)
def __str__(self):
return "%s (%s)" % (self.alias, name_that_thing(self.channel_layer))
def local_only(self):
# TODO: Can probably come up with a nicer check?
return "inmemory" in self.channel_layer.__class__.__module__
def get_channel_layer(alias="default"):
"""
Returns the raw ASGI channel layer for this project.
"""
if django.VERSION[1] > 9:
django.setup(set_prefix=False)
else:
django.setup()
return channel_layers[alias].channel_layer
# Default global instance of the channel layer manager
channel_layers = ChannelLayerManager()
| bsd-3-clause | 1,765,793,476,120,319,700 | 28.87619 | 99 | 0.628626 | false |
dyoung418/tensorflow | tensorflow/python/keras/_impl/keras/applications/xception_test.py | 35 | 2109 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Xception application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.platform import test
class XceptionTest(test.TestCase):
def test_with_top(self):
model = keras.applications.Xception(weights=None)
self.assertEqual(model.output_shape, (None, 1000))
def test_no_top(self):
model = keras.applications.Xception(weights=None, include_top=False)
self.assertEqual(model.output_shape, (None, None, None, 2048))
def test_with_pooling(self):
model = keras.applications.Xception(weights=None,
include_top=False,
pooling='avg')
self.assertEqual(model.output_shape, (None, 2048))
def test_weight_loading(self):
with self.assertRaises(ValueError):
keras.applications.Xception(weights='unknown',
include_top=False)
with self.assertRaises(ValueError):
keras.applications.Xception(weights='imagenet',
classes=2000)
def test_preprocess_input(self):
x = np.random.uniform(0, 255, (2, 300, 200, 3))
out1 = keras.applications.xception.preprocess_input(x)
self.assertAllClose(np.mean(out1), 0., atol=0.1)
if __name__ == '__main__':
test.main()
| apache-2.0 | 5,480,835,310,949,082,000 | 36 | 80 | 0.655761 | false |
romankagan/DDBWorkbench | python/lib/Lib/HTMLParser.py | 103 | 12662 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
interesting_cdata = re.compile(r'<(/|\Z)')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~@]*))?')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
"""Exception raised for all parse errors."""
def __init__(self, msg, position=(None, None)):
assert msg
self.msg = msg
self.lineno = position[0]
self.offset = position[1]
def __str__(self):
result = self.msg
if self.lineno is not None:
result = result + ", at line %d" % self.lineno
if self.offset is not None:
result = result + ", column %d" % (self.offset + 1)
return result
class HTMLParser(markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self):
"""Initialize and reset this instance."""
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
markupbase.ParserBase.reset(self)
def feed(self, data):
"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
def error(self, message):
raise HTMLParseError(message, self.getpos())
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self):
self.interesting = interesting_cdata
def clear_cdata_mode(self):
self.interesting = interesting_normal
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if end:
self.error("EOF in middle of construct")
break
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
self.error("EOF in middle of entity or char ref")
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
m = attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode()
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
self.updatepos(i, j + 1)
self.error("malformed empty start tag")
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
self.updatepos(i, j)
self.error("malformed start tag")
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
j = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
self.error("bad end tag: %r" % (rawdata[i:j],))
tag = match.group(1)
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
self.error("unknown declaration: %r" % (data,))
# Internal -- helper to remove special character quoting
def unescape(self, s):
if '&' not in s:
return s
s = s.replace("<", "<")
s = s.replace(">", ">")
s = s.replace("'", "'")
s = s.replace(""", '"')
s = s.replace("&", "&") # Must be last
return s
| apache-2.0 | -3,431,034,967,023,642,600 | 33.314363 | 76 | 0.50387 | false |
axinging/chromium-crosswalk | third_party/Python-Markdown/markdown/extensions/meta.py | 114 | 2400 | """
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
See <https://pythonhosted.org/Markdown/extensions/meta_data.html>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
import re
import logging
log = logging.getLogger('MARKDOWN')
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
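# Example of a document head these patterns accept (illustrative):
#
#     ---
#     Title:   An Example Document
#     Author:  First Author
#              Second Author
#     ---
#
# Keys are lower-cased and each key maps to a list of value lines.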
class MetaExtension (Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """
md.preprocessors.add("meta",
MetaPreprocessor(md),
">normalize_whitespace")
class MetaPreprocessor(Preprocessor):
""" Get Meta-Data. """
def run(self, lines):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
if lines and BEGIN_RE.match(lines[0]):
lines.pop(0)
while lines:
line = lines.pop(0)
m1 = META_RE.match(line)
if line.strip() == '' or END_RE.match(line):
break # blank line or end of YAML header - done
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.markdown.Meta = meta
return lines
def makeExtension(*args, **kwargs):
return MetaExtension(*args, **kwargs)
| bsd-3-clause | 2,012,320,851,609,054,500 | 29.769231 | 74 | 0.552083 | false |
chanderbgoel/pybrain | pybrain/supervised/evolino/filter.py | 25 | 9839 | from __future__ import print_function
__author__ = 'Michael Isik'
from pybrain.supervised.evolino.gfilter import Filter, SimpleMutation
from pybrain.supervised.evolino.variate import CauchyVariate
from pybrain.supervised.evolino.population import SimplePopulation
from pybrain.tools.validation import Validator
from pybrain.tools.kwargsprocessor import KWArgsProcessor
from numpy import array, dot, concatenate, Infinity
from scipy.linalg import pinv2
from copy import deepcopy
class EvolinoEvaluation(Filter):
""" Evaluate all individuals of the Evolino population, and store their
fitness value inside the population.
"""
def __init__(self, evolino_network, dataset, **kwargs):
""" :key evolino_network: an instance of NetworkWrapper()
:key dataset: The evaluation dataset
:key evalfunc: Compares output to target values and returns a scalar, denoting the fitness.
Defaults to -mse(output, target).
:key wtRatio: Float array of two values denoting the ratio between washout and training length.
Defaults to [1,2]
:key verbosity: Verbosity level. Defaults to 0
"""
Filter.__init__(self)
ap = KWArgsProcessor(self, kwargs)
ap.add('verbosity', default=0)
ap.add('evalfunc', default=lambda output, target:-Validator.MSE(output, target))
ap.add('wtRatio', default=array([1, 2], float))
self.network = evolino_network
self.dataset = dataset
self.max_fitness = -Infinity
def _evaluateNet(self, net, dataset, wtRatio):
""" Evaluates the performance of net on the given dataset.
Returns the fitness value.
:key net: Instance of EvolinoNetwork to evaluate
:key dataset: Sequences to test the net on
:key wtRatio: See __init__
"""
# === extract sequences from dataset ===
numSequences = dataset.getNumSequences()
washout_sequences = []
training_sequences = []
for i in range(numSequences):
sequence = dataset.getSequence(i)[1]
training_start = int(wtRatio * len(sequence))
washout_sequences.append(sequence[ : training_start ])
training_sequences.append(sequence[ training_start : ])
# === collect raw output (denoted by phi) ===
phis = []
for i in range(numSequences):
net.reset()
net.washout(washout_sequences[i])
phi = net.washout(training_sequences[i])
phis.append(phi)
# === calculate and set weights of linear output layer ===
PHI = concatenate(phis).T
PHI_INV = pinv2(PHI)
TARGET = concatenate(training_sequences).T
W = dot(TARGET, PHI_INV)
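        # Least-squares fit of the linear readout: W minimises ||W*PHI - TARGET||
        # using the Moore-Penrose pseudoinverse of the collected activations.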
net.setOutputWeightMatrix(W)
# === collect outputs by applying the newly configured network ===
outputs = []
for i in range(numSequences):
out = net.extrapolate(washout_sequences[i], len(training_sequences[i]))
outputs.append(out)
# === calculate fitness value ===
OUTPUT = concatenate(outputs)
TARGET = concatenate(training_sequences)
fitness = self.evalfunc(OUTPUT, TARGET)
return fitness
def apply(self, population):
""" Evaluate each individual, and store fitness inside population.
Also calculate and set the weight matrix W of the linear output layer.
:arg population: Instance of EvolinoPopulation
"""
net = self.network
dataset = self.dataset
population.clearFitness()
best_W = None
best_fitness = -Infinity
# iterate all individuals. Note, that these individuals are created on the fly
for individual in population.getIndividuals():
# load the individual's genome into the weights of the net
net.setGenome(individual.getGenome())
fitness = self._evaluateNet(net, dataset, self.wtRatio)
if self.verbosity > 1:
print(("Calculated fitness for individual", id(individual), " is ", fitness))
# set the individual fitness
population.setIndividualFitness(individual, fitness)
if best_fitness < fitness:
best_fitness = fitness
best_genome = deepcopy(individual.getGenome())
best_W = deepcopy(net.getOutputWeightMatrix())
net.reset()
net.setGenome(best_genome)
net.setOutputWeightMatrix(best_W)
# store fitness maximum to use it for triggering burst mutation
self.max_fitness = best_fitness
class EvolinoSelection(Filter):
""" Evolino's selection operator.
Set its nParents attribute at any time.
nParents specifies the number of individuals not to be deleted.
If nParents equals None, EvolinoSubSelection will use its
default value.
"""
def __init__(self):
Filter.__init__(self)
self.nParents = None
self.sub_selection = EvolinoSubSelection()
def apply(self, population):
""" The subpopulations of the EvolinoPopulation are iterated and forwarded
to the EvolinoSubSelection() operator.
:arg population: object of type EvolinoPopulation
"""
self.sub_selection.nParents = self.nParents
for sp in population.getSubPopulations():
self.sub_selection.apply(sp)
class EvolinoReproduction(Filter):
""" Evolino's reproduction operator """
def __init__(self, **kwargs):
""" :key **kwargs: will be forwarded to the EvolinoSubReproduction constructor
"""
Filter.__init__(self)
self._kwargs = kwargs
def apply(self, population):
""" The subpopulations of the EvolinoPopulation are iterated and forwarded
to the EvolinoSubReproduction() operator.
:arg population: object of type EvolinoPopulation
"""
sps = population.getSubPopulations()
reproduction = EvolinoSubReproduction(**self._kwargs)
for sp in sps:
reproduction.apply(sp)
class EvolinoBurstMutation(Filter):
""" The burst mutation operator for evolino """
def __init__(self, **kwargs):
""" :key **kwargs: will be forwarded to the EvolinoSubReproduction constructor
"""
Filter.__init__(self)
self._kwargs = kwargs
def apply(self, population):
""" Keeps just the best fitting individual of each subpopulation.
All other individuals are erased. After that, the kept best fitting
individuals will be used for reproduction, in order to refill the
sub-populations.
"""
sps = population.getSubPopulations()
for sp in sps:
n_toremove = sp.getIndividualsN() - 1
sp.removeWorstIndividuals(n_toremove)
reproduction = EvolinoSubReproduction(**self._kwargs)
reproduction.apply(sp)
# ==================================================== SubPopulation related ===
class EvolinoSubSelection(Filter):
""" Selection operator for EvolinoSubPopulation objects
Specify its nParents attribute at any time. See EvolinoSelection.
"""
def __init__(self):
Filter.__init__(self)
def apply(self, population):
""" Simply removes some individuals with lowest fitness values
"""
n = population.getIndividualsN()
if self.nParents is None:
nKeep = n // 4
else:
nKeep = self.nParents
assert nKeep >= 0
assert nKeep <= n
population.removeWorstIndividuals(n - nKeep)
class EvolinoSubReproduction(Filter):
""" Reproduction operator for EvolinoSubPopulation objects.
"""
def __init__(self, **kwargs):
""" :key verbosity: Verbosity level
:key mutationVariate: Variate used for mutation. Defaults to None
:key mutation: Defaults to EvolinoSubMutation
"""
Filter.__init__(self)
ap = KWArgsProcessor(self, kwargs)
ap.add('verbosity', default=0)
ap.add('mutationVariate', default=None)
ap.add('mutation', default=EvolinoSubMutation())
if self.mutationVariate is not None:
self.mutation.mutationVariate = self.mutationVariate
def apply(self, population):
""" First determines the number of individuals to be created.
Then clones the fittest individuals (=parents), mutates these clones
and adds them to the population.
"""
max_n = population.getMaxNIndividuals()
n = population.getIndividualsN()
freespace = max_n - n
best = population.getBestIndividualsSorted(freespace)
children = set()
while True:
if len(children) >= freespace: break
for parent in best:
children.add(parent.copy())
if len(children) >= freespace: break
dummy_population = SimplePopulation()
dummy_population.addIndividuals(children)
self.mutation.apply(dummy_population)
population.addIndividuals(dummy_population.getIndividuals())
assert population.getMaxNIndividuals() == population.getIndividualsN()
class EvolinoSubMutation(SimpleMutation):
""" Mutation operator for EvolinoSubPopulation objects.
Like SimpleMutation, except, that CauchyVariate is used by default.
"""
def __init__(self, **kwargs):
SimpleMutation.__init__(self)
ap = KWArgsProcessor(self, kwargs)
ap.add('mutationVariate', default=CauchyVariate())
self.mutationVariate.alpha = 0.001
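

# --- Illustrative usage sketch (added annotation, not part of the original module) ---
# A minimal, hedged example of how the operators above are usually chained for one
# Evolino generation. It assumes `evaluation` is an EvolinoEvaluation instance from
# earlier in this module and `population` is an EvolinoPopulation; both names are
# taken from the surrounding code and are assumptions, not a verified public API.
def _example_evolino_generation(evaluation, population, n_parents=None):
    evaluation.apply(population)               # score individuals and set the output weights
    selection = EvolinoSelection()
    selection.nParents = n_parents             # None -> EvolinoSubSelection default (n // 4)
    selection.apply(population)                # drop the weakest individuals
    EvolinoReproduction().apply(population)    # refill sub-populations with mutated clones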
| bsd-3-clause | 4,108,481,745,914,322,400 | 31.57947 | 107 | 0.622828 | false |
Shrews/PyGerrit | webapp/django/contrib/localflavor/es/es_provinces.py | 436 | 1482 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
PROVINCE_CHOICES = (
('01', _('Arava')),
('02', _('Albacete')),
('03', _('Alacant')),
('04', _('Almeria')),
('05', _('Avila')),
('06', _('Badajoz')),
('07', _('Illes Balears')),
('08', _('Barcelona')),
('09', _('Burgos')),
('10', _('Caceres')),
('11', _('Cadiz')),
('12', _('Castello')),
('13', _('Ciudad Real')),
('14', _('Cordoba')),
('15', _('A Coruna')),
('16', _('Cuenca')),
('17', _('Girona')),
('18', _('Granada')),
('19', _('Guadalajara')),
('20', _('Guipuzkoa')),
('21', _('Huelva')),
('22', _('Huesca')),
('23', _('Jaen')),
('24', _('Leon')),
('25', _('Lleida')),
('26', _('La Rioja')),
('27', _('Lugo')),
('28', _('Madrid')),
('29', _('Malaga')),
('30', _('Murcia')),
('31', _('Navarre')),
('32', _('Ourense')),
('33', _('Asturias')),
('34', _('Palencia')),
('35', _('Las Palmas')),
('36', _('Pontevedra')),
('37', _('Salamanca')),
('38', _('Santa Cruz de Tenerife')),
('39', _('Cantabria')),
('40', _('Segovia')),
('41', _('Seville')),
('42', _('Soria')),
('43', _('Tarragona')),
('44', _('Teruel')),
('45', _('Toledo')),
('46', _('Valencia')),
('47', _('Valladolid')),
('48', _('Bizkaia')),
('49', _('Zamora')),
('50', _('Zaragoza')),
('51', _('Ceuta')),
('52', _('Melilla')),
)
| apache-2.0 | -4,091,995,794,632,070,700 | 24.551724 | 55 | 0.375169 | false |
abhiQmar/servo | tests/wpt/web-platform-tests/check_stability.py | 9 | 26373 | from __future__ import print_function
import argparse
import logging
import os
import re
import stat
import subprocess
import sys
import tarfile
import zipfile
from abc import ABCMeta, abstractmethod
from cStringIO import StringIO as CStringIO
from collections import defaultdict
from ConfigParser import RawConfigParser
from io import BytesIO, StringIO
import requests
BaseHandler = None
LogActionFilter = None
LogHandler = None
LogLevelFilter = None
StreamHandler = None
TbplFormatter = None
manifest = None
reader = None
wptcommandline = None
wptrunner = None
wpt_root = None
wptrunner_root = None
logger = None
def do_delayed_imports():
"""Import and set up modules only needed if execution gets to this point."""
global BaseHandler
global LogLevelFilter
global StreamHandler
global TbplFormatter
global manifest
global reader
global wptcommandline
global wptrunner
from mozlog import reader
from mozlog.formatters import TbplFormatter
from mozlog.handlers import BaseHandler, LogLevelFilter, StreamHandler
from tools.manifest import manifest
from wptrunner import wptcommandline, wptrunner
setup_log_handler()
setup_action_filter()
def setup_logging():
"""Set up basic debug logger."""
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(logging.BASIC_FORMAT, None)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def setup_action_filter():
"""Create global LogActionFilter class as part of deferred module load."""
global LogActionFilter
class LogActionFilter(BaseHandler):
"""Handler that filters out messages not of a given set of actions.
Subclasses BaseHandler.
:param inner: Handler to use for messages that pass this filter
:param actions: List of actions for which to fire the handler
"""
def __init__(self, inner, actions):
"""Extend BaseHandler and set inner and actions props on self."""
BaseHandler.__init__(self, inner)
self.inner = inner
self.actions = actions
def __call__(self, item):
"""Invoke handler if action is in list passed as constructor param."""
if item["action"] in self.actions:
return self.inner(item)
class TravisFold(object):
"""Context for TravisCI folding mechanism. Subclasses object.
See: https://blog.travis-ci.com/2013-05-22-improving-build-visibility-log-folds/
"""
def __init__(self, name):
"""Register TravisCI folding section name."""
self.name = name
def __enter__(self):
"""Emit fold start syntax."""
print("travis_fold:start:%s" % self.name, file=sys.stderr)
def __exit__(self, type, value, traceback):
"""Emit fold end syntax."""
print("travis_fold:end:%s" % self.name, file=sys.stderr)
class FilteredIO(object):
"""Wrap a file object, invoking the provided callback for every call to
`write` and only proceeding with the operation when that callback returns
True."""
def __init__(self, original, on_write):
self.original = original
self.on_write = on_write
def __getattr__(self, name):
return getattr(self.original, name)
def disable(self):
self.write = lambda msg: None
def write(self, msg):
encoded = msg.encode("utf8", "backslashreplace").decode("utf8")
if self.on_write(self.original, encoded) is True:
self.original.write(encoded)
def replace_streams(capacity, warning_msg):
# Value must be boxed to support modification from inner function scope
count = [0]
capacity -= 2 + len(warning_msg)
stderr = sys.stderr
def on_write(handle, msg):
length = len(msg)
count[0] += length
if count[0] > capacity:
sys.stdout.disable()
sys.stderr.disable()
handle.write(msg[0:capacity - count[0]])
handle.flush()
stderr.write("\n%s\n" % warning_msg)
return False
return True
sys.stdout = FilteredIO(sys.stdout, on_write)
sys.stderr = FilteredIO(sys.stderr, on_write)
class Browser(object):
__metaclass__ = ABCMeta
@abstractmethod
def install(self):
return NotImplemented
@abstractmethod
def install_webdriver(self):
return NotImplemented
@abstractmethod
def version(self):
return NotImplemented
@abstractmethod
def wptrunner_args(self):
return NotImplemented
class Firefox(Browser):
"""Firefox-specific interface.
Includes installation, webdriver installation, and wptrunner setup methods.
"""
product = "firefox"
binary = "%s/firefox/firefox"
platform_ini = "%s/firefox/platform.ini"
def install(self):
"""Install Firefox."""
call("pip", "install", "-r", os.path.join(wptrunner_root, "requirements_firefox.txt"))
resp = get("https://archive.mozilla.org/pub/firefox/nightly/latest-mozilla-central/firefox-53.0a1.en-US.linux-x86_64.tar.bz2")
untar(resp.raw)
if not os.path.exists("profiles"):
os.mkdir("profiles")
with open(os.path.join("profiles", "prefs_general.js"), "wb") as f:
resp = get("https://hg.mozilla.org/mozilla-central/raw-file/tip/testing/profiles/prefs_general.js")
f.write(resp.content)
call("pip", "install", "-r", os.path.join(wptrunner_root, "requirements_firefox.txt"))
def _latest_geckodriver_version(self):
"""Get and return latest version number for geckodriver."""
# This is used rather than an API call to avoid rate limits
tags = call("git", "ls-remote", "--tags", "--refs",
"https://github.com/mozilla/geckodriver.git")
release_re = re.compile(".*refs/tags/v(\d+)\.(\d+)\.(\d+)")
latest_release = 0
for item in tags.split("\n"):
m = release_re.match(item)
if m:
version = [int(item) for item in m.groups()]
if version > latest_release:
latest_release = version
assert latest_release != 0
return "v%s.%s.%s" % tuple(str(item) for item in latest_release)
def install_webdriver(self):
"""Install latest Geckodriver."""
version = self._latest_geckodriver_version()
logger.debug("Latest geckodriver release %s" % version)
url = "https://github.com/mozilla/geckodriver/releases/download/%s/geckodriver-%s-linux64.tar.gz" % (version, version)
untar(get(url).raw)
def version(self, root):
"""Retrieve the release version of the installed browser."""
platform_info = RawConfigParser()
with open(self.platform_ini % root, "r") as fp:
platform_info.readfp(BytesIO(fp.read()))
return "BuildID %s; SourceStamp %s" % (
platform_info.get("Build", "BuildID"),
platform_info.get("Build", "SourceStamp"))
def wptrunner_args(self, root):
"""Return Firefox-specific wpt-runner arguments."""
return {
"product": "firefox",
"binary": self.binary % root,
"certutil_binary": "certutil",
"webdriver_binary": "%s/geckodriver" % root,
"prefs_root": "%s/profiles" % root,
}
class Chrome(Browser):
"""Chrome-specific interface.
Includes installation, webdriver installation, and wptrunner setup methods.
"""
product = "chrome"
binary = "/usr/bin/google-chrome"
def install(self):
"""Install Chrome."""
# Installing the Google Chrome browser requires administrative
# privileges, so that installation is handled by the invoking script.
call("pip", "install", "-r", os.path.join(wptrunner_root, "requirements_chrome.txt"))
def install_webdriver(self):
"""Install latest Webdriver."""
latest = get("http://chromedriver.storage.googleapis.com/LATEST_RELEASE").text.strip()
url = "http://chromedriver.storage.googleapis.com/%s/chromedriver_linux64.zip" % latest
unzip(get(url).raw)
st = os.stat('chromedriver')
os.chmod('chromedriver', st.st_mode | stat.S_IEXEC)
def version(self, root):
"""Retrieve the release version of the installed browser."""
output = call(self.binary, "--version")
return re.search(r"[0-9\.]+( [a-z]+)?$", output.strip()).group(0)
def wptrunner_args(self, root):
"""Return Chrome-specific wpt-runner arguments."""
return {
"product": "chrome",
"binary": self.binary,
"webdriver_binary": "%s/chromedriver" % root,
"test_types": ["testharness", "reftest"]
}
def get(url):
"""Issue GET request to a given URL and return the response."""
logger.debug("GET %s" % url)
resp = requests.get(url, stream=True)
resp.raise_for_status()
return resp
def call(*args):
"""Log terminal command, invoke it as a subprocess.
Returns a bytestring of the subprocess output if no error.
"""
logger.debug("%s" % " ".join(args))
try:
return subprocess.check_output(args)
except subprocess.CalledProcessError as e:
logger.critical("%s exited with return code %i" %
(e.cmd, e.returncode))
logger.critical(e.output)
raise
def get_git_cmd(repo_path):
"""Create a function for invoking git commands as a subprocess."""
def git(cmd, *args):
full_cmd = ["git", cmd] + list(args)
try:
return subprocess.check_output(full_cmd, cwd=repo_path, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logger.error("Git command exited with status %i" % e.returncode)
logger.error(e.output)
sys.exit(1)
return git
def seekable(fileobj):
"""Attempt to use file.seek on given file, with fallbacks."""
try:
fileobj.seek(fileobj.tell())
except Exception:
return CStringIO(fileobj.read())
else:
return fileobj
def untar(fileobj):
"""Extract tar archive."""
logger.debug("untar")
fileobj = seekable(fileobj)
with tarfile.open(fileobj=fileobj) as tar_data:
tar_data.extractall()
def unzip(fileobj):
"""Extract zip archive."""
logger.debug("unzip")
fileobj = seekable(fileobj)
with zipfile.ZipFile(fileobj) as zip_data:
for info in zip_data.infolist():
zip_data.extract(info)
perm = info.external_attr >> 16 & 0x1FF
os.chmod(info.filename, perm)
class pwd(object):
"""Create context for temporarily changing present working directory."""
def __init__(self, dir):
self.dir = dir
self.old_dir = None
def __enter__(self):
self.old_dir = os.path.abspath(os.curdir)
os.chdir(self.dir)
def __exit__(self, *args, **kwargs):
os.chdir(self.old_dir)
self.old_dir = None
def fetch_wpt_master(user):
"""Fetch the master branch via git."""
git = get_git_cmd(wpt_root)
git("fetch", "https://github.com/%s/web-platform-tests.git" % user, "master:master")
def get_sha1():
""" Get and return sha1 of current git branch HEAD commit."""
git = get_git_cmd(wpt_root)
return git("rev-parse", "HEAD").strip()
def build_manifest():
"""Build manifest of all files in web-platform-tests"""
with pwd(wpt_root):
# TODO: Call the manifest code directly
call("python", "manifest")
def install_wptrunner():
"""Clone and install wptrunner."""
call("git", "clone", "--depth=1", "https://github.com/w3c/wptrunner.git", wptrunner_root)
git = get_git_cmd(wptrunner_root)
git("submodule", "update", "--init", "--recursive")
call("pip", "install", wptrunner_root)
def get_files_changed():
"""Get and return files changed since current branch diverged from master."""
root = os.path.abspath(os.curdir)
git = get_git_cmd(wpt_root)
branch_point = git("merge-base", "HEAD", "master").strip()
logger.debug("Branch point from master: %s" % branch_point)
files = git("diff", "--name-only", "-z", "%s.." % branch_point)
if not files:
return []
assert files[-1] == "\0"
return [os.path.join(wpt_root, item)
for item in files[:-1].split("\0")]
def get_affected_testfiles(files_changed):
"""Determine and return list of test files that reference changed files."""
affected_testfiles = set()
nontests_changed = set(files_changed)
manifest_file = os.path.join(wpt_root, "MANIFEST.json")
skip_dirs = ["conformance-checkers", "docs", "tools"]
test_types = ["testharness", "reftest", "wdspec"]
wpt_manifest = manifest.load(wpt_root, manifest_file)
support_files = {os.path.join(wpt_root, path)
for _, path, _ in wpt_manifest.itertypes("support")}
test_files = {os.path.join(wpt_root, path)
for _, path, _ in wpt_manifest.itertypes(*test_types)}
nontests_changed = nontests_changed.intersection(support_files)
nontest_changed_paths = set()
for full_path in nontests_changed:
rel_path = os.path.relpath(full_path, wpt_root)
path_components = rel_path.split(os.sep)
if len(path_components) < 2:
# This changed file is in the repo root, so skip it
# (because it's not part of any test).
continue
top_level_subdir = path_components[0]
if top_level_subdir in skip_dirs:
continue
repo_path = "/" + os.path.relpath(full_path, wpt_root).replace(os.path.sep, "/")
nontest_changed_paths.add((full_path, repo_path))
for root, dirs, fnames in os.walk(wpt_root):
# Walk top_level_subdir looking for test files containing either the
        # relative filepath or absolute filepath to the changed files.
if root == wpt_root:
for dir_name in skip_dirs:
dirs.remove(dir_name)
for fname in fnames:
test_full_path = os.path.join(root, fname)
# Skip any file that's not a test file.
if test_full_path not in test_files:
continue
with open(test_full_path, "rb") as fh:
file_contents = fh.read()
if file_contents.startswith("\xfe\xff"):
file_contents = file_contents.decode("utf-16be")
elif file_contents.startswith("\xff\xfe"):
file_contents = file_contents.decode("utf-16le")
for full_path, repo_path in nontest_changed_paths:
rel_path = os.path.relpath(full_path, root).replace(os.path.sep, "/")
if rel_path in file_contents or repo_path in file_contents:
affected_testfiles.add(test_full_path)
continue
return affected_testfiles
def wptrunner_args(root, files_changed, iterations, browser):
"""Derive and return arguments for wpt-runner."""
parser = wptcommandline.create_parser([browser.product])
args = vars(parser.parse_args([]))
args.update(browser.wptrunner_args(root))
args.update({
"tests_root": wpt_root,
"metadata_root": wpt_root,
"repeat": iterations,
"config": "%s//wptrunner.default.ini" % (wptrunner_root),
"test_list": files_changed,
"restart_on_unexpected": False,
"pause_after_test": False
})
wptcommandline.check_args(args)
return args
def setup_log_handler():
"""Set up LogHandler class as part of deferred module load."""
global LogHandler
class LogHandler(reader.LogHandler):
"""Handle updating test and subtest status in log.
Subclasses reader.LogHandler.
"""
def __init__(self):
self.results = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
def test_status(self, data):
self.results[data["test"]][data.get("subtest")][data["status"]] += 1
def test_end(self, data):
self.results[data["test"]][None][data["status"]] += 1
def is_inconsistent(results_dict, iterations):
"""Return whether or not a single test is inconsistent."""
return len(results_dict) > 1 or sum(results_dict.values()) != iterations
def err_string(results_dict, iterations):
"""Create and return string with errors from test run."""
rv = []
total_results = sum(results_dict.values())
for key, value in sorted(results_dict.items()):
rv.append("%s%s" %
(key, ": %s/%s" % (value, iterations) if value != iterations else ""))
rv = ", ".join(rv)
if total_results < iterations:
rv.append("MISSING: %s/%s" % (iterations - total_results, iterations))
if len(results_dict) > 1 or total_results != iterations:
rv = "**%s**" % rv
return rv
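# Illustrative outputs (added annotation, using hypothetical result dicts):
#   err_string({"PASS": 10}, 10)            -> "PASS"
#   err_string({"PASS": 7, "FAIL": 3}, 10)  -> "**FAIL: 3/10, PASS: 7/10**"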
def process_results(log, iterations):
"""Process test log and return overall results and list of inconsistent tests."""
inconsistent = []
handler = LogHandler()
reader.handle_log(reader.read(log), handler)
results = handler.results
for test, test_results in results.iteritems():
for subtest, result in test_results.iteritems():
if is_inconsistent(result, iterations):
inconsistent.append((test, subtest, result))
return results, inconsistent
def format_comment_title(product):
"""Produce a Markdown-formatted string based on a given "product"--a string
containing a browser identifier optionally followed by a colon and a
release channel. (For example: "firefox" or "chrome:dev".) The generated
title string is used both to create new comments and to locate (and
subsequently update) previously-submitted comments."""
parts = product.split(":")
title = parts[0].title()
if len(parts) > 1:
title += " (%s channel)" % parts[1]
return "# %s #" % title
def markdown_adjust(s):
"""Escape problematic markdown sequences."""
s = s.replace('\t', u'\\t')
s = s.replace('\n', u'\\n')
s = s.replace('\r', u'\\r')
s = s.replace('`', u'\\`')
return s
def table(headings, data, log):
"""Create and log data to specified logger in tabular format."""
cols = range(len(headings))
assert all(len(item) == len(cols) for item in data)
max_widths = reduce(lambda prev, cur: [(len(cur[i]) + 2)
if (len(cur[i]) + 2) > prev[i]
else prev[i]
for i in cols],
data,
[len(item) + 2 for item in headings])
log("|%s|" % "|".join(item.center(max_widths[i]) for i, item in enumerate(headings)))
log("|%s|" % "|".join("-" * max_widths[i] for i in cols))
for row in data:
log("|%s|" % "|".join(" %s" % row[i].ljust(max_widths[i] - 1) for i in cols))
log("")
def write_inconsistent(inconsistent, iterations):
"""Output inconsistent tests to logger.error."""
logger.error("## Unstable results ##\n")
strings = [("`%s`" % markdown_adjust(test), ("`%s`" % markdown_adjust(subtest)) if subtest else "", err_string(results, iterations))
for test, subtest, results in inconsistent]
table(["Test", "Subtest", "Results"], strings, logger.error)
def write_results(results, iterations, comment_pr):
"""Output all test results to logger.info."""
pr_number = None
if comment_pr:
try:
pr_number = int(comment_pr)
except ValueError:
pass
logger.info("## All results ##\n")
if pr_number:
logger.info("<details>\n")
logger.info("<summary>%i %s ran</summary>\n\n" % (len(results),
"tests" if len(results) > 1
else "test"))
for test, test_results in results.iteritems():
baseurl = "http://w3c-test.org/submissions"
if "https" in os.path.splitext(test)[0].split(".")[1:]:
baseurl = "https://w3c-test.org/submissions"
if pr_number:
logger.info("<details>\n")
logger.info('<summary><a href="%s/%s%s">%s</a></summary>\n\n' %
(baseurl, pr_number, test, test))
else:
logger.info("### %s ###" % test)
parent = test_results.pop(None)
strings = [("", err_string(parent, iterations))]
strings.extend(((("`%s`" % markdown_adjust(subtest)) if subtest
else "", err_string(results, iterations))
for subtest, results in test_results.iteritems()))
table(["Subtest", "Results"], strings, logger.info)
if pr_number:
logger.info("</details>\n")
if pr_number:
logger.info("</details>\n")
def get_parser():
"""Create and return script-specific argument parser."""
parser = argparse.ArgumentParser()
parser.add_argument("--root",
action="store",
default=os.path.join(os.path.expanduser("~"), "build"),
help="Root path")
parser.add_argument("--iterations",
action="store",
default=10,
type=int,
help="Number of times to run tests")
parser.add_argument("--comment-pr",
action="store",
default=os.environ.get("TRAVIS_PULL_REQUEST"),
help="PR to comment on with stability results")
parser.add_argument("--user",
action="store",
# Travis docs say do not depend on USER env variable.
# This is a workaround to get what should be the same value
default=os.environ.get("TRAVIS_REPO_SLUG").split('/')[0],
help="Travis user name")
parser.add_argument("--output-bytes",
action="store",
type=int,
help="Maximum number of bytes to write to standard output/error")
parser.add_argument("product",
action="store",
help="Product to run against (`browser-name` or 'browser-name:channel')")
return parser
def main():
"""Perform check_stability functionality and return exit code."""
global wpt_root
global wptrunner_root
global logger
retcode = 0
parser = get_parser()
args = parser.parse_args()
if args.output_bytes is not None:
replace_streams(args.output_bytes,
"Log reached capacity (%s bytes); output disabled." % args.output_bytes)
logger = logging.getLogger(os.path.splitext(__file__)[0])
setup_logging()
wpt_root = os.path.abspath(os.curdir)
wptrunner_root = os.path.normpath(os.path.join(wpt_root, "..", "wptrunner"))
if not os.path.exists(args.root):
logger.critical("Root directory %s does not exist" % args.root)
return 1
os.chdir(args.root)
browser_name = args.product.split(":")[0]
with TravisFold("browser_setup"):
logger.info(format_comment_title(args.product))
browser_cls = {"firefox": Firefox,
"chrome": Chrome}.get(browser_name)
if browser_cls is None:
logger.critical("Unrecognised browser %s" % browser_name)
return 1
fetch_wpt_master(args.user)
head_sha1 = get_sha1()
logger.info("Testing web-platform-tests at revision %s" % head_sha1)
# For now just pass the whole list of changed files to wptrunner and
# assume that it will run everything that's actually a test
files_changed = get_files_changed()
if not files_changed:
logger.info("No files changed")
return 0
build_manifest()
install_wptrunner()
do_delayed_imports()
browser = browser_cls()
browser.install()
browser.install_webdriver()
try:
version = browser.version(args.root)
        except Exception as e:
version = "unknown (error: %s)" % e
logger.info("Using browser at version %s", version)
logger.debug("Files changed:\n%s" % "".join(" * %s\n" % item for item in files_changed))
affected_testfiles = get_affected_testfiles(files_changed)
logger.debug("Affected tests:\n%s" % "".join(" * %s\n" % item for item in affected_testfiles))
files_changed.extend(affected_testfiles)
kwargs = wptrunner_args(args.root,
files_changed,
args.iterations,
browser)
with TravisFold("running_tests"):
logger.info("Starting %i test iterations" % args.iterations)
with open("raw.log", "wb") as log:
wptrunner.setup_logging(kwargs,
{"raw": log})
# Setup logging for wptrunner that keeps process output and
# warning+ level logs only
wptrunner.logger.add_handler(
LogActionFilter(
LogLevelFilter(
StreamHandler(
sys.stdout,
TbplFormatter()
),
"WARNING"),
["log", "process_output"]))
wptrunner.run_tests(**kwargs)
with open("raw.log", "rb") as log:
results, inconsistent = process_results(log, args.iterations)
if results:
if inconsistent:
write_inconsistent(inconsistent, args.iterations)
retcode = 2
else:
logger.info("All results were stable\n")
with TravisFold("full_results"):
write_results(results, args.iterations, args.comment_pr)
else:
logger.info("No tests run.")
return retcode
if __name__ == "__main__":
try:
retcode = main()
except:
raise
else:
sys.exit(retcode)
| mpl-2.0 | -409,717,434,014,634,000 | 33.47451 | 136 | 0.591779 | false |
ryano144/intellij-community | python/lib/Lib/encodings/cp1253.py | 593 | 13350 | """ Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1253',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
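### Usage sketch (added annotation): the codec is normally reached through the
### codecs machinery rather than by calling these classes directly, e.g.
###   u'\u0391\u03b2\u03b3'.encode('cp1253')  ->  '\xc1\xe2\xe3'
### (byte values read off the decoding table below; this is an assumption about
### how the registered codec would be used, not an excerpt from the original file)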
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\ufffe' # 0x88 -> UNDEFINED
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS
u'\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\ufffe' # 0xAA -> UNDEFINED
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u2015' # 0xAF -> HORIZONTAL BAR
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\u0384' # 0xB4 -> GREEK TONOS
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
u'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
u'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
u'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
u'\ufffe' # 0xD2 -> UNDEFINED
u'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
u'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
u'\u03bd' # 0xED -> GREEK SMALL LETTER NU
u'\u03be' # 0xEE -> GREEK SMALL LETTER XI
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
u'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
u'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
u'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 | -8,835,863,375,360,395,000 | 42.485342 | 119 | 0.545843 | false |
craynot/django | django/core/management/sql.py | 399 | 1890 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True, include_views=False)
else:
tables = connection.introspection.table_names(include_views=False)
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
return statements
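# Illustrative result (added annotation, table names hypothetical): for tables
# ['auth_user', 'blog_post'] the returned list would look roughly like
# ['DELETE FROM "auth_user";', 'DELETE FROM "blog_post";'] plus any sequence-reset
# SQL, with quoting and reset statements depending on the backend in use.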
def emit_pre_migrate_signal(verbosity, interactive, db):
# Emit the pre_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running pre-migrate handlers for application %s" % app_config.label)
models.signals.pre_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
def emit_post_migrate_signal(verbosity, interactive, db):
# Emit the post_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running post-migrate handlers for application %s" % app_config.label)
models.signals.post_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
| bsd-3-clause | -6,653,575,909,798,016,000 | 36.8 | 101 | 0.667196 | false |
dpetzold/django | django/db/backends/sqlite3/operations.py | 106 | 10799 | from __future__ import unicode_literals
import datetime
import uuid
from django.conf import settings
from django.core.exceptions import FieldError, ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import aggregates, fields
from django.utils import six, timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.duration import duration_string
try:
import pytz
except ImportError:
pytz = None
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
        If there is just a single field to insert, then we can hit another
        limit, SQLITE_MAX_COMPOUND_SELECT, which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
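        # Worked example (added annotation): with three fields the 999-variable
        # limit applies, giving 999 // 3 == 333 objects per batch; with a single
        # field the 500-row compound SELECT limit applies, giving 500 objects.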
def check_expression_support(self, expression):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev)
if isinstance(expression, bad_aggregates):
for expr in expression.get_source_expressions():
try:
output_field = expr.output_field
if isinstance(output_field, bad_fields):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev, and Variance '
'aggregations on date/time fields in sqlite3 '
'since date/time is saved as text.'
)
except FieldError:
# Not every subexpression has an output_field which is fine
# to ignore.
pass
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, timedelta):
return "'%s'" % duration_string(timedelta), []
def format_for_duration_arithmetic(self, sql):
"""Do nothing here, we will handle it in the custom function."""
return sql
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def _require_pytz(self):
if settings.USE_TZ and pytz is None:
raise ImproperlyConfigured("This query requires pytz, but it isn't installed.")
def datetime_cast_date_sql(self, field_name, tzname):
self._require_pytz()
return "django_datetime_cast_date(%s, %%s)" % field_name, [tzname]
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
self._require_pytz()
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
self._require_pytz()
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def time_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_time_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def _quote_params_for_last_executed_query(self, params):
"""
Only for last_executed_query! Don't use this to execute SQL queries!
"""
sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params))
# Bypass Django's wrappers and use the underlying sqlite3 connection
# to avoid logging this query - it would trigger infinite recursion.
cursor = self.connection.connection.cursor()
# Native sqlite3 cursors cannot be used as context managers.
try:
return cursor.execute(sql, params).fetchone()
finally:
cursor.close()
def last_executed_query(self, cursor, sql, params):
# Python substitutes parameters in Modules/_sqlite/cursor.c with:
# pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars);
# Unfortunately there is no way to reach self->statement from Python,
# so we quote and substitute parameters manually.
if params:
if isinstance(params, (list, tuple)):
params = self._quote_params_for_last_executed_query(params)
else:
keys = params.keys()
values = tuple(params.values())
values = self._quote_params_for_last_executed_query(values)
params = dict(zip(keys, values))
return sql % params
# For consistency with SQLiteCursorWrapper.execute(), just return sql
# when there are no parameters. See #13648 and #17158.
else:
return sql
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def adapt_datetimefield_value(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def get_db_converters(self, expression):
converters = super(DatabaseOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'DecimalField':
converters.append(self.convert_decimalfield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
return converters
def convert_datetimefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.datetime):
value = parse_datetime(value)
if settings.USE_TZ:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.date):
value = parse_date(value)
return value
def convert_timefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.time):
value = parse_time(value)
return value
def convert_decimalfield_value(self, value, expression, connection, context):
if value is not None:
value = expression.output_field.format_number(value)
value = backend_utils.typecast_decimal(value)
return value
def convert_uuidfield_value(self, value, expression, connection, context):
if value is not None:
value = uuid.UUID(value)
return value
def bulk_insert_sql(self, fields, placeholder_rows):
return " UNION ALL ".join(
"SELECT %s" % ", ".join(row)
for row in placeholder_rows
)
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a power function, so we fake it with a
# user-defined function django_power that's registered in connect().
if connector == '^':
return 'django_power(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
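        # Illustrative output (added annotation):
        #   combine_expression('^', ['"price"', '%s'])  ->  'django_power("price",%s)'
        # Other connectors fall through to the base class implementation via super().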
def combine_duration_expression(self, connector, sub_expressions):
if connector not in ['+', '-']:
raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector)
fn_params = ["'%s'" % connector] + sub_expressions
if len(fn_params) > 3:
raise ValueError('Too many params for timedelta operations.')
return "django_format_dtdelta(%s)" % ', '.join(fn_params)
def integer_field_range(self, internal_type):
# SQLite doesn't enforce any integer constraints
return (None, None)
| bsd-3-clause | -5,279,970,971,052,905,000 | 41.853175 | 114 | 0.62904 | false |
alexallah/django | tests/template_tests/filter_tests/test_urlizetrunc.py | 105 | 3353 | from django.template.defaultfilters import urlizetrunc
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class UrlizetruncTests(SimpleTestCase):
@setup({
'urlizetrunc01': '{% autoescape off %}{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}{% endautoescape %}'
})
def test_urlizetrunc01(self):
output = self.engine.render_to_string(
'urlizetrunc01',
{
'a': '"Unsafe" http://example.com/x=&y=',
'b': mark_safe('"Safe" http://example.com?x=&y='),
},
)
self.assertEqual(
output,
'"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> '
'"Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'
)
@setup({'urlizetrunc02': '{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}'})
def test_urlizetrunc02(self):
output = self.engine.render_to_string(
'urlizetrunc02',
{
'a': '"Unsafe" http://example.com/x=&y=',
'b': mark_safe('"Safe" http://example.com?x=&y='),
},
)
self.assertEqual(
output,
'"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> '
'"Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'
)
class FunctionTests(SimpleTestCase):
def test_truncate(self):
uri = 'http://31characteruri.com/test/'
self.assertEqual(len(uri), 31)
self.assertEqual(
urlizetrunc(uri, 31),
'<a href="http://31characteruri.com/test/" rel="nofollow">'
'http://31characteruri.com/test/</a>',
)
self.assertEqual(
urlizetrunc(uri, 30),
'<a href="http://31characteruri.com/test/" rel="nofollow">'
'http://31characteruri.com/t...</a>',
)
self.assertEqual(
urlizetrunc(uri, 2),
'<a href="http://31characteruri.com/test/"'
' rel="nofollow">...</a>',
)
def test_overtruncate(self):
self.assertEqual(
urlizetrunc('http://short.com/', 20), '<a href='
'"http://short.com/" rel="nofollow">http://short.com/</a>',
)
def test_query_string(self):
self.assertEqual(
urlizetrunc('http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&meta=', 20),
'<a href="http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'
'meta=" rel="nofollow">http://www.google...</a>',
)
def test_non_string_input(self):
self.assertEqual(urlizetrunc(123, 1), '123')
def test_autoescape(self):
self.assertEqual(
urlizetrunc('foo<a href=" google.com ">bar</a>buz', 10),
'foo<a href=" <a href="http://google.com" rel="nofollow">google.com</a> ">bar</a>buz'
)
def test_autoescape_off(self):
self.assertEqual(
urlizetrunc('foo<a href=" google.com ">bar</a>buz', 9, autoescape=False),
'foo<a href=" <a href="http://google.com" rel="nofollow">google...</a> ">bar</a>buz',
)
| bsd-3-clause | -3,866,723,017,620,179,500 | 35.053763 | 119 | 0.538324 | false |
mcanningjr/Wallflower | Wallflower_Client.py | 1 | 7796 | '''
This is the chat client Wallflower. It currently connects to a server hosted by CaveFox Telecommunications,
but that can be changed to any server running the Wallflower_Server.py software package.
'''
import pickle
import requests
import time
import threading
import hashlib
message = ''
startpoint = 0
endpoint = 0
print(' Project Wallflower')
print(' One-Time Pad Cryptography Chat Software')
print('(c)2015 Michael Canning - CaveFox Telecommunications')
print('----------------------------------------------------')
print('All text is converted to lowercase, only letters and : are supported')
print('[System] - Loading One-Time Pad...')
pad = open("crypto.pad", 'r') # Loads the one time pad
pad = pickle.load(pad)
print('[System] - Loaded...')
username = str(raw_input("Desired Username: "))
ALPHABET = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", " ", ":"]
def md5(fname):  # This is used to get a hash of the pad
hash = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash.update(chunk)
return hash.hexdigest()
def encrypt(message, startpoint): # Encrypts the message
encoded = []
# Adds numbers to 'encoded' array based on the letter
for char in message.lower(): # Loops through input
for alphabetIndex in range(1, len(ALPHABET)): # Loops through alphabet
if char is ALPHABET[alphabetIndex - 1]: # Converts that letter to its matching number
encoded.append(alphabetIndex)
break
# z = 0 # This line seems useless but I was scared to delete it
final = ''
for num in encoded: # Loops through each number
encrypted = (x + pad[startpoint]) % 28 # Pad cipher
final = final + ALPHABET[num - 1] # Gets corresponding letter
startpoint = startpoint + 1
return final, startpoint
def decrypt(message, startpoint): # Decrypts the message
encoded = []
for x in message.lower():
if x is 'a':
encoded.append(1)
if x is 'b':
encoded.append(2)
if x is 'c':
encoded.append(3)
if x is 'd':
encoded.append(4)
if x is 'e':
encoded.append(5)
if x is 'f':
encoded.append(6)
if x is 'g':
encoded.append(7)
if x is 'h':
encoded.append(8)
if x is 'i':
encoded.append(9)
if x is 'j':
encoded.append(10)
if x is 'k':
encoded.append(11)
if x is 'l':
encoded.append(12)
if x is 'm':
encoded.append(13)
if x is 'n':
encoded.append(14)
if x is 'o':
encoded.append(15)
if x is 'p':
encoded.append(16)
if x is 'q':
encoded.append(17)
if x is 'r':
encoded.append(18)
if x is 's':
encoded.append(19)
if x is 't':
encoded.append(20)
if x is 'u':
encoded.append(21)
if x is 'v':
encoded.append(22)
if x is 'w':
encoded.append(23)
if x is 'x':
encoded.append(24)
if x is 'y':
encoded.append(25)
if x is 'z':
encoded.append(26)
if x is ' ':
encoded.append(27)
if x is ':':
encoded.append(28)
z = 0
final = ''
for x in encoded:
decryptic = x - pad[startpoint]
decryptic = decryptic % 28
startpoint = startpoint + 1
if decryptic is 1:
final = final + 'a'
if decryptic is 2:
final = final + 'b'
if decryptic is 3:
final = final + 'c'
if decryptic is 4:
final = final + 'd'
if decryptic is 5:
final = final + 'e'
if decryptic is 6:
final = final + 'f'
if decryptic is 7:
final = final + 'g'
if decryptic is 8:
final = final + 'h'
if decryptic is 9:
final = final + 'i'
if decryptic is 10:
final = final + 'j'
if decryptic is 11:
final = final + 'k'
if decryptic is 12:
final = final + 'l'
if decryptic is 13:
final = final + 'm'
if decryptic is 14:
final = final + 'n'
if decryptic is 15:
final = final + 'o'
if decryptic is 16:
final = final + 'p'
if decryptic is 17:
final = final + 'q'
if decryptic is 18:
final = final + 'r'
if decryptic is 19:
final = final + 's'
if decryptic is 20:
final = final + 't'
if decryptic is 21:
final = final + 'u'
if decryptic is 22:
final = final + 'v'
if decryptic is 23:
final = final + 'w'
if decryptic is 24:
final = final + 'x'
if decryptic is 25:
final = final + 'y'
if decryptic is 26:
final = final + 'z'
if decryptic is 27:
final = final + ' '
if decryptic is 0:
final = final + ':'
return final, startpoint
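# Round-trip sketch (added annotation): with a hypothetical pad such as
# pad = [3, 14, 1, 5, 9, ...], encrypt() shifts each symbol forward by the next
# pad value and decrypt() shifts it back, so:
#   cipher, nxt = encrypt("hi there", 0)
#   plain, _ = decrypt(cipher, 0)   # plain == "hi there", nxt == 8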
class getmessage(threading.Thread): # Thread to get the latest message every second; time can be change to faster or slower
def __init__(self, id):
self.id = id
threading.Thread.__init__(self)
def run(self):
messagecheck = ''
flag = 0
while True:
time.sleep(1)
r = requests.get('http://198.100.155.138:5000/read/startpoint/' + str(id))
startpoint = int(r.text)
r = requests.get('http://198.100.155.138:5000/read/message/' + str(id))
cryptic = str(r.text)
if (cryptic != messagecheck):
if (flag >= 5):
r = requests.get('http://198.100.155.138:5000/read/nextpoint/' + str(id))
nextpoint = int(r.text)
                    print('[System] - ' + str(100.0 * nextpoint / float(len(pad))) + "% of Pad Used")
flag = 0
else:
flag = flag + 1
message, trash = decrypt(cryptic, startpoint)
print "[Channel] - " + message
messagecheck = cryptic
class sendmessage(threading.Thread): # Sends messages with a thread, and also sends the join server message
def __init__(self, id):
self.username = username
self.id = id
r = requests.get('http://198.100.155.138:5000/read/nextpoint/' + str(id))
startpoint = int(r.text)
print('[System] - You are chatting securely on channel: [' + str(id) + ']')
cryptic, startpointx = encrypt(self.username + " Has Joined!", startpoint)
requests.get("http://198.100.155.138:5000/post/" + str(id) + "/" + str(cryptic) + "/" + str(len('A User Has Joined')))
threading.Thread.__init__(self)
def run(self):
while True:
message = str(raw_input('Message: \n'))
r = requests.get('http://198.100.155.138:5000/read/nextpoint/' + str(id))
startpoint = int(r.text)
cryptic, startpointx = encrypt(self.username + ' : ' + message, startpoint)
requests.get("http://198.100.155.138:5000/post/" + str(id) + "/" + str(cryptic) + "/" + str(len(message)))
id = abs(int(hash(md5('crypto.pad')))) # Hashes the Pad to connect to the channel for it on the server
getmessage(id).start() # Starts the message get thread
sendmessage(id).start() # Starts the message send thread
| mit | 7,149,532,931,238,247,000 | 33.959641 | 126 | 0.51706 | false |
watonyweng/neutron | neutron/db/migration/alembic_migrations/dvr_init_opts.py | 32 | 2933 | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for dvr
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'dvr_host_macs',
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('mac_address', sa.String(length=32),
nullable=False, unique=True),
sa.PrimaryKeyConstraint('host')
)
op.create_table(
'ml2_dvr_port_bindings',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('router_id', sa.String(length=36), nullable=True),
sa.Column('vif_type', sa.String(length=64), nullable=False),
sa.Column('vif_details', sa.String(length=4095),
nullable=False, server_default=''),
sa.Column('vnic_type', sa.String(length=64),
nullable=False, server_default='normal'),
sa.Column('profile', sa.String(length=4095),
nullable=False, server_default=''),
sa.Column('cap_port_filter', sa.Boolean(), nullable=False),
sa.Column('driver', sa.String(length=64), nullable=True),
sa.Column('segment', sa.String(length=36), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'],
ondelete='SET NULL'),
sa.PrimaryKeyConstraint('port_id', 'host')
)
op.create_table(
'csnat_l3_agent_bindings',
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('l3_agent_id', sa.String(length=36), nullable=False),
sa.Column('host_id', sa.String(length=255), nullable=True),
sa.Column('csnat_gw_port_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['csnat_gw_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('router_id')
)
| apache-2.0 | 1,535,259,008,913,660,200 | 44.123077 | 78 | 0.607228 | false |
areski/django | tests/gis_tests/geogapp/tests.py | 20 | 6033 | """
Tests for geography support in PostGIS
"""
from __future__ import unicode_literals
import os
from unittest import skipUnless
from django.contrib.gis.db.models.functions import Area, Distance
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.measure import D
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from ..utils import oracle, postgis
from .models import City, County, Zipcode
@skipUnlessDBFeature("gis_enabled")
class GeographyTest(TestCase):
fixtures = ['initial']
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
@skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic")
def test02_distance_lookup(self):
"Testing GeoQuerySet distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
@skipUnlessDBFeature("has_distance_method", "supports_distance_geodetic")
@ignore_warnings(category=RemovedInDjango20Warning)
def test03_distance_method(self):
"Testing GeoQuerySet.distance() support on non-point geography fields."
# `GeoQuerySet.distance` is not allowed geometry fields.
htown = City.objects.get(name='Houston')
Zipcode.objects.distance(htown.point)
@skipUnless(postgis, "This is a PostGIS-specific test")
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count)
# `@` operator not available.
self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count)
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
self.assertRaises(ValueError, City.objects.get, point__exact=htown.point)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {'name': 'Name',
'state': 'State',
'mpoly': 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
@skipUnlessDBFeature("has_area_method", "supports_distance_geodetic")
@ignore_warnings(category=RemovedInDjango20Warning)
def test06_geography_area(self):
"Testing that Area calculations work on geography columns."
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439100.13586914 if oracle else 5439084.70637573
tol = 5
z = Zipcode.objects.area().get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
@skipUnlessDBFeature("gis_enabled")
class GeographyFunctionTests(TestCase):
fixtures = ['initial']
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_function(self):
"""
Testing Distance() support on non-point geography fields.
"""
if oracle:
ref_dists = [0, 4899.68, 8081.30, 9115.15]
else:
ref_dists = [0, 4891.20, 8071.64, 9123.95]
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.annotate(distance=Distance('poly', htown.point))
for z, ref in zip(qs, ref_dists):
self.assertAlmostEqual(z.distance.m, ref, 2)
@skipUnlessDBFeature("has_Area_function", "supports_distance_geodetic")
def test_geography_area(self):
"""
Testing that Area calculations work on geography columns.
"""
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439100.13587 if oracle else 5439084.70637573
tol = 5
z = Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
| bsd-3-clause | 4,099,946,328,874,764,300 | 44.022388 | 105 | 0.65208 | false |
rishiloyola/bedrock | bedrock/mozorg/tests/test_context_processors.py | 29 | 1553 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.test.client import RequestFactory
from bedrock.base.urlresolvers import reverse
from nose.tools import eq_
from bedrock.mozorg.context_processors import funnelcake_param
from bedrock.mozorg.tests import TestCase
class TestFunnelcakeParam(TestCase):
def setUp(self):
self.rf = RequestFactory()
def _funnelcake(self, url='/', **kwargs):
return funnelcake_param(self.rf.get(url, kwargs))
def test_funnelcake_param_noop(self):
"""Should return an empty dict normally."""
eq_(self._funnelcake(), {})
def test_funnelcake_param_f(self):
"""Should inject funnelcake into context."""
eq_(self._funnelcake(f='5'), {'funnelcake_id': '5'})
eq_(self._funnelcake(f='234'), {'funnelcake_id': '234'})
def test_funnelcake_param_bad(self):
"""Should not inject bad funnelcake into context."""
eq_(self._funnelcake(f='5dude'), {})
eq_(self._funnelcake(f='123456'), {})
def test_funnelcake_param_increment_installer_help(self):
"""FC param should be +1 on the firefox/installer-help/ page.
Bug 933852.
"""
url = reverse('firefox.installer-help')
ctx = self._funnelcake(url, f='20')
eq_(ctx['funnelcake_id'], '21')
ctx = self._funnelcake(url, f='10')
eq_(ctx['funnelcake_id'], '11')
| mpl-2.0 | 4,893,414,477,847,255,000 | 33.511111 | 69 | 0.642627 | false |
jdugge/QGIS | python/plugins/processing/script/ScriptUtils.py | 12 | 4991 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ScriptUtils.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
from qgis.processing import alg as algfactory
import os
import inspect
import importlib
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (Qgis,
QgsApplication,
QgsProcessingAlgorithm,
QgsProcessingFeatureBasedAlgorithm,
QgsMessageLog
)
from processing.core.ProcessingConfig import ProcessingConfig
from processing.tools.system import mkdir, userFolder
scriptsRegistry = dict()
SCRIPTS_FOLDERS = "SCRIPTS_FOLDERS"
def defaultScriptsFolder():
folder = str(os.path.join(userFolder(), "scripts"))
mkdir(folder)
return os.path.abspath(folder)
def scriptsFolders():
folder = ProcessingConfig.getSetting(SCRIPTS_FOLDERS)
if folder is not None:
return folder.split(";")
else:
return [defaultScriptsFolder()]
def loadAlgorithm(moduleName, filePath):
global scriptsRegistry
try:
spec = importlib.util.spec_from_file_location(moduleName, filePath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
try:
alg = algfactory.instances.pop().createInstance()
scriptsRegistry[alg.name()] = filePath
return alg
except IndexError:
for x in dir(module):
obj = getattr(module, x)
if inspect.isclass(obj) and issubclass(obj, (QgsProcessingAlgorithm, QgsProcessingFeatureBasedAlgorithm)) and obj.__name__ not in ("QgsProcessingAlgorithm", "QgsProcessingFeatureBasedAlgorithm"):
o = obj()
scriptsRegistry[o.name()] = filePath
return o
except (ImportError, AttributeError, TypeError) as e:
QgsMessageLog.logMessage(QCoreApplication.translate("ScriptUtils", "Could not import script algorithm '{}' from '{}'\n{}").format(moduleName, filePath, str(e)),
QCoreApplication.translate("ScriptUtils", "Processing"),
Qgis.Critical)
def findAlgorithmSource(name):
global scriptsRegistry
try:
return scriptsRegistry[name]
    except KeyError:
return None
def resetScriptFolder(folder):
"""Check if script folder exist. If not, notify and try to check if it is absolute to another user setting.
If so, modify folder to change user setting to the current user setting."""
newFolder = folder
if os.path.exists(newFolder):
return newFolder
QgsMessageLog.logMessage(QgsApplication .translate("loadAlgorithms", "Script folder {} does not exist").format(newFolder),
QgsApplication.translate("loadAlgorithms", "Processing"),
Qgis.Warning)
if not os.path.isabs(newFolder):
return None
# try to check if folder is absolute to other QgsApplication.qgisSettingsDirPath()
# isolate "QGIS3/profiles/"
appIndex = -4
profileIndex = -3
currentSettingPath = QgsApplication.qgisSettingsDirPath()
paths = currentSettingPath.split(os.sep)
commonSettingPath = os.path.join(paths[appIndex], paths[profileIndex])
if commonSettingPath in newFolder:
# strip not common folder part. e.g. preserve the profile path
# stripping the heading part that come from another location
tail = newFolder[newFolder.find(commonSettingPath):]
# tail folder with the actual userSetting path
header = os.path.join(os.sep, os.path.join(*paths[:appIndex]))
newFolder = os.path.join(header, tail)
# skip if it does not exist
if not os.path.exists(newFolder):
return None
QgsMessageLog.logMessage(QgsApplication .translate("loadAlgorithms", "Script folder changed into {}").format(newFolder),
QgsApplication.translate("loadAlgorithms", "Processing"),
Qgis.Warning)
return newFolder
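# Editor's note (illustrative, assumes the default "QGIS3/profiles" layout): if
# the stored setting is "/home/old/.local/share/QGIS/QGIS3/profiles/default/scripts"
# while the current settings dir is "/home/new/.local/share/QGIS/QGIS3/profiles/default/",
# resetScriptFolder() finds the common part "QGIS3/profiles" in the stored path,
# keeps the tail "QGIS3/profiles/default/scripts", and re-joins it onto the
# current user's prefix, yielding
# "/home/new/.local/share/QGIS/QGIS3/profiles/default/scripts" (provided it exists).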
| gpl-2.0 | 2,745,231,356,632,388,600 | 36.810606 | 211 | 0.584652 | false |
ChameleonCloud/horizon | openstack_dashboard/test/unit/test_error_pages.py | 10 | 1325 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os import path
from django.conf import settings
from openstack_dashboard.test import helpers as test
class ErrorPageTests(test.TestCase):
"""Tests for error pages."""
urls = 'openstack_dashboard.test.error_pages_urls'
def test_500_error(self):
with self.settings(
TEMPLATES=[{
'DIRS': [path.join(settings.ROOT_PATH, 'templates')],
'BACKEND': ('django.template.backends.django.'
'DjangoTemplates')
}],
ROOT_URLCONF=self.urls):
response = self.client.get('/500/')
self.assertIn(b'Server error', response.content)
| apache-2.0 | -1,478,395,362,107,723,000 | 35.805556 | 78 | 0.646792 | false |
jesseditson/rethinkdb | test/rql_test/connections/http_support/werkzeug/testsuite/security.py | 145 | 4264 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.security
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the security helpers.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.security import check_password_hash, generate_password_hash, \
safe_join, pbkdf2_hex, safe_str_cmp
class SecurityTestCase(WerkzeugTestCase):
def test_safe_str_cmp(self):
assert safe_str_cmp('a', 'a') is True
assert safe_str_cmp(b'a', u'a') is True
assert safe_str_cmp('a', 'b') is False
assert safe_str_cmp(b'aaa', 'aa') is False
assert safe_str_cmp(b'aaa', 'bbb') is False
assert safe_str_cmp(b'aaa', u'aaa') is True
def test_password_hashing(self):
hash0 = generate_password_hash('default')
assert check_password_hash(hash0, 'default')
assert hash0.startswith('pbkdf2:sha1:1000$')
hash1 = generate_password_hash('default', 'sha1')
hash2 = generate_password_hash(u'default', method='sha1')
assert hash1 != hash2
assert check_password_hash(hash1, 'default')
assert check_password_hash(hash2, 'default')
assert hash1.startswith('sha1$')
assert hash2.startswith('sha1$')
fakehash = generate_password_hash('default', method='plain')
assert fakehash == 'plain$$default'
assert check_password_hash(fakehash, 'default')
mhash = generate_password_hash(u'default', method='md5')
assert mhash.startswith('md5$')
assert check_password_hash(mhash, 'default')
legacy = 'md5$$c21f969b5f03d33d43e04f8f136e7682'
assert check_password_hash(legacy, 'default')
legacy = u'md5$$c21f969b5f03d33d43e04f8f136e7682'
assert check_password_hash(legacy, 'default')
def test_safe_join(self):
assert safe_join('foo', 'bar/baz') == os.path.join('foo', 'bar/baz')
assert safe_join('foo', '../bar/baz') is None
if os.name == 'nt':
assert safe_join('foo', 'foo\\bar') is None
def test_pbkdf2(self):
def check(data, salt, iterations, keylen, expected):
rv = pbkdf2_hex(data, salt, iterations, keylen)
self.assert_equal(rv, expected)
# From RFC 6070
check('password', 'salt', 1, None,
'0c60c80f961f0e71f3a9b524af6012062fe037a6')
check('password', 'salt', 1, 20,
'0c60c80f961f0e71f3a9b524af6012062fe037a6')
check('password', 'salt', 2, 20,
'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957')
check('password', 'salt', 4096, 20,
'4b007901b765489abead49d926f721d065a429c1')
check('passwordPASSWORDpassword', 'saltSALTsaltSALTsaltSALTsaltSALTsalt',
4096, 25, '3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038')
check('pass\x00word', 'sa\x00lt', 4096, 16,
'56fa6aa75548099dcc37d7f03425e0c3')
# This one is from the RFC but it just takes for ages
##check('password', 'salt', 16777216, 20,
## 'eefe3d61cd4da4e4e9945b3d6ba2158c2634e984')
# From Crypt-PBKDF2
check('password', 'ATHENA.MIT.EDUraeburn', 1, 16,
'cdedb5281bb2f801565a1122b2563515')
check('password', 'ATHENA.MIT.EDUraeburn', 1, 32,
'cdedb5281bb2f801565a1122b25635150ad1f7a04bb9f3a333ecc0e2e1f70837')
check('password', 'ATHENA.MIT.EDUraeburn', 2, 16,
'01dbee7f4a9e243e988b62c73cda935d')
check('password', 'ATHENA.MIT.EDUraeburn', 2, 32,
'01dbee7f4a9e243e988b62c73cda935da05378b93244ec8f48a99e61ad799d86')
check('password', 'ATHENA.MIT.EDUraeburn', 1200, 32,
'5c08eb61fdf71e4e4ec3cf6ba1f5512ba7e52ddbc5e5142f708a31e2e62b1e13')
check('X' * 64, 'pass phrase equals block size', 1200, 32,
'139c30c0966bc32ba55fdbf212530ac9c5ec59f1a452f5cc9ad940fea0598ed1')
check('X' * 65, 'pass phrase exceeds block size', 1200, 32,
'9ccad6d468770cd51b10e6a68721be611a8b4d282601db3b36be9246915ec82a')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SecurityTestCase))
return suite
| agpl-3.0 | -6,947,125,455,598,610,000 | 39.609524 | 81 | 0.642589 | false |
vermouthmjl/scikit-learn | sklearn/metrics/classification.py | 1 | 69294 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# Bernardo Stein <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
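# Editor's note (illustrative, not part of scikit-learn):
# _weighted_sum(np.array([1., 0., 1.]), None) returns 2.0; with
# sample_weight=[0.5, 1.0, 0.5] it returns the weighted sum 1.0, and with
# normalize=True it returns the (weighted) mean of the scores instead of their sum.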
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
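# Editor's note (illustrative worked example, not part of scikit-learn's
# docstring): for y1 = [0, 1, 1, 0] and y2 = [0, 1, 0, 0] the normalized
# confusion matrix is [[0.5, 0.0], [0.25, 0.25]], so p_o = 0.75 and
# p_e = 0.75 * 0.5 + 0.25 * 0.5 = 0.5, giving
# kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5:
#
#     >>> cohen_kappa_score([0, 1, 1, 0], [0, 1, 0, 0])
#     0.5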
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 and inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
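# Editor's note (illustrative, not part of scikit-learn): with
# numerator=np.array([2., 0.]) and denominator=np.array([2., 0.]) this helper
# returns [1.0, 0.0] and, when the metric is listed in warn_for, emits an
# UndefinedMetricWarning for the label with no predicted samples instead of
# propagating the division by zero.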
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
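def _macro_precision_by_hand_sketch():
    # Illustrative sketch, not part of the original module: the macro value in
    # the docstring above is the unweighted mean of the per-class precisions.
    # For y_true = [0, 1, 2, 0, 1, 2] and y_pred = [0, 2, 1, 0, 0, 1]:
    #   class 0: 2 of the 3 predicted 0s are correct -> 2/3
    #   class 1: 0 of the 2 predicted 1s are correct -> 0.0
    #   class 2: 0 of the 1 predicted 2s are correct -> 0.0
    return (2.0 / 3 + 0.0 + 0.0) / 3  # approximately 0.22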
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
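def _macro_recall_by_hand_sketch():
    # Illustrative sketch, not part of the original module: the macro value in
    # the docstring above is the unweighted mean of the per-class recalls.
    # For y_true = [0, 1, 2, 0, 1, 2] and y_pred = [0, 2, 1, 0, 0, 1]:
    #   class 0: both true 0s are recovered   -> 1.0
    #   class 1: neither true 1 is recovered  -> 0.0
    #   class 2: neither true 2 is recovered  -> 0.0
    return (1.0 + 0.0 + 0.0) / 3  # approximately 0.33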
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
target_names = ['%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
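def _avg_total_row_sketch():
    # Illustrative sketch, not part of the original module: the 'avg / total'
    # row written above is the support-weighted mean of each per-class column.
    # For the docstring example, precision = (0.50*1 + 0.00*1 + 1.00*3) / 5.
    per_class_precision = np.array([0.50, 0.00, 1.00])
    support = np.array([1, 1, 3])
    return np.average(per_class_precision, weights=support)  # 0.70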
def hamming_loss(y_true, y_pred, classes=None, sample_weight=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
        Return the average Hamming loss between elements of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred`` which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. Hamming loss is more forgiving in that it penalizes only the
    individual labels.
    The Hamming loss is upper bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(classes) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
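def _multilabel_hamming_sketch():
    # Illustrative sketch, not part of the original module: for the multilabel
    # doctest above, 3 of the 2 * 2 = 4 individual label assignments differ,
    # so the loss is 3 / 4 = 0.75 (assuming unit sample weights).
    y_true = np.array([[0, 1], [1, 1]])
    y_pred = np.zeros((2, 2))
    n_differences = np.count_nonzero(y_true - y_pred)
    return n_differences / float(y_true.shape[0] * y_true.shape[1])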
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
y_pred = check_array(y_pred, ensure_2d=False)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
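def _log_loss_by_hand_sketch():
    # Illustrative sketch, not part of the original module: the doctest value
    # above (0.21616...) is the mean negative log of the probability assigned
    # to each sample's true class; the probability columns follow the sorted
    # labels ['ham', 'spam'], so column 1 is 'spam'.
    y_true = ["spam", "ham", "ham", "spam"]
    y_prob = np.array([[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
    true_col = [1 if label == "spam" else 0 for label in y_true]
    return np.mean([-np.log(y_prob[i, col]) for i, col in enumerate(true_col)])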
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
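def _binary_hinge_by_hand_sketch():
    # Illustrative sketch, not part of the original module: reproducing the
    # binary doctest above with the decision values rounded to two places.
    # margin = y_true * pred_decision, and each loss is max(0, 1 - margin).
    y_true = np.array([-1, 1, 1])
    pred_decision = np.array([-2.18, 2.36, 0.09])
    losses = np.maximum(0, 1 - y_true * pred_decision)
    return losses.mean()  # approximately 0.30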
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
https://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
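def _brier_score_by_hand_sketch():
    # Illustrative sketch, not part of the original module: the first doctest
    # above is the mean squared gap between predicted probability and outcome,
    # ((0.1)**2 + (0.1)**2 + (0.2)**2 + (0.3)**2) / 4 = 0.0375.
    y_true = np.array([0, 1, 1, 0])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    return np.mean((y_true - y_prob) ** 2)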
| bsd-3-clause | -5,425,747,752,748,870,000 | 37.031833 | 87 | 0.607037 | false |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/encodings/utf_32.py | 180 | 5128 | """
Python 'utf-32' Codec
"""
import codecs, sys
### Codec APIs
encode = codecs.utf_32_encode
def decode(input, errors='strict'):
return codecs.utf_32_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
codecs.IncrementalEncoder.__init__(self, errors)
self.encoder = None
def encode(self, input, final=False):
if self.encoder is None:
result = codecs.utf_32_encode(input, self.errors)[0]
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
return result
return self.encoder(input, self.errors)[0]
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.encoder = None
def getstate(self):
# state info we return to the caller:
# 0: stream is in natural order for this platform
# 2: endianness hasn't been determined yet
# (we're never writing in unnatural order)
return (2 if self.encoder is None else 0)
def setstate(self, state):
if state:
self.encoder = None
else:
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def __init__(self, errors='strict'):
codecs.BufferedIncrementalDecoder.__init__(self, errors)
self.decoder = None
def _buffer_decode(self, input, errors, final):
if self.decoder is None:
(output, consumed, byteorder) = \
codecs.utf_32_ex_decode(input, errors, 0, final)
if byteorder == -1:
self.decoder = codecs.utf_32_le_decode
elif byteorder == 1:
self.decoder = codecs.utf_32_be_decode
elif consumed >= 4:
raise UnicodeError("UTF-32 stream does not start with BOM")
return (output, consumed)
return self.decoder(input, self.errors, final)
def reset(self):
codecs.BufferedIncrementalDecoder.reset(self)
self.decoder = None
def getstate(self):
        # additional state info from the base class must be None here,
# as it isn't passed along to the caller
state = codecs.BufferedIncrementalDecoder.getstate(self)[0]
# additional state info we pass to the caller:
# 0: stream is in natural order for this platform
# 1: stream is in unnatural order
# 2: endianness hasn't been determined yet
if self.decoder is None:
return (state, 2)
addstate = int((sys.byteorder == "big") !=
(self.decoder is codecs.utf_32_be_decode))
return (state, addstate)
def setstate(self, state):
# state[1] will be ignored by BufferedIncrementalDecoder.setstate()
codecs.BufferedIncrementalDecoder.setstate(self, state)
state = state[1]
if state == 0:
self.decoder = (codecs.utf_32_be_decode
if sys.byteorder == "big"
else codecs.utf_32_le_decode)
elif state == 1:
self.decoder = (codecs.utf_32_le_decode
if sys.byteorder == "big"
else codecs.utf_32_be_decode)
else:
self.decoder = None
class StreamWriter(codecs.StreamWriter):
def __init__(self, stream, errors='strict'):
self.encoder = None
codecs.StreamWriter.__init__(self, stream, errors)
def reset(self):
codecs.StreamWriter.reset(self)
self.encoder = None
def encode(self, input, errors='strict'):
if self.encoder is None:
result = codecs.utf_32_encode(input, errors)
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
return result
else:
return self.encoder(input, errors)
class StreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors='strict'):
(object, consumed, byteorder) = \
codecs.utf_32_ex_decode(input, errors, 0, False)
if byteorder == -1:
self.decode = codecs.utf_32_le_decode
elif byteorder == 1:
self.decode = codecs.utf_32_be_decode
elif consumed>=4:
raise UnicodeError("UTF-32 stream does not start with BOM")
return (object, consumed)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-32',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
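def _roundtrip_sketch():
    # Illustrative sketch, not part of the original module: the codec writes a
    # BOM on the first encode call and then keeps the platform byte order;
    # decoding reads the byte order back from that BOM.
    data = "abc".encode("utf-32")      # 4-byte BOM + 4 bytes per code point
    assert len(data) == 16
    return data.decode("utf-32")       # 'abc'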
| apache-2.0 | 5,326,642,413,882,388,000 | 33.186667 | 75 | 0.585998 | false |
ianblenke/awsebcli | ebcli/bundled/botocore/vendored/requests/compat.py | 114 | 2601 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
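def _compat_usage_sketch(url):
    # Illustrative sketch, not part of the original module: callers import
    # these names from here so the same code runs on Python 2 and 3, e.g.
    #     from .compat import urlparse, str
    parts = urlparse(url)
    return str(parts.netloc)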
| apache-2.0 | 7,107,110,945,369,011,000 | 21.617391 | 132 | 0.639369 | false |
w3nd1go/android_external_skia | platform_tools/android/gyp_gen/makefile_writer.py | 25 | 7208 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Functions for creating an Android.mk from already created dictionaries.
"""
import os
def write_group(f, name, items, append):
"""Helper function to list all names passed to a variable.
Args:
f: File open for writing (Android.mk)
name: Name of the makefile variable (e.g. LOCAL_CFLAGS)
items: list of strings to be passed to the variable.
append: Whether to append to the variable or overwrite it.
"""
if not items:
return
# Copy the list so we can prepend it with its name.
items_to_write = list(items)
if append:
items_to_write.insert(0, '%s +=' % name)
else:
items_to_write.insert(0, '%s :=' % name)
f.write(' \\\n\t'.join(items_to_write))
f.write('\n\n')
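def _write_group_sketch(f):
    # Illustrative sketch, not part of the original module: with append=False
    # the call below writes 'LOCAL_SRC_FILES :=' followed by each file on its
    # own tab-indented continuation line, then a blank line; append=True would
    # emit '+=' instead of ':='.
    write_group(f, 'LOCAL_SRC_FILES', ['a.cpp', 'b.cpp'], False)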
def write_local_vars(f, var_dict, append, name):
"""Helper function to write all the members of var_dict to the makefile.
Args:
f: File open for writing (Android.mk)
var_dict: VarsDict holding the unique values for one configuration.
append: Whether to append to each makefile variable or overwrite it.
name: If not None, a string to be appended to each key.
"""
for key in var_dict.keys():
_key = key
_items = var_dict[key]
if key == 'LOCAL_CFLAGS':
# Always append LOCAL_CFLAGS. This allows us to define some early on in
# the makefile and not overwrite them.
_append = True
elif key == 'DEFINES':
# For DEFINES, we want to append to LOCAL_CFLAGS.
_append = True
_key = 'LOCAL_CFLAGS'
_items_with_D = []
for define in _items:
_items_with_D.append('-D' + define)
_items = _items_with_D
elif key == 'KNOWN_TARGETS':
# KNOWN_TARGETS are not needed in the final make file.
continue
else:
_append = append
if name:
_key += '_' + name
write_group(f, _key, _items, _append)
AUTOGEN_WARNING = (
"""
###############################################################################
#
# THIS FILE IS AUTOGENERATED BY GYP_TO_ANDROID.PY. DO NOT EDIT.
#
# For bugs, please contact [email protected] or [email protected]
#
###############################################################################
"""
)
DEBUGGING_HELP = (
"""
###############################################################################
#
# PROBLEMS WITH SKIA DEBUGGING?? READ THIS...
#
# The debug build results in changes to the Skia headers. This means that those
# using libskia must also be built with the debug version of the Skia headers.
# There are a few scenarios where this comes into play:
#
# (1) You're building debug code that depends on libskia.
# (a) If libskia is built in release, then define SK_RELEASE when building
# your sources.
# (b) If libskia is built with debugging (see step 2), then no changes are
# needed since your sources and libskia have been built with SK_DEBUG.
# (2) You're building libskia in debug mode.
# (a) RECOMMENDED: You can build the entire system in debug mode. Do this by
# updating your build/core/config.mk to include -DSK_DEBUG on the line
# that defines COMMON_GLOBAL_CFLAGS
# (b) You can update all the users of libskia to define SK_DEBUG when they are
# building their sources.
#
# NOTE: If neither SK_DEBUG or SK_RELEASE are defined then Skia checks NDEBUG to
# determine which build type to use.
###############################################################################
"""
)
SKIA_TOOLS = (
"""
#############################################################
# Build the skia tools
#
# benchmark (timings)
include $(BASE_PATH)/bench/Android.mk
# diamond-master (one test to rule them all)
include $(BASE_PATH)/dm/Android.mk
"""
)
class VarsDictData(object):
"""Helper class to keep a VarsDict along with a name and optional condition.
"""
def __init__(self, vars_dict, name, condition=None):
"""Create a new VarsDictData.
Args:
vars_dict: A VarsDict. Can be accessed via self.vars_dict.
name: Name associated with the VarsDict. Can be accessed via
self.name.
condition: Optional string representing a condition. If not None,
used to create a conditional inside the makefile.
"""
self.vars_dict = vars_dict
self.condition = condition
self.name = name
def write_local_path(f):
"""Add the LOCAL_PATH line to the makefile.
Args:
f: File open for writing.
"""
f.write('LOCAL_PATH:= $(call my-dir)\n')
def write_clear_vars(f):
"""Add the CLEAR_VARS line to the makefile.
Args:
f: File open for writing.
"""
f.write('include $(CLEAR_VARS)\n')
def write_android_mk(target_dir, common, deviations_from_common):
"""Given all the variables, write the final make file.
Args:
target_dir: The full path to the directory to write Android.mk, or None
to use the current working directory.
common: VarsDict holding variables definitions common to all
configurations.
deviations_from_common: List of VarsDictData, one for each possible
configuration. VarsDictData.name will be appended to each key before
writing it to the makefile. VarsDictData.condition, if not None, will be
written to the makefile as a condition to determine whether to include
VarsDictData.vars_dict.
"""
target_file = 'Android.mk'
if target_dir:
target_file = os.path.join(target_dir, target_file)
with open(target_file, 'w') as f:
f.write(AUTOGEN_WARNING)
f.write('BASE_PATH := $(call my-dir)\n')
write_local_path(f)
f.write(DEBUGGING_HELP)
write_clear_vars(f)
# need flags to enable feedback driven optimization (FDO) when requested
# by the build system.
f.write('LOCAL_FDO_SUPPORT := true\n')
f.write('ifneq ($(strip $(TARGET_FDO_CFLAGS)),)\n')
f.write('\t# This should be the last -Oxxx specified in LOCAL_CFLAGS\n')
f.write('\tLOCAL_CFLAGS += -O2\n')
f.write('endif\n\n')
f.write('LOCAL_ARM_MODE := thumb\n')
# need a flag to tell the C side when we're on devices with large memory
# budgets (i.e. larger than the low-end devices that initially shipped)
# On arm, only define the flag if it has VFP. For all other architectures,
# always define the flag.
f.write('ifeq ($(TARGET_ARCH),arm)\n')
f.write('\tifeq ($(ARCH_ARM_HAVE_VFP),true)\n')
f.write('\t\tLOCAL_CFLAGS += -DANDROID_LARGE_MEMORY_DEVICE\n')
f.write('\tendif\n')
f.write('else\n')
f.write('\tLOCAL_CFLAGS += -DANDROID_LARGE_MEMORY_DEVICE\n')
f.write('endif\n\n')
f.write('# used for testing\n')
f.write('#LOCAL_CFLAGS += -g -O0\n\n')
f.write('ifeq ($(NO_FALLBACK_FONT),true)\n')
f.write('\tLOCAL_CFLAGS += -DNO_FALLBACK_FONT\n')
f.write('endif\n\n')
write_local_vars(f, common, False, None)
for data in deviations_from_common:
if data.condition:
f.write('ifeq ($(%s), true)\n' % data.condition)
write_local_vars(f, data.vars_dict, True, data.name)
if data.condition:
f.write('endif\n\n')
f.write('include $(BUILD_SHARED_LIBRARY)\n')
f.write(SKIA_TOOLS)
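def _write_android_mk_sketch(common_vars, arm_vars):
    # Illustrative sketch, not part of the original module: a typical call
    # writes Android.mk into the current working directory, with the second
    # VarsDict only emitted inside an 'ifeq ($(ARM_HAVE_NEON), true)' guard.
    # The variable names and the condition here are hypothetical.
    deviations = [VarsDictData(arm_vars, 'arm', condition='ARM_HAVE_NEON')]
    write_android_mk(None, common_vars, deviations)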
| bsd-3-clause | 2,863,019,524,601,398,300 | 30.475983 | 80 | 0.626804 | false |
dmitriy0611/django | tests/urlpatterns_reverse/tests.py | 7 | 42428 | # -*- coding: utf-8 -*-
"""
Unit tests for reverse URL lookups.
"""
from __future__ import unicode_literals
import sys
import unittest
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.conf.urls import include
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.core.urlresolvers import (
NoReverseMatch, RegexURLPattern, RegexURLResolver, Resolver404,
ResolverMatch, get_callable, get_resolver, resolve, reverse, reverse_lazy,
)
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import (
SimpleTestCase, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import override_script_prefix
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from . import middleware, urlconf_outer, views
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', None, '', 'normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/view_class/42/37/', 'view-class', None, '', 'view-class', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/normal/42/37/', 'inc-normal-view', None, '', 'inc-normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/view_class/42/37/', 'inc-view-class', None, '', 'inc-view-class', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', None, '', 'mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
('/included/mixed_args/42/37/', 'inc-mixed-args', None, '', 'inc-mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
('/included/12/mixed_args/42/37/', 'inc-mixed-args', None, '', 'inc-mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
# Unnamed views should have None as the url_name. Regression data for #21157.
('/unnamed/normal/42/37/', None, None, '', 'urlpatterns_reverse.views.empty_view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/unnamed/view_class/42/37/', None, None, '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', None, '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
('/included/no_kwargs/42/37/', 'inc-no-kwargs', None, '', 'inc-no-kwargs', views.empty_view, ('42', '37'), {}),
('/included/12/no_kwargs/42/37/', 'inc-no-kwargs', None, '', 'inc-no-kwargs', views.empty_view, ('12', '42', '37'), {}),
# Namespaces
('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/normal/42/37/', 'inc-normal-view', None, 'inc-ns1', 'inc-ns1:inc-normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
# Nested namespaces
('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
# Namespaces capturing variables
('/inc70/', 'inner-nothing', None, 'inc-ns5', 'inc-ns5:inner-nothing', views.empty_view, tuple(), {'outer': '70'}),
('/inc78/extra/foobar/', 'inner-extra', None, 'inc-ns5', 'inc-ns5:inner-extra', views.empty_view, tuple(), {'outer': '78', 'extra': 'foobar'}),
)
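# Illustrative sketch, not part of the original test data: each entry above is
# meant to be checked against resolve() roughly as follows.
def _check_resolve_entry_sketch(entry):
    path, url_name, app_name, namespace, view_name, func, args, kwargs = entry
    match = resolve(path)
    assert (match.url_name, match.app_name, match.namespace) == (url_name, app_name, namespace)
    assert match.view_name == view_name
    assert (match.func, match.args, match.kwargs) == (func, args, kwargs)
    return match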
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
('test', '/test/1', [], {}),
('test2', '/test/2', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
# Regression for #9038
# These views are resolved by method name. Each method is deployed twice -
# once with an explicit argument, and once using the default value on
# the method. This is potentially ambiguous, as you have to pick the
# correct view for the arguments provided.
('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/', [], {}),
('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/10/', [], {'arg1': 10}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
# Security tests
('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
RegexURLResolver should raise an exception when no urlpatterns exist.
"""
resolver = RegexURLResolver(r'^$', settings.ROOT_URLCONF)
self.assertRaisesMessage(
ImproperlyConfigured,
"The included urlconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see valid patterns in "
"the file then the issue is probably caused by a circular import.",
getattr, resolver, 'url_patterns'
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
@ignore_warnings(category=RemovedInDjango20Warning)
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
self.assertRaises(NoReverseMatch, reverse, None)
@override_script_prefix('/{{invalid}}/')
def test_prefix_braces(self):
self.assertEqual(
'/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include')
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix('/bogus)/'):
self.assertEqual(
'/bogus)/includes/non_path_include/',
reverse('non_path_include')
)
with override_script_prefix('/(bogus)/'):
self.assertEqual(
'/(bogus)/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/bump%20map/')
def test_prefix_format_char(self):
self.assertEqual(
'/bump%2520map/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/%7Eme/')
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))
def test_patterns_reported(self):
# Regression for #17076
try:
# this url exists, but requires an argument
reverse("people", args=[])
except NoReverseMatch as e:
pattern_description = r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"
self.assertIn(pattern_description, str(e))
else:
# we can't use .assertRaises, since we want to inspect the
# exception
self.fail("Expected a NoReverseMatch, but none occurred.")
@override_script_prefix('/script:name/')
def test_script_name_escaping(self):
self.assertEqual(
reverse('optional', args=['foo:bar']),
'/script:name/optional/foo:bar/'
)
def test_reverse_returns_unicode(self):
name, expected, args, kwargs = test_data[0]
self.assertIsInstance(
reverse(name, args=args, kwargs=kwargs),
six.text_type
)
class ResolverTests(unittest.TestCase):
def test_resolver_repr(self):
"""
Test repr of RegexURLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced urlconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<RegexURLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
try:
resolver.resolve(proxy_url)
except TypeError:
self.fail('Failed to coerce lazy object to text')
def test_non_regex(self):
"""
Verifies that we raise a Resolver404 if what we are resolving doesn't
meet the basic requirements of a path to match - i.e., at the very
least, it matches the root pattern '^/'. We must never return None
from resolve, or we will get a TypeError further down the line.
Regression for #10834.
"""
self.assertRaises(Resolver404, resolve, '')
self.assertRaises(Resolver404, resolve, 'a')
self.assertRaises(Resolver404, resolve, '\\')
self.assertRaises(Resolver404, resolve, '.')
def test_404_tried_urls_have_names(self):
"""
Verifies that the list of URLs that come back from a Resolver404
exception contains a list in the right format for printing out in
the DEBUG 404 page with both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a non-existent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/non-existent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
try:
resolve('/included/non-existent-url', urlconf=urls)
self.fail('resolve did not raise a 404')
except Resolver404 as e:
# make sure we at least matched the root ('/') url resolver:
self.assertIn('tried', e.args[0])
tried = e.args[0]['tried']
self.assertEqual(len(e.args[0]['tried']), len(url_types_names), 'Wrong number of tried URLs returned. Expected %s, got %s.' % (len(url_types_names), len(e.args[0]['tried'])))
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
                    self.assertIsInstance(t, e['type'], '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(t.name, e['name'], 'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name))
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
User.objects.create_user('alfred', '[email protected]', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.login(username='alfred', password='testpw')
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
if six.PY2:
self.assertEqual(
b'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
Test that reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
self.write_settings('settings.py', extra="""
from django.core.urlresolvers import reverse_lazy
LOGIN_URL = reverse_lazy('login')""")
def tearDown(self):
self.remove_settings('settings.py')
def test_lazy_in_settings(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj(object):
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
self.assertRaises(NoReverseMatch, redirect, 'not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
# (previously, the below would resolve in a UnicodeEncodeError from __import__ )
res = redirect('/æøå.abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res.url, 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_reverse_by_path_nested(self):
# Views that are added to urlpatterns using include() should be
# reversible by dotted path.
self.assertEqual(reverse('urlpatterns_reverse.views.nested_view'), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
self.assertRaises(NoReverseMatch, redirect, absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', args=[37, 42])
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
def test_ambiguous_urlpattern(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing')
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', args=[37, 42])
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', kwargs={'arg1': 42, 'arg2': 37})
def test_non_existent_namespace(self):
"Non-existent namespaces raise errors"
self.assertRaises(NoReverseMatch, reverse, 'blahblah:urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'test-ns1:blahblah:urlobject-view')
def test_normal_name(self):
"Normal lookups work as expected"
self.assertEqual('/normal/', reverse('normal-view'))
self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37, 42]))
self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/', reverse('special-view'))
def test_simple_included_name(self):
"Normal lookups work on names included from other patterns"
self.assertEqual('/included/normal/', reverse('inc-normal-view'))
self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37, 42]))
self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/included/+%5C$*/', reverse('inc-special-view'))
def test_namespace_object(self):
"Dynamic URL objects can be found using a namespace"
self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
def test_embedded_namespace_object(self):
"Namespaces can be installed anywhere in the URL pattern tree"
self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view'))
self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view'))
def test_namespace_pattern(self):
"Namespaces can be applied to include()'d urlpatterns"
self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual('/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
def test_namespace_pattern_with_variable_prefix(self):
"When using an include with namespaces when there is a regex variable in front of it"
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42, 'arg1': 37, 'arg2': 4}))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
def test_multiple_namespace_pattern(self):
"Namespaces can be embedded"
self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/ns-included1/test3/inner/42/37/', reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
def test_nested_namespace_pattern(self):
"Namespaces can be nested"
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/37/42/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/42/37/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view'))
def test_app_lookup_object(self):
"A default application namespace can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42]))
self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
def test_app_lookup_object_with_default(self):
"A default application namespace is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
self.assertEqual('/included/test3/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42], current_app='test-ns3'))
self.assertEqual('/included/test3/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='test-ns3'))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3'))
def test_app_lookup_object_without_default(self):
"An application namespace without a default is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42]))
self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
self.assertEqual('/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42], current_app='other-ns1'))
self.assertEqual('/other1/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='other-ns1'))
self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
def test_special_chars_namespace(self):
self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view'))
self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37, 42]))
self.assertEqual('/+%5C$*/included/normal/42/37/', reverse('special:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view'))
def test_namespaces_with_variables(self):
"Namespace prefixes can capture variables: see #15900"
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer': '78', 'extra': 'foobar'}))
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78', 'foobar']))
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a response middleware.
"""
message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = RegexURLResolver(r'^$', urlconf)
self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
def test_named_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.resolver.resolve_error_handler(400), handler)
self.assertEqual(self.resolver.resolve_error_handler(404), handler)
self.assertEqual(self.resolver.resolve_error_handler(500), handler)
    def test_callable_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.callable_resolver.resolve_error_handler(400), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(404), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(500), handler)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_full_import')
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
try:
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 404 handler")
try:
self.assertRaises(ValueError, self.client.get, '/bad_view/')
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 500 handler")
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if urlconf is None"""
def test_no_handler_exception(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
def test_urlpattern_resolve(self):
for path, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
# Test legacy support for extracting "function, args, kwargs"
match_func, match_args, match_kwargs = resolve(path)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# Test ResolverMatch capabilities.
match = resolve(path)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# ... and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):
def test_erroneous_resolve(self):
self.assertRaises(ImportError, self.client.get, '/erroneous_inner/')
self.assertRaises(ImportError, self.client.get, '/erroneous_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_inner/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable-dotted/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable-object/')
# Regression test for #21157
self.assertRaises(ImportError, self.client.get, '/erroneous_unqualified/')
def test_erroneous_reverse(self):
"""
Ensure that a useful exception is raised when a regex is invalid in the
URLConf (#6170).
"""
# The regex error will be hit before NoReverseMatch can be raised
self.assertRaises(ImproperlyConfigured, reverse, 'whatever blah blah')
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
# passing a callable should return the callable
self.assertEqual(get_callable(empty_view), empty_view)
def test_exceptions(self):
# A missing view (identified by an AttributeError) should raise
# ViewDoesNotExist, ...
with six.assertRaisesRegex(self, ViewDoesNotExist, ".*View does not exist in.*"):
get_callable('urlpatterns_reverse.views.i_should_not_exist')
# ... but if the AttributeError is caused by something else don't
# swallow it.
with self.assertRaises(AttributeError):
get_callable('urlpatterns_reverse.views_broken.i_am_broken')
class IncludeTests(SimpleTestCase):
def test_include_app_name_but_no_namespace(self):
msg = "Must specify a namespace if specifying app_name."
with self.assertRaisesMessage(ValueError, msg):
include('urls', app_name='bar')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
'/lookahead-/a-city/',
'/lookbehind-/a-city/',
'/lookahead+/a-city/',
'/lookbehind+/a-city/',
]
for test_url in test_urls:
match = resolve(test_url)
self.assertEqual(match.kwargs, {'city': 'a-city'})
def test_invalid_resolve(self):
test_urls = [
'/lookahead-/not-a-city/',
'/lookbehind-/not-a-city/',
'/lookahead+/other-city/',
'/lookbehind+/other-city/',
]
for test_url in test_urls:
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
url = reverse('lookahead-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead+/a-city/')
url = reverse('lookahead-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead-/a-city/')
url = reverse('lookbehind-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind+/a-city/')
url = reverse('lookbehind-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind-/a-city/')
def test_invalid_reverse(self):
with self.assertRaises(NoReverseMatch):
reverse('lookahead-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookahead-negative', kwargs={'city': 'not-a-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-negative', kwargs={'city': 'not-a-city'})
| bsd-3-clause | 8,287,728,707,545,435,000 | 48.96702 | 240 | 0.623898 | false |
pombredanne/dateparser-1 | dateparser/conf.py | 1 | 2222 | # -*- coding: utf-8 -*-
from pkgutil import get_data
from yaml import load as load_yaml
"""
:mod:`dateparser`'s parsing behavior can be configured like below
*``PREFER_DAY_OF_MONTH``* defaults to ``current`` and can have ``first`` and ``last`` as values::
>>> from dateparser.conf import settings
>>> from dateparser import parse
>>> parse(u'December 2015')
datetime.datetime(2015, 12, 16, 0, 0)
>>> settings.update('PREFER_DAY_OF_MONTH', 'last')
>>> parse(u'December 2015')
datetime.datetime(2015, 12, 31, 0, 0)
>>> settings.update('PREFER_DAY_OF_MONTH', 'first')
>>> parse(u'December 2015')
datetime.datetime(2015, 12, 1, 0, 0)
*``PREFER_DATES_FROM``* defaults to ``current_period`` and can have ``past`` and ``future`` as values.
Assuming current date is June 16, 2015::
>>> from dateparser.conf import settings
>>> from dateparser import parse
>>> parse(u'March')
datetime.datetime(2015, 3, 16, 0, 0)
>>> settings.update('PREFER_DATES_FROM', 'future')
>>> parse(u'March')
datetime.datetime(2016, 3, 16, 0, 0)
*``SKIP_TOKENS``* is a ``list`` of tokens to discard while detecting language. Defaults to ``['t']``, which skips the T in ISO format datetime strings, e.g. ``2015-05-02T10:20:19+0000``.
This only works with :mod:`DateDataParser` like below:
>>> settings.update('SKIP_TOKENS', ['de']) # Turkish word for 'at'
>>> from dateparser.date import DateDataParser
>>> DateDataParser().get_date_data(u'27 Haziran 1981 de') # Turkish (at 27 June 1981)
{'date_obj': datetime.datetime(1981, 6, 27, 0, 0), 'period': 'day'}
"""
class Settings(object):
def __init__(self, **kwargs):
"""
Settings are now loaded using the data/settings.yaml file.
"""
data = get_data('data', 'settings.yaml')
data = load_yaml(data)
settings_data = data.pop('settings', {})
for datum in settings_data:
setattr(self, datum, settings_data[datum])
for key in kwargs:
setattr(self, key, kwargs[key])
def update(self, key, value):
setattr(self, key, value)
def reload_settings():
global settings
settings = Settings()
settings = Settings()
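# A minimal usage sketch (illustrative only; the setting names are the ones
# documented in the module docstring above and assumed to be present in
# data/settings.yaml):
#
#     from dateparser.conf import settings, reload_settings
#     settings.update('PREFER_DATES_FROM', 'past')  # override a single value
#     reload_settings()                             # restore the YAML defaults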
| bsd-3-clause | -6,598,953,299,249,950,000 | 32.164179 | 179 | 0.628263 | false |
luiseduardohdbackup/odoo | addons/crm/crm_phonecall.py | 255 | 14844 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class crm_phonecall(osv.osv):
""" Model for CRM phonecalls """
_name = "crm.phonecall"
_description = "Phonecall"
_order = "id desc"
_inherit = ['mail.thread']
_columns = {
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
        'create_date': fields.datetime('Creation Date', readonly=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.'),
'user_id': fields.many2one('res.users', 'Responsible'),
'partner_id': fields.many2one('res.partner', 'Contact'),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Description'),
'state': fields.selection(
[('open', 'Confirmed'),
('cancel', 'Cancelled'),
('pending', 'Pending'),
('done', 'Held')
], string='Status', readonly=True, track_visibility='onchange',
help='The status is set to Confirmed, when a case is created.\n'
'When the call is over, the status is set to Held.\n'
                 'If the call is not applicable anymore, the status can be set to Cancelled.'),
'email_from': fields.char('Email', size=128, help="These people will receive email."),
'date_open': fields.datetime('Opened', readonly=True),
# phonecall fields
'name': fields.char('Call Summary', required=True),
'active': fields.boolean('Active', required=False),
'duration': fields.float('Duration', help='Duration in minutes and seconds.'),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="['|',('section_id','=',section_id),('section_id','=',False),\
('object_id.model', '=', 'crm.phonecall')]"),
'partner_phone': fields.char('Phone'),
'partner_mobile': fields.char('Mobile'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'date_closed': fields.datetime('Closed', readonly=True),
'date': fields.datetime('Date'),
'opportunity_id': fields.many2one ('crm.lead', 'Lead/Opportunity'),
}
def _get_default_state(self, cr, uid, context=None):
if context and context.get('default_state'):
return context.get('default_state')
return 'open'
_defaults = {
'date': fields.datetime.now,
'priority': '1',
'state': _get_default_state,
'user_id': lambda self, cr, uid, ctx: uid,
'active': 1
}
def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
values = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
values = {
'partner_phone': partner.phone,
'partner_mobile': partner.mobile,
}
return {'value': values}
def write(self, cr, uid, ids, values, context=None):
""" Override to add case management: open/close dates """
if values.get('state'):
if values.get('state') == 'done':
values['date_closed'] = fields.datetime.now()
self.compute_duration(cr, uid, ids, context=context)
elif values.get('state') == 'open':
values['date_open'] = fields.datetime.now()
values['duration'] = 0.0
return super(crm_phonecall, self).write(cr, uid, ids, values, context=context)
def compute_duration(self, cr, uid, ids, context=None):
for phonecall in self.browse(cr, uid, ids, context=context):
if phonecall.duration <= 0:
duration = datetime.now() - datetime.strptime(phonecall.date, DEFAULT_SERVER_DATETIME_FORMAT)
values = {'duration': duration.seconds/float(60)}
self.write(cr, uid, [phonecall.id], values, context=context)
return True
def schedule_another_phonecall(self, cr, uid, ids, schedule_time, call_summary, \
user_id=False, section_id=False, categ_id=False, action='schedule', context=None):
"""
action :('schedule','Schedule a call'), ('log','Log a call')
"""
model_data = self.pool.get('ir.model.data')
phonecall_dict = {}
if not categ_id:
try:
res_id = model_data._get_id(cr, uid, 'crm', 'categ_phone2')
categ_id = model_data.browse(cr, uid, res_id, context=context).res_id
except ValueError:
pass
for call in self.browse(cr, uid, ids, context=context):
if not section_id:
section_id = call.section_id and call.section_id.id or False
if not user_id:
user_id = call.user_id and call.user_id.id or False
if not schedule_time:
schedule_time = call.date
vals = {
'name' : call_summary,
'user_id' : user_id or False,
'categ_id' : categ_id or False,
'description' : call.description or False,
'date' : schedule_time,
'section_id' : section_id or False,
'partner_id': call.partner_id and call.partner_id.id or False,
'partner_phone' : call.partner_phone,
'partner_mobile' : call.partner_mobile,
'priority': call.priority,
'opportunity_id': call.opportunity_id and call.opportunity_id.id or False,
}
new_id = self.create(cr, uid, vals, context=context)
if action == 'log':
self.write(cr, uid, [new_id], {'state': 'done'}, context=context)
phonecall_dict[call.id] = new_id
return phonecall_dict
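    # Illustrative example (added note, not original code): the dict returned
    # above maps each source phonecall id to the id of the newly created call,
    # e.g. {7: 42} when phonecall 7 was rescheduled/logged as phonecall 42.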
def _call_create_partner(self, cr, uid, phonecall, context=None):
partner = self.pool.get('res.partner')
partner_id = partner.create(cr, uid, {
'name': phonecall.name,
'user_id': phonecall.user_id.id,
'comment': phonecall.description,
'address': []
})
return partner_id
def on_change_opportunity(self, cr, uid, ids, opportunity_id, context=None):
values = {}
if opportunity_id:
opportunity = self.pool.get('crm.lead').browse(cr, uid, opportunity_id, context=context)
values = {
'section_id' : opportunity.section_id and opportunity.section_id.id or False,
'partner_phone' : opportunity.phone,
'partner_mobile' : opportunity.mobile,
'partner_id' : opportunity.partner_id and opportunity.partner_id.id or False,
}
return {'value' : values}
def _call_set_partner(self, cr, uid, ids, partner_id, context=None):
write_res = self.write(cr, uid, ids, {'partner_id' : partner_id}, context=context)
self._call_set_partner_send_note(cr, uid, ids, context)
return write_res
def _call_create_partner_address(self, cr, uid, phonecall, partner_id, context=None):
address = self.pool.get('res.partner')
return address.create(cr, uid, {
'parent_id': partner_id,
'name': phonecall.name,
'phone': phonecall.partner_phone,
})
def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
"""
        Handle partner assignation during the conversion of a phonecall.
        If action is 'create', create a new partner with contact details and assign the phonecall to it;
        otherwise assign the phonecall to the specified partner_id.
        :param list ids: phonecall ids to process
        :param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
        :param int partner_id: partner to assign if any
        :return dict: dictionary organized as follows: {phonecall_id: partner_assigned_id}
"""
#TODO this is a duplication of the handle_partner_assignation method of crm_lead
partner_ids = {}
# If a partner_id is given, force this partner for all elements
force_partner_id = partner_id
for call in self.browse(cr, uid, ids, context=context):
# If the action is set to 'create' and no partner_id is set, create a new one
if action == 'create':
partner_id = force_partner_id or self._call_create_partner(cr, uid, call, context=context)
self._call_create_partner_address(cr, uid, call, partner_id, context=context)
self._call_set_partner(cr, uid, [call.id], partner_id, context=context)
partner_ids[call.id] = partner_id
return partner_ids
def redirect_phonecall_view(self, cr, uid, phonecall_id, context=None):
model_data = self.pool.get('ir.model.data')
# Select the view
tree_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_tree_view')
form_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_form_view')
search_view = model_data.get_object_reference(cr, uid, 'crm', 'view_crm_case_phonecalls_filter')
value = {
'name': _('Phone Call'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'crm.phonecall',
'res_id' : int(phonecall_id),
'views': [(form_view and form_view[1] or False, 'form'), (tree_view and tree_view[1] or False, 'tree'), (False, 'calendar')],
'type': 'ir.actions.act_window',
'search_view_id': search_view and search_view[1] or False,
}
return value
def convert_opportunity(self, cr, uid, ids, opportunity_summary=False, partner_id=False, planned_revenue=0.0, probability=0.0, context=None):
partner = self.pool.get('res.partner')
opportunity = self.pool.get('crm.lead')
opportunity_dict = {}
default_contact = False
for call in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = call.partner_id and call.partner_id.id or False
if partner_id:
address_id = partner.address_get(cr, uid, [partner_id])['default']
if address_id:
default_contact = partner.browse(cr, uid, address_id, context=context)
opportunity_id = opportunity.create(cr, uid, {
'name': opportunity_summary or call.name,
'planned_revenue': planned_revenue,
'probability': probability,
'partner_id': partner_id or False,
'mobile': default_contact and default_contact.mobile,
'section_id': call.section_id and call.section_id.id or False,
'description': call.description or False,
'priority': call.priority,
'type': 'opportunity',
'phone': call.partner_phone or False,
'email_from': default_contact and default_contact.email,
})
vals = {
'partner_id': partner_id,
'opportunity_id': opportunity_id,
'state': 'done',
}
self.write(cr, uid, [call.id], vals, context=context)
opportunity_dict[call.id] = opportunity_id
return opportunity_dict
def action_make_meeting(self, cr, uid, ids, context=None):
"""
Open meeting's calendar view to schedule a meeting on current phonecall.
:return dict: dictionary value for created meeting view
"""
partner_ids = []
phonecall = self.browse(cr, uid, ids[0], context)
if phonecall.partner_id and phonecall.partner_id.email:
partner_ids.append(phonecall.partner_id.id)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
res['context'] = {
'default_phonecall_id': phonecall.id,
'default_partner_ids': partner_ids,
'default_user_id': uid,
'default_email_from': phonecall.email_from,
'default_name': phonecall.name,
}
return res
def action_button_convert2opportunity(self, cr, uid, ids, context=None):
"""
Convert a phonecall into an opp and then redirect to the opp view.
:param list ids: list of calls ids to convert (typically contains a single id)
:return dict: containing view information
"""
if len(ids) != 1:
raise osv.except_osv(_('Warning!'),_('It\'s only possible to convert one phonecall at a time.'))
opportunity_dict = self.convert_opportunity(cr, uid, ids, context=context)
return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, opportunity_dict[ids[0]], context)
# ----------------------------------------
# OpenChatter
# ----------------------------------------
def _call_set_partner_send_note(self, cr, uid, ids, context=None):
return self.message_post(cr, uid, ids, body=_("Partner has been <b>created</b>."), context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,431,875,314,257,003,000 | 47.990099 | 145 | 0.568243 | false |
ghxandsky/ceph-deploy | ceph_deploy/hosts/__init__.py | 2 | 5008 | """
We deal (mostly) with remote hosts. To avoid special casing each different
commands (e.g. using `yum` as opposed to `apt`) we can make a one time call to
that remote host and set all the special cases for running commands depending
on the type of distribution/version we are dealing with.
"""
import logging
from ceph_deploy import exc
from ceph_deploy.hosts import debian, centos, fedora, suse, remotes, rhel
from ceph_deploy.connection import get_connection
logger = logging.getLogger()
def get(hostname,
username=None,
fallback=None,
detect_sudo=True,
use_rhceph=False):
"""
Retrieve the module that matches the distribution of a ``hostname``. This
function will connect to that host and retrieve the distribution
information, then return the appropriate module and slap a few attributes
to that module defining the information it found from the hostname.
For example, if host ``node1.example.com`` is an Ubuntu server, the
``debian`` module would be returned and the following would be set::
module.name = 'ubuntu'
module.release = '12.04'
module.codename = 'precise'
:param hostname: A hostname that is reachable/resolvable over the network
:param fallback: Optional fallback to use if no supported distro is found
:param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or
the community distro. Changes what host module is
returned for RHEL.
"""
conn = get_connection(
hostname,
username=username,
logger=logging.getLogger(hostname),
detect_sudo=detect_sudo
)
try:
conn.import_module(remotes)
except IOError as error:
if 'already closed' in getattr(error, 'message', ''):
raise RuntimeError('remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname)
distro_name, release, codename = conn.remote_module.platform_information()
if not codename or not _get_distro(distro_name):
raise exc.UnsupportedPlatform(
distro=distro_name,
codename=codename,
release=release)
machine_type = conn.remote_module.machine_type()
module = _get_distro(distro_name, use_rhceph=use_rhceph)
module.name = distro_name
module.normalized_name = _normalized_distro_name(distro_name)
module.normalized_release = _normalized_release(release)
module.distro = module.normalized_name
module.is_el = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific']
module.is_rpm = module.normalized_name in ['redhat', 'centos',
'fedora', 'scientific', 'suse']
module.is_deb = not module.is_rpm
module.release = release
module.codename = codename
module.conn = conn
module.machine_type = machine_type
module.init = module.choose_init(module)
module.packager = module.get_packager(module)
return module
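# Illustrative usage only (the calling code below is assumed and not part of
# this module); it shows the attributes attached to the returned module:
#
#   distro = get('node1.example.com', username='ceph')
#   print distro.name, distro.release, distro.codename  # e.g. ubuntu 12.04 precise
#   distro.conn.exit()  # close the remote connection when finished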
def _get_distro(distro, fallback=None, use_rhceph=False):
if not distro:
return
distro = _normalized_distro_name(distro)
distributions = {
'debian': debian,
'ubuntu': debian,
'centos': centos,
'scientific': centos,
'redhat': centos,
'fedora': fedora,
'suse': suse,
}
if distro == 'redhat' and use_rhceph:
return rhel
else:
return distributions.get(distro) or _get_distro(fallback)
def _normalized_distro_name(distro):
distro = distro.lower()
if distro.startswith(('redhat', 'red hat')):
return 'redhat'
elif distro.startswith(('scientific', 'scientific linux')):
return 'scientific'
elif distro.startswith(('suse', 'opensuse')):
return 'suse'
elif distro.startswith('centos'):
return 'centos'
elif distro.startswith('linuxmint'):
return 'ubuntu'
return distro
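# Example (illustrative): _normalized_distro_name('Red Hat Enterprise Linux Server')
# returns 'redhat', and _normalized_distro_name('LinuxMint') returns 'ubuntu'.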
def _normalized_release(release):
"""
A normalizer function to make sense of distro
release versions.
Returns an object with: major, minor, patch, and garbage
These attributes can be accessed as ints with prefixed "int"
attribute names, for example:
normalized_version.int_major
"""
release = release.strip()
class NormalizedVersion(object):
pass
v = NormalizedVersion() # fake object to get nice dotted access
v.major, v.minor, v.patch, v.garbage = (release.split('.') + ["0"]*4)[:4]
release_map = dict(major=v.major, minor=v.minor, patch=v.patch, garbage=v.garbage)
# safe int versions that remove non-numerical chars
# for example 'rc1' in a version like '1-rc1
for name, value in release_map.items():
if '-' in value: # get rid of garbage like -dev1 or -rc1
value = value.split('-')[0]
value = float(''.join(c for c in value if c.isdigit()) or 0)
int_name = "int_%s" % name
setattr(v, int_name, value)
return v
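# Example (illustrative): _normalized_release('6.5-rc1') returns an object with
# major='6', minor='5-rc1', patch='0', int_major=6.0 and int_minor=5.0 -- the
# '-rc1' garbage is stripped only for the int_* attributes.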
| mit | 8,559,664,192,564,392,000 | 34.51773 | 115 | 0.650958 | false |
simbs/edx-platform | lms/djangoapps/courseware/management/commands/tests/test_dump_course.py | 44 | 9075 | # coding=utf-8
"""Tests for Django management commands"""
import json
from nose.plugins.attrib import attr
from path import Path as path
import shutil
from StringIO import StringIO
import tarfile
from tempfile import mkdtemp
import factory
from django.conf import settings
from django.core.management import call_command
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, mixed_store_config
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE
)
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.xml_importer import import_course_from_xml
DATA_DIR = settings.COMMON_TEST_DATA_ROOT
XML_COURSE_DIRS = ['toy', 'simple', 'open_ended']
MAPPINGS = {
'edX/toy/2012_Fall': 'xml',
'edX/simple/2012_Fall': 'xml',
'edX/open_ended/2012_Fall': 'xml',
}
TEST_DATA_MIXED_XML_MODULESTORE = mixed_store_config(
DATA_DIR, MAPPINGS, include_xml=True, xml_source_dirs=XML_COURSE_DIRS,
)
@attr('shard_1')
class CommandsTestBase(ModuleStoreTestCase):
"""
Base class for testing different django commands.
Must be subclassed using override_settings set to the modulestore
to be tested.
"""
__test__ = False
url_name = '2012_Fall'
def setUp(self):
super(CommandsTestBase, self).setUp()
self.test_course_key = modulestore().make_course_key("edX", "simple", "2012_Fall")
self.loaded_courses = self.load_courses()
def load_courses(self):
"""Load test courses and return list of ids"""
store = modulestore()
# Add a course with a unicode name.
unique_org = factory.Sequence(lambda n: u'ëḋẌ.%d' % n)
CourseFactory.create(
org=unique_org,
course=u'śíḿṕĺé',
display_name=u'2012_Fáĺĺ',
modulestore=store
)
courses = store.get_courses()
# NOTE: if xml store owns these, it won't import them into mongo
if self.test_course_key not in [c.id for c in courses]:
import_course_from_xml(
store, ModuleStoreEnum.UserID.mgmt_command, DATA_DIR, XML_COURSE_DIRS, create_if_not_present=True
)
return [course.id for course in store.get_courses()]
def call_command(self, name, *args, **kwargs):
"""Call management command and return output"""
out = StringIO() # To Capture the output of the command
call_command(name, *args, stdout=out, **kwargs)
out.seek(0)
return out.read()
def test_dump_course_ids(self):
kwargs = {'modulestore': 'default'}
output = self.call_command('dump_course_ids', **kwargs)
dumped_courses = output.decode('utf-8').strip().split('\n')
course_ids = {unicode(course_id) for course_id in self.loaded_courses}
dumped_ids = set(dumped_courses)
self.assertEqual(course_ids, dumped_ids)
def test_correct_course_structure_metadata(self):
course_id = unicode(modulestore().make_course_key('edX', 'open_ended', '2012_Fall'))
args = [course_id]
kwargs = {'modulestore': 'default'}
try:
output = self.call_command('dump_course_structure', *args, **kwargs)
except TypeError, exception:
self.fail(exception)
dump = json.loads(output)
self.assertGreater(len(dump.values()), 0)
def test_dump_course_structure(self):
args = [unicode(self.test_course_key)]
kwargs = {'modulestore': 'default'}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have metadata,
# but not inherited metadata:
for element in dump.itervalues():
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertNotIn('inherited_metadata', element)
# Check a few elements in the course dump
test_course_key = self.test_course_key
parent_id = unicode(test_course_key.make_usage_key('chapter', 'Overview'))
self.assertEqual(dump[parent_id]['category'], 'chapter')
self.assertEqual(len(dump[parent_id]['children']), 3)
child_id = dump[parent_id]['children'][1]
self.assertEqual(dump[child_id]['category'], 'videosequence')
self.assertEqual(len(dump[child_id]['children']), 2)
video_id = unicode(test_course_key.make_usage_key('video', 'Welcome'))
self.assertEqual(dump[video_id]['category'], 'video')
self.assertItemsEqual(
dump[video_id]['metadata'].keys(),
['download_video', 'youtube_id_0_75', 'youtube_id_1_0', 'youtube_id_1_25', 'youtube_id_1_5']
)
self.assertIn('youtube_id_1_0', dump[video_id]['metadata'])
# Check if there are the right number of elements
self.assertEqual(len(dump), 16)
def test_dump_inherited_course_structure(self):
args = [unicode(self.test_course_key)]
kwargs = {'modulestore': 'default', 'inherited': True}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have inherited metadata,
# and that it contains a particular value as well:
for element in dump.itervalues():
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertIn('inherited_metadata', element)
self.assertIsNone(element['inherited_metadata']['ispublic'])
# ... but does not contain inherited metadata containing a default value:
self.assertNotIn('due', element['inherited_metadata'])
def test_dump_inherited_course_structure_with_defaults(self):
args = [unicode(self.test_course_key)]
kwargs = {'modulestore': 'default', 'inherited': True, 'inherited_defaults': True}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have inherited metadata,
# and that it contains a particular value as well:
for element in dump.itervalues():
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertIn('inherited_metadata', element)
self.assertIsNone(element['inherited_metadata']['ispublic'])
# ... and contains inherited metadata containing a default value:
self.assertIsNone(element['inherited_metadata']['due'])
def test_export_course(self):
tmp_dir = path(mkdtemp())
self.addCleanup(shutil.rmtree, tmp_dir)
filename = tmp_dir / 'test.tar.gz'
self.run_export_course(filename)
with tarfile.open(filename) as tar_file:
self.check_export_file(tar_file)
def test_export_course_stdout(self):
output = self.run_export_course('-')
with tarfile.open(fileobj=StringIO(output)) as tar_file:
self.check_export_file(tar_file)
def run_export_course(self, filename): # pylint: disable=missing-docstring
args = [unicode(self.test_course_key), filename]
kwargs = {'modulestore': 'default'}
return self.call_command('export_course', *args, **kwargs)
def check_export_file(self, tar_file): # pylint: disable=missing-docstring
names = tar_file.getnames()
# Check if some of the files are present.
# The rest is of the code should be covered by the tests for
# xmodule.modulestore.xml_exporter, used by the dump_course command
assert_in = self.assertIn
assert_in('edX-simple-2012_Fall', names)
assert_in('edX-simple-2012_Fall/policies/{}/policy.json'.format(self.url_name), names)
assert_in('edX-simple-2012_Fall/html/toylab.html', names)
assert_in('edX-simple-2012_Fall/videosequence/A_simple_sequence.xml', names)
assert_in('edX-simple-2012_Fall/sequential/Lecture_2.xml', names)
class CommandsXMLTestCase(CommandsTestBase):
"""
Test case for management commands with the xml modulestore present.
"""
MODULESTORE = TEST_DATA_MIXED_XML_MODULESTORE
__test__ = True
class CommandsMongoTestCase(CommandsTestBase):
"""
Test case for management commands using the mixed mongo modulestore with old mongo as the default.
"""
MODULESTORE = TEST_DATA_MONGO_MODULESTORE
__test__ = True
class CommandSplitMongoTestCase(CommandsTestBase):
"""
Test case for management commands using the mixed mongo modulestore with split as the default.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
__test__ = True
url_name = 'course'
| agpl-3.0 | -7,990,228,996,075,651,000 | 37.385593 | 113 | 0.650844 | false |
phaustin/pythermo | code/thermlib/rootfinder.py | 1 | 1456 | #!/usr/bin/env python
import numpy
from scipy import optimize
def find_interval(f, x, *args):
x1 = x
x2 = x
if x == 0.:
dx = 1./50.
else:
dx = x/50.
maxiter = 40
twosqrt = numpy.sqrt(2)
a = x
fa = f(a, *args)
b = x
fb = f(b, *args)
for i in range(maxiter):
dx = dx*twosqrt
a = x - dx
fa = f(a, *args)
b = x + dx
fb = f(b, *args)
if (fa*fb < 0.): return (a, b)
raise "Couldn't find a suitable range."
# This function evaluates a new point, sets the y range,
# and tests for convergence
def get_y(x, f, eps, ymax, ymin, *args):
y = f(x, *args)
ymax = max(ymax, y)
ymin = min(ymin, y)
converged = (abs(y) < eps*(ymax-ymin))
return (y, ymax, ymin, converged)
def fzero(the_func, root_bracket, *args, **parms):
    # the_func is the function we wish to find the zeros of
    # root_bracket is an initial guess of the zero location.
    # Can be a float or a sequence of two floats specifying a range
    # *args contains any other parameters needed for f
    # **parms can be eps (allowable error) or maxiter (max number of iterations.)
    try:
        a, b = root_bracket
    except TypeError:
        # a scalar guess: widen it into an interval that brackets the root
        a, b = find_interval(the_func, root_bracket, *args)
    xtol = parms.get('eps', 2e-12)
    maxiter = int(parms.get('maxiter', 100))
    return optimize.brenth(the_func, a, b, args=args, xtol=xtol, maxiter=maxiter)
def testfunc(x):
return numpy.sin(x)
if __name__=="__main__":
f = testfunc
x = 1.
print fzero(f, x)
print fzero(f, x, eps=1e-300, maxiter = 80.)
| mit | -2,213,732,340,886,494,200 | 24.54386 | 81 | 0.565247 | false |
alexproca/askbot-devel | askbot/migrations/0095_postize_award_and_repute.py | 18 | 31809 | # encoding: utf-8
import sys
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from askbot.utils.console import ProgressBar
class Migration(DataMigration):
def forwards(self, orm):
# ContentType for Post model should be created no later than in migration 0092
ct_post = orm['contenttypes.ContentType'].objects.get(app_label='askbot', model='post')
message = "Connecting award objects to posts"
num_awards = orm.Award.objects.count()
for aw in ProgressBar(orm.Award.objects.iterator(), num_awards, message):
ct = aw.content_type
if ct.app_label == 'askbot' and ct.model in ('question', 'answer', 'comment'):
aw.content_type = ct_post
try:
aw.object_id = orm.Post.objects.get(**{'self_%s__id' % str(ct.model): aw.object_id}).id
except orm.Post.DoesNotExist:
continue
aw.save()
###
message = "Connecting repute objects to posts"
num_reputes = orm.Repute.objects.count()
for rp in ProgressBar(orm.Repute.objects.iterator(), num_reputes, message):
if rp.question:
rp.question_post = orm.Post.objects.get(self_question__id=rp.question.id)
rp.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_posts'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'self_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'self_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Comment']"}),
'self_question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
# "Post-processing" - added manually to add support for URL mapping
'old_question_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
'old_answer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('answer', 'revision'), ('question', 'revision'))", 'object_name': 'PostRevision'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'question_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Answer']", 'null': 'True', 'blank': 'True'}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_votes'", 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
| gpl-3.0 | -1,976,260,454,712,408,600 | 86.147945 | 226 | 0.557609 | false |
acarmel/CouchPotatoServer | libs/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
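        # Yield (previous, current, next) triples over the token stream; the
        # window is padded with None before the first token and after the
        # last one so __iter__ can peek one token in either direction.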
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
        if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
| gpl-3.0 | -5,665,446,975,296,884,000 | 50.219512 | 83 | 0.542857 | false |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/decomposition/plot_sparse_coding.py | 1 | 4054 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It
therefore shows how much adding atoms of different widths matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import matplotlib.pylab as plt
import numpy as np
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
         * (1 - ((x - center) ** 2 / width ** 2))
         * np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
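    # Normalize each atom (row of D) to unit L2 norm so the coding
    # coefficients of the different atoms are comparable.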
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs, color)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| mit | -4,793,937,489,532,785,000 | 39.949495 | 78 | 0.620375 | false |
txominpelu/airflow | airflow/jobs.py | 11 | 24429 | from builtins import str
from past.builtins import basestring
from collections import defaultdict
from datetime import datetime
import getpass
import logging
import signal
import socket
import subprocess
import sys
from time import sleep
from sqlalchemy import Column, Integer, String, DateTime, func, Index
from sqlalchemy.orm.session import make_transient
from airflow import executors, models, settings, utils
from airflow.configuration import conf
from airflow.utils import AirflowException, State
Base = models.Base
ID_LEN = models.ID_LEN
# Setting up a statsd client if needed
statsd = None
if conf.get('scheduler', 'statsd_on'):
from statsd import StatsClient
statsd = StatsClient(
host=conf.get('scheduler', 'statsd_host'),
port=conf.getint('scheduler', 'statsd_port'),
prefix=conf.get('scheduler', 'statsd_prefix'))
class BaseJob(Base):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
    a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=conf.getint('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.gethostname()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
            (datetime.now() - self.latest_heartbeat).total_seconds() <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
logging.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
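        # Worked example from the docstring above: with heartrate=60 and the
        # last beat 10 seconds ago, sleep_for = 60 - 10 = 50, so beats stay
        # roughly 60 seconds apart; past 60 seconds, sleep_for <= 0 and no
        # sleep happens.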
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
if job.state == State.SHUTDOWN:
self.kill()
if job.latest_heartbeat:
sleep_for = self.heartrate - (
datetime.now() - job.latest_heartbeat).total_seconds()
if sleep_for > 0:
sleep(sleep_for)
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
session.close()
self.heartbeat_callback()
logging.debug('[heart] Boom.')
def run(self):
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_end', 1, 1)
def _execute(self):
        raise NotImplementedError("This method needs to be overridden")
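# A minimal sketch (hypothetical, not part of this codebase) of how BaseJob is
# meant to be subclassed: declare a polymorphic identity and override
# _execute(); run() handles the surrounding DB bookkeeping and state changes.
#
# class NoopJob(BaseJob):
#     __mapper_args__ = {'polymorphic_identity': 'NoopJob'}
#     def _execute(self):
#         pass  # a real job would do its work here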
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs indefinitely and constantly schedules the jobs
that are ready to run. It figures out the latest runs for each
    task and sees if the dependencies for the next schedules are met.
If so it triggers the task instance. It does this for each task
in each DAG and repeats.
:param dag_id: to run the scheduler for a single specific DAG
:type dag_id: string
:param subdir: to search for DAG under a certain folder only
:type subdir: string
:param test_mode: used for unit testing this class only, runs a single
schedule run
:type test_mode: bool
:param refresh_dags_every: force refresh the DAG definition every N
runs, as specified here
:type refresh_dags_every: int
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
subdir=None,
test_mode=False,
refresh_dags_every=10,
num_runs=None,
*args, **kwargs):
self.dag_id = dag_id
self.subdir = subdir
if test_mode:
self.num_runs = 1
else:
self.num_runs = num_runs
self.refresh_dags_every = refresh_dags_every
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
@utils.provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
        We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm += dag.schedule_interval
while dttm < datetime.now():
if dttm + task.sla + dag.schedule_interval < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm += dag.schedule_interval
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.email_sent == False)
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
from airflow import ascii
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n{ascii.bug}</code></pre>
""".format(**locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
utils.send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
for sla in slas:
sla.email_sent = True
session.merge(sla)
session.commit()
session.close()
def import_errors(self, dagbag):
session = settings.Session()
session.query(models.ImportError).delete()
for filename, stacktrace in list(dagbag.import_errors.items()):
session.add(models.ImportError(
filename=filename, stacktrace=stacktrace))
session.commit()
def process_dag(self, dag, executor):
"""
This method schedules a single DAG by looking at the latest
run for each task and attempting to schedule the following run.
As multiple schedulers may be running for redundancy, this
function takes a lock on the DAG and timestamps the last run
in ``last_scheduler_run``.
"""
DagModel = models.DagModel
session = settings.Session()
db_dag = session.query(
DagModel).filter(DagModel.dag_id == dag.dag_id).first()
last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1)
secs_since_last = (
datetime.now() - last_scheduler_run).total_seconds()
# if db_dag.scheduler_lock or
if secs_since_last < self.heartrate:
session.commit()
session.close()
return None
else:
# Taking a lock
db_dag.scheduler_lock = True
db_dag.last_scheduler_run = datetime.now()
session.commit()
TI = models.TaskInstance
logging.info(
"Getting latest instance "
"for all task in dag " + dag.dag_id)
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.group_by(TI.task_id).subquery('sq')
)
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
)
logging.debug("Querying max dates for each task")
latest_ti = qry.all()
ti_dict = {ti.task_id: ti for ti in latest_ti}
session.expunge_all()
session.commit()
logging.debug("{} rows returned".format(len(latest_ti)))
for task in dag.tasks:
if task.adhoc:
continue
if task.task_id not in ti_dict:
# Brand new task, let's get started
ti = TI(task, task.start_date)
ti.refresh_from_db()
if ti.is_queueable(flag_upstream_failed=True):
logging.info(
'First run for {ti}'.format(**locals()))
executor.queue_task_instance(ti)
else:
ti = ti_dict[task.task_id]
ti.task = task # Hacky but worky
if ti.state == State.RUNNING:
continue # Only one task at a time
elif ti.state == State.UP_FOR_RETRY:
# If task instance if up for retry, make sure
# the retry delay is met
if ti.is_runnable():
logging.debug('Triggering retry: ' + str(ti))
executor.queue_task_instance(ti)
elif ti.state == State.QUEUED:
                    # If it was queued we skip it here so that it gets
                    # prioritized in self.prioritize_queued
continue
else:
# Trying to run the next schedule
next_schedule = (
ti.execution_date + task.schedule_interval)
if (
ti.task.end_date and
next_schedule > ti.task.end_date):
continue
ti = TI(
task=task,
execution_date=next_schedule,
)
ti.refresh_from_db()
if ti.is_queueable(flag_upstream_failed=True):
logging.debug('Queuing next run: ' + str(ti))
executor.queue_task_instance(ti)
# Releasing the lock
logging.debug("Unlocking DAG (scheduler_lock)")
db_dag = (
session.query(DagModel)
.filter(DagModel.dag_id == dag.dag_id)
.first()
)
db_dag.scheduler_lock = False
session.merge(db_dag)
session.commit()
session.close()
@utils.provide_session
def prioritize_queued(self, session, executor, dagbag):
# Prioritizing queued task instances
pools = {p.pool: p for p in session.query(models.Pool).all()}
TI = models.TaskInstance
queued_tis = (
session.query(TI)
.filter(TI.state == State.QUEUED)
.all()
)
session.expunge_all()
d = defaultdict(list)
for ti in queued_tis:
if (
ti.dag_id not in dagbag.dags or not
dagbag.dags[ti.dag_id].has_task(ti.task_id)):
# Deleting queued jobs that don't exist anymore
session.delete(ti)
session.commit()
else:
d[ti.pool].append(ti)
for pool, tis in list(d.items()):
open_slots = pools[pool].open_slots(session=session)
if open_slots > 0:
tis = sorted(
tis, key=lambda ti: (-ti.priority_weight, ti.start_date))
for ti in tis[:open_slots]:
task = None
try:
task = dagbag.dags[ti.dag_id].get_task(ti.task_id)
except:
logging.error("Queued task {} seems gone".format(ti))
session.delete(ti)
if task:
ti.task = task
if ti.are_dependencies_met():
executor.queue_task_instance(ti, force=True)
else:
session.delete(ti)
session.commit()
def _execute(self):
dag_id = self.dag_id
def signal_handler(signum, frame):
logging.error("SIGINT (ctrl-c) received")
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
utils.pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting the scheduler")
dagbag = models.DagBag(self.subdir, sync_to_db=True)
executor = dagbag.executor
executor.start()
i = 0
while not self.num_runs or self.num_runs > i:
loop_start_dttm = datetime.now()
try:
self.prioritize_queued(executor=executor, dagbag=dagbag)
except Exception as e:
logging.exception(e)
i += 1
try:
if i % self.refresh_dags_every == 0:
dagbag = models.DagBag(self.subdir, sync_to_db=True)
else:
dagbag.collect_dags(only_if_updated=True)
except:
logging.error("Failed at reloading the dagbag")
if statsd:
statsd.incr('dag_refresh_error', 1, 1)
sleep(5)
if dag_id:
dags = [dagbag.dags[dag_id]]
else:
dags = [
dag for dag in dagbag.dags.values() if not dag.parent_dag]
paused_dag_ids = dagbag.paused_dags()
for dag in dags:
logging.debug("Scheduling {}".format(dag.dag_id))
dag = dagbag.get_dag(dag.dag_id)
if not dag or (dag.dag_id in paused_dag_ids):
continue
try:
self.process_dag(dag, executor)
self.manage_slas(dag)
except Exception as e:
logging.exception(e)
logging.info(
"Done queuing tasks, calling the executor's heartbeat")
duration_sec = (datetime.now() - loop_start_dttm).total_seconds()
logging.info("Loop took: {} seconds".format(duration_sec))
try:
self.import_errors(dagbag)
except Exception as e:
logging.exception(e)
try:
# We really just want the scheduler to never ever stop.
executor.heartbeat()
self.heartbeat()
except Exception as e:
logging.exception(e)
logging.error("Tachycardia!")
def heartbeat_callback(self):
if statsd:
statsd.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order and lasts for
as long as it takes for the set of task instance to be completed.
"""
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
def __init__(
self,
dag, start_date=None, end_date=None, mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_dependencies=False,
*args, **kwargs):
self.dag = dag
dag.override_start_date(start_date)
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_dependencies = ignore_dependencies
super(BackfillJob, self).__init__(*args, **kwargs)
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
# Build a list of all instances to run
tasks_to_run = {}
failed = []
succeeded = []
started = []
wont_run = []
for task in self.dag.tasks:
if (not self.include_adhoc) and task.adhoc:
continue
start_date = start_date or task.start_date
end_date = end_date or task.end_date or datetime.now()
for dttm in utils.date_range(
start_date, end_date, task.dag.schedule_interval):
ti = models.TaskInstance(task, dttm)
tasks_to_run[ti.key] = ti
# Triggering what is ready to get triggered
while tasks_to_run:
for key, ti in list(tasks_to_run.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS and key in tasks_to_run:
succeeded.append(key)
del tasks_to_run[key]
elif ti.is_runnable():
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
task_start_date=self.bf_start_date,
pickle_id=pickle_id,
ignore_dependencies=self.ignore_dependencies)
ti.state = State.RUNNING
if key not in started:
started.append(key)
self.heartbeat()
executor.heartbeat()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
if key not in tasks_to_run:
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
if ti.state == State.FAILED:
failed.append(key)
logging.error("Task instance " + str(key) + " failed")
del tasks_to_run[key]
# Removing downstream tasks from the one that has failed
for t in self.dag.get_task(task_id).get_flat_relatives(
upstream=False):
key = (ti.dag_id, t.task_id, execution_date)
if key in tasks_to_run:
wont_run.append(key)
del tasks_to_run[key]
elif ti.state == State.SUCCESS:
succeeded.append(key)
del tasks_to_run[key]
msg = (
"[backfill progress] "
"waiting: {0} | "
"succeeded: {1} | "
"kicked_off: {2} | "
"failed: {3} | "
"skipped: {4} ").format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(wont_run))
logging.info(msg)
executor.end()
session.close()
if failed:
            raise AirflowException(
                "Some task instances failed, here's the list:\n"+str(failed))
logging.info("All done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_dependencies=False,
force=False,
mark_success=False,
pickle_id=None,
task_start_date=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_dependencies = ignore_dependencies
self.force = force
self.pickle_id = pickle_id
self.mark_success = mark_success
self.task_start_date = task_start_date
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
command = self.task_instance.command(
raw=True,
ignore_dependencies=self.ignore_dependencies,
force=self.force,
pickle_id=self.pickle_id,
mark_success=self.mark_success,
task_start_date=self.task_start_date,
job_id=self.id,
)
self.process = subprocess.Popen(['bash', '-c', command])
return_code = None
while return_code is None:
self.heartbeat()
return_code = self.process.poll()
def on_kill(self):
self.process.terminate()
| apache-2.0 | 6,811,615,128,299,790,000 | 33.749644 | 80 | 0.527406 | false |
agiliq/django-graphos | graphos/renderers/flot.py | 1 | 3255 | import json
from .base import BaseChart
from ..utils import get_default_options, JSONEncoderForHTML
class BaseFlotChart(BaseChart):
    """ Base class for Flot chart renderers """
def get_serieses(self):
# Assuming self.data_source.data is:
# [['Year', 'Sales', 'Expenses'], [2004, 100, 200], [2005, 300, 250]]
data_only = self.get_data()[1:]
# first_column = [2004, 2005]
first_column = [el[0] for el in data_only]
serieses = []
for i in range(1, len(self.header)):
current_column = [el[i] for el in data_only]
current_series = self.zip_list(first_column, current_column)
serieses.append(current_series)
# serieses = [[(2004, 100), (2005, 300)], [(2004, 200), (2005, 250)]]
return serieses
def get_series_objects(self):
series_objects = []
serieses = self.get_serieses()
for i in range(1, len(self.header)):
series_object = {}
series_object['label'] = self.header[i]
series_object['data'] = serieses[i - 1]
series_objects.append(series_object)
        # series_objects = [{'label': 'Sales', 'data': [(2004, 100), (2005, 300)]}, {'label': 'Expenses', 'data': [(2004, 200), (2005, 250)]}]
return series_objects
def get_series_pie_objects(self):
series_objects = []
serieses = self.get_data()[1:]
try:
for i in serieses:
series_object = {}
series_object['label'] = i[0]
series_object['data'] = i[1]
series_objects.append(series_object)
except IndexError:
            print("Expected input data format: [['Year', 'Sales'], [2004, 100], [2005, 300]]")
        # series_objects = [{'label': '2004', 'data': 100}, {'label': '2005', 'data': 300}]
return json.dumps(series_objects, cls=JSONEncoderForHTML)
def get_series_objects_json(self):
return json.dumps(self.get_series_objects(), cls=JSONEncoderForHTML)
def get_options(self):
options = get_default_options()
options.update(self.options)
return options
def get_html_template(self):
return 'graphos/flot/html.html'
def get_js_template(self):
return 'graphos/flot/js.html'
class PointChart(BaseFlotChart):
def get_options(self):
options = get_default_options("points")
options.update(self.options)
return options
class LineChart(BaseFlotChart):
""" LineChart """
def get_options(self):
options = get_default_options("lines")
options.update(self.options)
return options
class BarChart(BaseFlotChart):
def get_options(self):
options = get_default_options("bars")
options.update(self.options)
return options
class ColumnChart(BaseFlotChart):
def get_options(self):
options = get_default_options("bars")
options.update(self.options)
options["horizontal"] = True
return options
class PieChart(BaseFlotChart):
def get_options(self):
options = get_default_options("pie")
options.update(self.options)
return options
def get_js_template(self):
return 'graphos/flot/pie_chart.html'
| bsd-2-clause | 2,979,096,463,640,294,000 | 29.707547 | 142 | 0.589862 | false |
nammaste6/kafka | system_test/utils/setup_utils.py | 117 | 1848 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# =================================================================
# setup_utils.py
# - This module provides some basic helper functions.
# =================================================================
import logging
import kafka_system_test_utils
import sys
class SetupUtils(object):
# dict to pass user-defined attributes to logger argument: "extra"
# to use: just update "thisClassName" to the appropriate value
thisClassName = '(ReplicaBasicTest)'
d = {'name_of_class': thisClassName}
logger = logging.getLogger("namedLogger")
anonLogger = logging.getLogger("anonymousLogger")
def __init__(self):
d = {'name_of_class': self.__class__.__name__}
self.logger.debug("#### constructor inside SetupUtils", extra=self.d)
def log_message(self, message):
print
self.anonLogger.info("======================================================")
self.anonLogger.info(message)
self.anonLogger.info("======================================================")
| apache-2.0 | -7,173,650,353,860,619,000 | 38.319149 | 86 | 0.623377 | false |
bertucho/moviestalk2 | venv/Lib/encodings/mac_greek.py | 593 | 13977 | """ Python Character Mapping Codec mac_greek generated from 'MAPPINGS/VENDORS/APPLE/GREEK.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-greek',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xb9' # 0x81 -> SUPERSCRIPT ONE
u'\xb2' # 0x82 -> SUPERSCRIPT TWO
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xb3' # 0x84 -> SUPERSCRIPT THREE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0385' # 0x87 -> GREEK DIALYTIKA TONOS
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0384' # 0x8B -> GREEK TONOS
u'\xa8' # 0x8C -> DIAERESIS
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xa3' # 0x92 -> POUND SIGN
u'\u2122' # 0x93 -> TRADE MARK SIGN
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u2022' # 0x96 -> BULLET
u'\xbd' # 0x97 -> VULGAR FRACTION ONE HALF
u'\u2030' # 0x98 -> PER MILLE SIGN
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xa6' # 0x9B -> BROKEN BAR
u'\u20ac' # 0x9C -> EURO SIGN # before Mac OS 9.2.2, was SOFT HYPHEN
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA
u'\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA
u'\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA
u'\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI
u'\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u03a3' # 0xAA -> GREEK CAPITAL LETTER SIGMA
u'\u03aa' # 0xAB -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\xa7' # 0xAC -> SECTION SIGN
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xb0' # 0xAE -> DEGREE SIGN
u'\xb7' # 0xAF -> MIDDLE DOT
u'\u0391' # 0xB0 -> GREEK CAPITAL LETTER ALPHA
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\u0392' # 0xB5 -> GREEK CAPITAL LETTER BETA
u'\u0395' # 0xB6 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xB7 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xB8 -> GREEK CAPITAL LETTER ETA
u'\u0399' # 0xB9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xBA -> GREEK CAPITAL LETTER KAPPA
u'\u039c' # 0xBB -> GREEK CAPITAL LETTER MU
u'\u03a6' # 0xBC -> GREEK CAPITAL LETTER PHI
u'\u03ab' # 0xBD -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03a8' # 0xBE -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xBF -> GREEK CAPITAL LETTER OMEGA
u'\u03ac' # 0xC0 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u039d' # 0xC1 -> GREEK CAPITAL LETTER NU
u'\xac' # 0xC2 -> NOT SIGN
u'\u039f' # 0xC3 -> GREEK CAPITAL LETTER OMICRON
u'\u03a1' # 0xC4 -> GREEK CAPITAL LETTER RHO
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u03a4' # 0xC6 -> GREEK CAPITAL LETTER TAU
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u03a5' # 0xCB -> GREEK CAPITAL LETTER UPSILON
u'\u03a7' # 0xCC -> GREEK CAPITAL LETTER CHI
u'\u0386' # 0xCD -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0xCE -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2015' # 0xD1 -> HORIZONTAL BAR
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u0389' # 0xD7 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xD8 -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0xD9 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0xDA -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u03ad' # 0xDB -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDC -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDD -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0xDE -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u038f' # 0xDF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u03cd' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03c8' # 0xE3 -> GREEK SMALL LETTER PSI
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03c6' # 0xE6 -> GREEK SMALL LETTER PHI
u'\u03b3' # 0xE7 -> GREEK SMALL LETTER GAMMA
u'\u03b7' # 0xE8 -> GREEK SMALL LETTER ETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03be' # 0xEA -> GREEK SMALL LETTER XI
u'\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xED -> GREEK SMALL LETTER MU
u'\u03bd' # 0xEE -> GREEK SMALL LETTER NU
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03ce' # 0xF1 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u03c1' # 0xF2 -> GREEK SMALL LETTER RHO
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03b8' # 0xF5 -> GREEK SMALL LETTER THETA
u'\u03c9' # 0xF6 -> GREEK SMALL LETTER OMEGA
u'\u03c2' # 0xF7 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c7' # 0xF8 -> GREEK SMALL LETTER CHI
u'\u03c5' # 0xF9 -> GREEK SMALL LETTER UPSILON
u'\u03b6' # 0xFA -> GREEK SMALL LETTER ZETA
u'\u03ca' # 0xFB -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFC -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u0390' # 0xFD -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u03b0' # 0xFE -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\xad' # 0xFF -> SOFT HYPHEN # before Mac OS 9.2.2, was undefined
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
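### Illustrative round-trip (added sketch, not part of the original codec
### module; the 0xB0 -> GREEK CAPITAL LETTER ALPHA mapping is taken from the
### decoding table above):
# codecs.charmap_decode('\xb0', 'strict', decoding_table)    # (u'\u0391', 1)
# codecs.charmap_encode(u'\u0391', 'strict', encoding_table) # ('\xb0', 1)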
| mit | -8,758,782,814,037,390,000 | 44.527687 | 112 | 0.555699 | false |
vbshah1992/microblog | flask/lib/python2.7/site-packages/sqlalchemy/dialects/sqlite/pysqlite.py | 17 | 13150 | # sqlite/pysqlite.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the SQLite database via pysqlite.
Note that pysqlite is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)
Full documentation on pysqlite is available at:
`<http://www.initd.org/pub/software/pysqlite/doc/usage-guide.html>`_
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database" portion of
the URL. Note that the format of a url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to the
**right** of the third slash. So connecting to a relative filepath looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you need **four**
slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be used.
Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is present. Specify
``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect that any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should it be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the DATETIME
or TIME types... confused yet?) will not perform any bind parameter or result
processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result processing.
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This was originally intended to work with older versions
of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that pysqlite
connections are still not safe to use concurrently in multiple threads.
In particular, any statement execution calls would need to be externally
mutexed, as Pysqlite does not provide for thread-safe propagation of error
messages among other things. So while even ``:memory:`` databases can be
shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default will use
:class:`.SingletonThreadPool`. This pool maintains a single connection per
thread, so that all access to the engine within the current thread use the
same ``:memory:`` database - other threads would access a different
``:memory:`` database.
* When a file-based database is specified, the dialect will use :class:`.NullPool`
as the source of connections. This pool closes and discards connections
which are returned to the pool immediately. SQLite file-based connections
have extremely low overhead, so pooling is not necessary. The scheme also
prevents a connection from being used again in a different thread and works
best with SQLite's coarse-grained file locking.
.. versionchanged:: 0.7
Default selection of :class:`.NullPool` for SQLite file-based databases.
Previous versions select :class:`.SingletonThreadPool` by
default for all SQLite databases.
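A pool class can also be passed explicitly when the default selection is not
desired; a minimal sketch (the filename is an assumption for the example)::
    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool
    # file-based database, stating the pool choice explicitly
    engine = create_engine('sqlite:///myfile.db', poolclass=NullPool)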
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same connection
object must be shared among threads, since the database exists
only within the scope of that connection. The :class:`.StaticPool` implementation
will maintain a single connection globally, and the ``check_same_thread`` flag
can be passed to Pysqlite as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a temporary table
in a file-based SQLite database across multiple checkouts from the connection pool, such
as when using an ORM :class:`.Session` where the temporary table should continue to remain
after :meth:`.commit` or :meth:`.rollback` is called,
a pool which maintains a single connection must be used. Use :class:`.SingletonThreadPool`
if the scope is only needed within the current thread, or :class:`.StaticPool` if the scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number of threads
that are to be used; beyond that number, connections will be closed out in a non-deterministic
way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets, never
plain strings, and accommodates ``unicode`` objects within bound parameter
values in all cases. Regardless of the SQLAlchemy string type in use,
string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
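A minimal sketch of declaring such a column (the table and column names are
illustrative only)::
    from sqlalchemy import Table, Column, MetaData, Unicode
    metadata = MetaData()
    users = Table('users', metadata,
        Column('name', Unicode(50)),  # non-``unicode`` values emit a warning
    )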
.. _pysqlite_serializable:
Serializable Transaction Isolation
----------------------------------
The pysqlite DBAPI driver has a long-standing bug in which transactional
state is not begun until the first DML statement, that is INSERT, UPDATE
or DELETE, is emitted. A SELECT statement will not cause transactional
state to begin. While this mode of usage is fine for typical situations
and has the advantage that the SQLite database file is not prematurely
locked, it breaks serializable transaction isolation, which requires
that the database file be locked upon any SQL being emitted.
To work around this issue, the ``BEGIN`` keyword can be emitted
at the start of each transaction. The following recipe establishes
a :meth:`.ConnectionEvents.begin` handler to achieve this::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db", isolation_level='SERIALIZABLE')
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.execute("BEGIN")
"""
from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
from sqlalchemy import exc, pool
from sqlalchemy import types as sqltypes
from sqlalchemy import util
import os
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = 'qmark'
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date:_SQLite_pysqliteDate,
sqltypes.TIMESTAMP:_SQLite_pysqliteTimeStamp,
}
)
# Py3K
#description_encoding = None
driver = 'pysqlite'
def __init__(self, **kwargs):
SQLiteDialect.__init__(self, **kwargs)
if self.dbapi is not None:
sqlite_ver = self.dbapi.version_info
if sqlite_ver < (2, 1, 3):
util.warn(
("The installed version of pysqlite2 (%s) is out-dated "
"and will cause errors in some cases. Version 2.1.3 "
"or greater is recommended.") %
'.'.join([str(subver) for subver in sqlite_ver]))
@classmethod
def dbapi(cls):
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError, e:
try:
from sqlite3 import dbapi2 as sqlite #try the 2.5+ stdlib name.
except ImportError:
raise e
return sqlite
@classmethod
def get_pool_class(cls, url):
if url.database and url.database != ':memory:':
return pool.NullPool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,))
filename = url.database or ':memory:'
if filename != ':memory:':
filename = os.path.abspath(filename)
opts = url.query.copy()
util.coerce_kw_type(opts, 'timeout', float)
util.coerce_kw_type(opts, 'isolation_level', str)
util.coerce_kw_type(opts, 'detect_types', int)
util.coerce_kw_type(opts, 'check_same_thread', bool)
util.coerce_kw_type(opts, 'cached_statements', int)
return ([filename], opts)
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.ProgrammingError) and \
"Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
| bsd-3-clause | -8,966,660,639,384,612,000 | 39.58642 | 98 | 0.700228 | false |
NL66278/odoo | addons/google_account/controllers/main.py | 350 | 1270 | import simplejson
import urllib
import openerp
from openerp import http
from openerp.http import request
import openerp.addons.web.controllers.main as webmain
from openerp.addons.web.http import SessionExpiredException
from werkzeug.exceptions import BadRequest
import werkzeug.utils
class google_auth(http.Controller):
@http.route('/google_account/authentication', type='http', auth="none")
def oauth2callback(self, **kw):
""" This route/function is called by Google when user Accept/Refuse the consent of Google """
state = simplejson.loads(kw['state'])
dbname = state.get('d')
service = state.get('s')
url_return = state.get('f')
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
if kw.get('code',False):
registry.get('google.%s' % service).set_all_tokens(cr,request.session.uid,kw['code'])
return werkzeug.utils.redirect(url_return)
elif kw.get('error'):
return werkzeug.utils.redirect("%s%s%s" % (url_return ,"?error=" , kw.get('error')))
else:
return werkzeug.utils.redirect("%s%s" % (url_return ,"?error=Unknown_error"))
| agpl-3.0 | -5,090,882,520,698,320,000 | 38.6875 | 101 | 0.640945 | false |
repotvsupertuga/repo | plugin.video.TVsupertuga/resources/lib/zsources/xmovies.py | 4 | 4807 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,time
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import cache
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['xmovies8.tv', 'xmovies8.ru']
self.base_link = 'https://xmovies8.ru'
self.moviesearch_link = '/movies/search?s=%s'
def movie(self, imdb, title, localtitle, year):
try:
url = self.searchMovie(title, year)
if url == None:
t = cache.get(self.getImdbTitle, 900, imdb)
if t != title:
url = self.searchMovie(t, year)
return url
except:
return
def getImdbTitle(self, imdb):
try:
t = 'http://www.omdbapi.com/?i=%s' % imdb
t = client.request(t)
t = json.loads(t)
t = cleantitle.normalize(t['Title'])
return t
except:
return
def searchMovie(self, title, year):
try:
title = cleantitle.normalize(title)
url = urlparse.urljoin(self.base_link, self.moviesearch_link % (cleantitle.geturl(title.replace('\'', '-'))))
r = client.request(url)
t = cleantitle.get(title)
r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t in cleantitle.get(i[1]) and year == i[2]][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
return url.encode('utf-8')
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
url = path = re.sub('/watching.html$', '', url.strip('/'))
url = referer = url + '/watching.html'
p = client.request(url)
p = re.findall('load_player\(.+?(\d+)', p)
p = urllib.urlencode({'id': p[0]})
headers = {
'Accept-Formating': 'application/json, text/javascript',
'Server': 'cloudflare-nginx',
'Referer': referer}
r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3')
r = client.request(r, post=p, headers=headers, XHR=True)
url = json.loads(r)['value']
url = client.request(url, headers=headers, XHR=True, output='geturl')
if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
sources.append({'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False,'debridonly': False})
raise Exception()
r = client.request(url, headers=headers, XHR=True)
try:
src = json.loads(r)['playlist'][0]['sources']
links = [i['file'] for i in src if 'file' in i]
for i in links:
try:
sources.append(
{'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en',
'url': i, 'direct': True, 'debridonly': False})
except:
pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
for i in range(3):
u = directstream.googlepass(url)
if not u == None: break
return u
except:
return | gpl-2.0 | 6,883,103,737,016,534,000 | 35.984615 | 141 | 0.527772 | false |
njase/numpy | numpy/distutils/command/build.py | 187 | 1618 | from __future__ import division, absolute_import, print_function
import os
import sys
from distutils.command.build import build as old_build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers
class build(old_build):
sub_commands = [('config_cc', lambda *args: True),
('config_fc', lambda *args: True),
('build_src', old_build.has_ext_modules),
] + old_build.sub_commands
user_options = old_build.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
]
help_options = old_build.help_options + [
('help-fcompiler', None, "list available Fortran compilers",
show_fortran_compilers),
]
def initialize_options(self):
old_build.initialize_options(self)
self.fcompiler = None
self.parallel = None
def finalize_options(self):
if self.parallel:
try:
self.parallel = int(self.parallel)
except ValueError:
raise ValueError("--parallel/-j argument must be an integer")
build_scripts = self.build_scripts
old_build.finalize_options(self)
plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
if build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts' + plat_specifier)
def run(self):
old_build.run(self)
| bsd-3-clause | -1,218,521,762,569,333,000 | 33.425532 | 77 | 0.582818 | false |
georgyberdyshev/ascend | pygtk/canvas/asclibrary.py | 1 | 2535 | '''Import the SWIG wrapper'''
import os
DEFAULT_CANVAS_MODEL_LIBRARY_FOLDER = os.path.join('..','..','models','test','canvas')
try:
import ascpy
except ImportError as e:
print "Error: Could not load ASCEND Library. Please check the paths \
ASCENDLIBRARY and LD_LIBRARY_PATH\n",e
from blocktype import BlockType
from blockstream import BlockStream
class ascPy(object):
'''
The ASCEND Library class. Everything that talks to ASCEND should be here.
'''
def __init__(self):
self.library = None
self.annodb = None
self.modules = None
self.types = None
self.canvas_blocks = []
self.streams = []
self.reporter = ascpy.getReporter()
self.defaultlibraryfolder = DEFAULT_CANVAS_MODEL_LIBRARY_FOLDER
def load_library(self,lib_name = None):
if lib_name == None:
return
lib_path = os.path.join('test','canvas',lib_name)
try:
self.library.clear()
self.library.load(lib_path)
except Exception as e:
self.library = ascpy.Library()
self.library.load(lib_path)
self.annodb = self.library.getAnnotationDatabase()
self.modules = self.library.getModules()
try:
self.blocktypes = set()
self.streamtypes = set()
for m in self.modules:
self.types = self.library.getModuleTypes(m)
for t in self.types:
#if t.hasParameters():
# continue
self.parse_types(t)
self.parse_streams(t)
except Exception as e:
print 'Error: ASCEND Blocks Could not be loaded \n',e
exit()
try:
del self.canvas_blocks[:]
for t in self.blocktypes:
b = BlockType(t,self.annodb)
self.canvas_blocks +=[b]
except Exception as e:
print 'Error: Could not load blocktypes \n',e
exit()
try:
for stream in self.streamtypes:
s = BlockStream(stream,self.annodb)
self.streams +=[s]
except Exception as e:
print 'Error: Could not load streams \n',e
exit()
'''
try:
for stream in streamtypes:
notes = self.annodb.getTypeRefinedNotesLang(stream,
ascpy.SymChar("inline"))
for n in notes:
types = str(n.getText()).split(',')
self.streams.append((str(n.getId()),types))
except Exception as e:
print 'Error: Could not load streamtypes \n',e
exit()
'''
def parse_types(self,t):
x = self.annodb.getNotes(t,ascpy.SymChar("block"),ascpy.SymChar("SELF"))
if x:
self.blocktypes.add(t)
def parse_streams(self,t):
x = self.annodb.getNotes(t,ascpy.SymChar("stream"),ascpy.SymChar("SELF"))
if x:
self.streamtypes.add(t)
# vim: set ts=4 noet:
| gpl-2.0 | 8,757,188,142,192,394,000 | 25.134021 | 86 | 0.657594 | false |
angelman/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py | 118 | 11747 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
class TestRunResults(object):
def __init__(self, expectations, num_tests):
self.total = num_tests
self.remaining = self.total
self.expectations = expectations
self.expected = 0
self.unexpected = 0
self.unexpected_failures = 0
self.unexpected_crashes = 0
self.unexpected_timeouts = 0
self.tests_by_expectation = {}
self.tests_by_timeline = {}
self.results_by_name = {} # Map of test name to the last result for the test.
self.all_results = [] # All results from a run, including every iteration of every test.
self.unexpected_results_by_name = {}
self.failures_by_name = {}
self.total_failures = 0
self.expected_skips = 0
for expectation in test_expectations.TestExpectations.EXPECTATIONS.values():
self.tests_by_expectation[expectation] = set()
for timeline in test_expectations.TestExpectations.TIMELINES.values():
self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
self.slow_tests = set()
self.interrupted = False
def add(self, test_result, expected, test_is_slow):
self.tests_by_expectation[test_result.type].add(test_result.test_name)
self.results_by_name[test_result.test_name] = test_result
if test_result.type != test_expectations.SKIP:
self.all_results.append(test_result)
self.remaining -= 1
if len(test_result.failures):
self.total_failures += 1
self.failures_by_name[test_result.test_name] = test_result.failures
if expected:
self.expected += 1
if test_result.type == test_expectations.SKIP:
self.expected_skips += 1
else:
self.unexpected_results_by_name[test_result.test_name] = test_result
self.unexpected += 1
if len(test_result.failures):
self.unexpected_failures += 1
if test_result.type == test_expectations.CRASH:
self.unexpected_crashes += 1
elif test_result.type == test_expectations.TIMEOUT:
self.unexpected_timeouts += 1
if test_is_slow:
self.slow_tests.add(test_result.test_name)
class RunDetails(object):
def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
self.exit_code = exit_code
self.summarized_results = summarized_results
self.initial_results = initial_results
self.retry_results = retry_results
self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
def _interpret_test_failures(failures):
test_dict = {}
failure_types = [type(failure) for failure in failures]
# FIXME: get rid of all this is_* values once there is a 1:1 map between
# TestFailure type and test_expectations.EXPECTATION.
if test_failures.FailureMissingAudio in failure_types:
test_dict['is_missing_audio'] = True
if test_failures.FailureMissingResult in failure_types:
test_dict['is_missing_text'] = True
if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
test_dict['is_missing_image'] = True
if 'image_diff_percent' not in test_dict:
for failure in failures:
if isinstance(failure, test_failures.FailureImageHashMismatch) or isinstance(failure, test_failures.FailureReftestMismatch):
test_dict['image_diff_percent'] = failure.diff_percent
return test_dict
def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry):
"""Returns a dictionary containing a summary of the test runs, with the following fields:
'version': a version indicator
'fixable': The number of fixable tests (NOW - PASS)
'skipped': The number of skipped tests (NOW & SKIPPED)
'num_regressions': The number of non-flaky failures
'num_flaky': The number of flaky failures
'num_missing': The number of tests with missing results
'num_passes': The number of unexpected passes
'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
"""
results = {}
results['version'] = 3
tbe = initial_results.tests_by_expectation
tbt = initial_results.tests_by_timeline
results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])
num_passes = 0
num_flaky = 0
num_missing = 0
num_regressions = 0
keywords = {}
for expectation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
keywords[expectation_enum] = expectation_string.upper()
for modifier_string, modifier_enum in test_expectations.TestExpectations.MODIFIERS.iteritems():
keywords[modifier_enum] = modifier_string.upper()
tests = {}
for test_name, result in initial_results.results_by_name.iteritems():
# Note that if a test crashed in the original run, we ignore
# whether or not it crashed when we retried it (if we retried it),
# and always consider the result not flaky.
expected = expectations.get_expectations_string(test_name)
result_type = result.type
actual = [keywords[result_type]]
if result_type == test_expectations.SKIP:
continue
test_dict = {}
if result.has_stderr:
test_dict['has_stderr'] = True
if result.reftest_type:
test_dict.update(reftest_type=list(result.reftest_type))
if expectations.has_modifier(test_name, test_expectations.WONTFIX):
test_dict['wontfix'] = True
if result_type == test_expectations.PASS:
num_passes += 1
# FIXME: include passing tests that have stderr output.
if expected == 'PASS':
continue
elif result_type == test_expectations.CRASH:
if test_name in initial_results.unexpected_results_by_name:
num_regressions += 1
elif result_type == test_expectations.MISSING:
if test_name in initial_results.unexpected_results_by_name:
num_missing += 1
elif test_name in initial_results.unexpected_results_by_name:
if retry_results and test_name not in retry_results.unexpected_results_by_name:
actual.extend(expectations.get_expectations_string(test_name).split(" "))
num_flaky += 1
elif retry_results:
retry_result_type = retry_results.unexpected_results_by_name[test_name].type
if result_type != retry_result_type:
if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and retry_result_type == test_expectations.IMAGE_PLUS_TEXT:
num_regressions += 1
else:
num_flaky += 1
actual.append(keywords[retry_result_type])
else:
num_regressions += 1
else:
num_regressions += 1
test_dict['expected'] = expected
test_dict['actual'] = " ".join(actual)
test_dict.update(_interpret_test_failures(result.failures))
if retry_results:
retry_result = retry_results.unexpected_results_by_name.get(test_name)
if retry_result:
test_dict.update(_interpret_test_failures(retry_result.failures))
# Store test hierarchically by directory. e.g.
# foo/bar/baz.html: test_dict
# foo/bar/baz1.html: test_dict
#
# becomes
# foo: {
# bar: {
# baz.html: test_dict,
# baz1.html: test_dict
# }
# }
parts = test_name.split('/')
current_map = tests
for i, part in enumerate(parts):
if i == (len(parts) - 1):
current_map[part] = test_dict
break
if part not in current_map:
current_map[part] = {}
current_map = current_map[part]
results['tests'] = tests
results['num_passes'] = num_passes
results['num_flaky'] = num_flaky
results['num_missing'] = num_missing
results['num_regressions'] = num_regressions
results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
results['interrupted'] = initial_results.interrupted # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
results['layout_tests_dir'] = port_obj.layout_tests_dir()
results['has_wdiff'] = port_obj.wdiff_available()
results['has_pretty_patch'] = port_obj.pretty_patch_available()
results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
try:
# We only use the svn revision for using trac links in the results.html file,
# Don't do this by default since it takes >100ms.
# FIXME: Do we really need to populate this both here and in the json_results_generator?
if port_obj.get_option("builder_name"):
port_obj.host.initialize_scm()
results['revision'] = port_obj.host.scm().head_svn_revision()
except Exception, e:
_log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
# Handle cases where we're running outside of version control.
import traceback
_log.debug('Failed to learn head svn revision:')
_log.debug(traceback.format_exc())
results['revision'] = ""
return results
| bsd-3-clause | -8,330,437,472,421,778,000 | 44.180769 | 220 | 0.650549 | false |
crazy-cat/incubator-mxnet | example/speech-demo/train_lstm_proj.py | 25 | 13880 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import sys
sys.path.insert(0, "../../python")
import time
import logging
import os.path
import mxnet as mx
import numpy as np
from speechSGD import speechSGD
from lstm_proj import lstm_unroll
from io_util import BucketSentenceIter, TruncatedSentenceIter, DataReadStream
from config_util import parse_args, get_checkpoint_path, parse_contexts
# some constants
METHOD_BUCKETING = 'bucketing'
METHOD_TBPTT = 'truncated-bptt'
def prepare_data(args):
batch_size = args.config.getint('train', 'batch_size')
num_hidden = args.config.getint('arch', 'num_hidden')
num_hidden_proj = args.config.getint('arch', 'num_hidden_proj')
num_lstm_layer = args.config.getint('arch', 'num_lstm_layer')
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
if num_hidden_proj > 0:
init_h = [('l%d_init_h'%l, (batch_size, num_hidden_proj)) for l in range(num_lstm_layer)]
else:
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_states = init_c + init_h
file_train = args.config.get('data', 'train')
file_dev = args.config.get('data', 'dev')
file_format = args.config.get('data', 'format')
feat_dim = args.config.getint('data', 'xdim')
train_data_args = {
"gpu_chunk": 32768,
"lst_file": file_train,
"file_format": file_format,
"separate_lines": True
}
dev_data_args = {
"gpu_chunk": 32768,
"lst_file": file_dev,
"file_format": file_format,
"separate_lines": True
}
train_sets = DataReadStream(train_data_args, feat_dim)
dev_sets = DataReadStream(dev_data_args, feat_dim)
return (init_states, train_sets, dev_sets)
def CrossEntropy(labels, preds):
labels = labels.reshape((-1,))
preds = preds.reshape((-1, preds.shape[1]))
loss = 0.
num_inst = 0
for i in range(preds.shape[0]):
label = labels[i]
if label > 0:
loss += -np.log(max(1e-10, preds[i][int(label)]))
num_inst += 1
return loss, num_inst
def Acc_exclude_padding(labels, preds):
labels = labels.reshape((-1,))
preds = preds.reshape((-1, preds.shape[1]))
sum_metric = 0
num_inst = 0
for i in range(preds.shape[0]):
pred_label = np.argmax(preds[i], axis=0)
label = labels[i]
ind = np.nonzero(label.flat)
pred_label_real = pred_label.flat[ind]
label_real = label.flat[ind]
sum_metric += (pred_label_real == label_real).sum()
num_inst += len(pred_label_real)
return sum_metric, num_inst
class SimpleLRScheduler(mx.lr_scheduler.LRScheduler):
"""A simple lr schedule that simply return `dynamic_lr`. We will set `dynamic_lr`
dynamically based on performance on the validation set.
"""
def __init__(self, dynamic_lr, effective_sample_count=1, momentum=0.9, optimizer="sgd"):
super(SimpleLRScheduler, self).__init__()
self.dynamic_lr = dynamic_lr
self.effective_sample_count = effective_sample_count
self.momentum = momentum
self.optimizer = optimizer
def __call__(self, num_update):
if self.optimizer == "speechSGD":
return self.dynamic_lr / self.effective_sample_count, self.momentum
else:
return self.dynamic_lr / self.effective_sample_count
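# Illustrative usage sketch (the numbers are assumptions for the example):
# the optimizer consults the scheduler on every update, and the scheduler
# simply divides the configured learning rate by the effective sample count.
#
#   scheduler = SimpleLRScheduler(0.1, optimizer="sgd")
#   scheduler.effective_sample_count = 32 * 20  # batch_size * truncate_len
#   scheduler(num_update=1)                     # -> 0.1 / 640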
def score_with_state_forwarding(module, eval_data, eval_metric):
eval_data.reset()
eval_metric.reset()
for eval_batch in eval_data:
module.forward(eval_batch, is_train=False)
module.update_metric(eval_metric, eval_batch.label)
# copy over states
outputs = module.get_outputs()
# outputs[0] is softmax, 1:end are states
for i in range(1, len(outputs)):
outputs[i].copyto(eval_data.init_state_arrays[i-1])
def get_initializer(args):
init_type = getattr(mx.initializer, args.config.get('train', 'initializer'))
init_scale = args.config.getfloat('train', 'init_scale')
if init_type is mx.initializer.Xavier:
return mx.initializer.Xavier(magnitude=init_scale)
return init_type(init_scale)
def do_training(training_method, args, module, data_train, data_val):
from distutils.dir_util import mkpath
mkpath(os.path.dirname(get_checkpoint_path(args)))
batch_size = data_train.batch_size
batch_end_callbacks = [mx.callback.Speedometer(batch_size,
args.config.getint('train', 'show_every'))]
eval_allow_extra = True if training_method == METHOD_TBPTT else False
eval_metric = [mx.metric.np(CrossEntropy, allow_extra_outputs=eval_allow_extra),
mx.metric.np(Acc_exclude_padding, allow_extra_outputs=eval_allow_extra)]
eval_metric = mx.metric.create(eval_metric)
optimizer = args.config.get('train', 'optimizer')
momentum = args.config.getfloat('train', 'momentum')
learning_rate = args.config.getfloat('train', 'learning_rate')
lr_scheduler = SimpleLRScheduler(learning_rate, momentum=momentum, optimizer=optimizer)
if training_method == METHOD_TBPTT:
lr_scheduler.seq_len = data_train.truncate_len
n_epoch = 0
num_epoch = args.config.getint('train', 'num_epoch')
learning_rate = args.config.getfloat('train', 'learning_rate')
decay_factor = args.config.getfloat('train', 'decay_factor')
decay_bound = args.config.getfloat('train', 'decay_lower_bound')
clip_gradient = args.config.getfloat('train', 'clip_gradient')
weight_decay = args.config.getfloat('train', 'weight_decay')
if clip_gradient == 0:
clip_gradient = None
last_acc = -float("Inf")
last_params = None
module.bind(data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label,
for_training=True)
module.init_params(initializer=get_initializer(args))
def reset_optimizer():
if optimizer == "sgd" or optimizer == "speechSGD":
module.init_optimizer(kvstore='device',
optimizer=args.config.get('train', 'optimizer'),
optimizer_params={'lr_scheduler': lr_scheduler,
'momentum': momentum,
'rescale_grad': 1.0,
'clip_gradient': clip_gradient,
'wd': weight_decay},
force_init=True)
else:
module.init_optimizer(kvstore='device',
optimizer=args.config.get('train', 'optimizer'),
optimizer_params={'lr_scheduler': lr_scheduler,
'rescale_grad': 1.0,
'clip_gradient': clip_gradient,
'wd': weight_decay},
force_init=True)
reset_optimizer()
while True:
tic = time.time()
eval_metric.reset()
for nbatch, data_batch in enumerate(data_train):
if training_method == METHOD_TBPTT:
lr_scheduler.effective_sample_count = data_train.batch_size * truncate_len
lr_scheduler.momentum = np.power(np.power(momentum, 1.0/(data_train.batch_size * truncate_len)), data_batch.effective_sample_count)
else:
if data_batch.effective_sample_count is not None:
lr_scheduler.effective_sample_count = 1  # data_batch.effective_sample_count
module.forward_backward(data_batch)
module.update()
module.update_metric(eval_metric, data_batch.label)
batch_end_params = mx.model.BatchEndParam(epoch=n_epoch, nbatch=nbatch,
eval_metric=eval_metric,
locals=None)
for callback in batch_end_callbacks:
callback(batch_end_params)
if training_method == METHOD_TBPTT:
# copy over states
outputs = module.get_outputs()
# outputs[0] is softmax, 1:end are states
for i in range(1, len(outputs)):
outputs[i].copyto(data_train.init_state_arrays[i-1])
for name, val in eval_metric.get_name_value():
logging.info('Epoch[%d] Train-%s=%f', n_epoch, name, val)
toc = time.time()
logging.info('Epoch[%d] Time cost=%.3f', n_epoch, toc-tic)
data_train.reset()
# test on eval data
score_with_state_forwarding(module, data_val, eval_metric)
# test whether we should decay learning rate
curr_acc = None
for name, val in eval_metric.get_name_value():
logging.info("Epoch[%d] Dev-%s=%f", n_epoch, name, val)
if name == 'CrossEntropy':
curr_acc = val
assert curr_acc is not None, 'cannot find Acc_exclude_padding in eval metric'
if n_epoch > 0 and lr_scheduler.dynamic_lr > decay_bound and curr_acc > last_acc:
logging.info('Epoch[%d] !!! Dev set performance drops, reverting this epoch',
n_epoch)
logging.info('Epoch[%d] !!! LR decay: %g => %g', n_epoch,
lr_scheduler.dynamic_lr, lr_scheduler.dynamic_lr / float(decay_factor))
lr_scheduler.dynamic_lr /= decay_factor
# we reset the optimizer because the internal states (e.g. momentum)
# might already be exploded, so we want to start from fresh
reset_optimizer()
module.set_params(*last_params)
else:
last_params = module.get_params()
last_acc = curr_acc
n_epoch += 1
# save checkpoints
mx.model.save_checkpoint(get_checkpoint_path(args), n_epoch,
module.symbol, *last_params)
if n_epoch == num_epoch:
break
if __name__ == '__main__':
args = parse_args()
args.config.write(sys.stdout)
training_method = args.config.get('train', 'method')
contexts = parse_contexts(args)
init_states, train_sets, dev_sets = prepare_data(args)
state_names = [x[0] for x in init_states]
batch_size = args.config.getint('train', 'batch_size')
num_hidden = args.config.getint('arch', 'num_hidden')
num_hidden_proj = args.config.getint('arch', 'num_hidden_proj')
num_lstm_layer = args.config.getint('arch', 'num_lstm_layer')
feat_dim = args.config.getint('data', 'xdim')
label_dim = args.config.getint('data', 'ydim')
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
if training_method == METHOD_BUCKETING:
buckets = args.config.get('train', 'buckets')
buckets = list(map(int, re.split(r'\W+', buckets)))
data_train = BucketSentenceIter(train_sets, buckets, batch_size, init_states, feat_dim=feat_dim)
data_val = BucketSentenceIter(dev_sets, buckets, batch_size, init_states, feat_dim=feat_dim)
def sym_gen(seq_len):
sym = lstm_unroll(num_lstm_layer, seq_len, feat_dim, num_hidden=num_hidden,
num_label=label_dim, num_hidden_proj=num_hidden_proj)
data_names = ['data'] + state_names
label_names = ['softmax_label']
return (sym, data_names, label_names)
module = mx.mod.BucketingModule(sym_gen,
default_bucket_key=data_train.default_bucket_key,
context=contexts)
do_training(training_method, args, module, data_train, data_val)
elif training_method == METHOD_TBPTT:
truncate_len = args.config.getint('train', 'truncate_len')
data_train = TruncatedSentenceIter(train_sets, batch_size, init_states,
truncate_len=truncate_len, feat_dim=feat_dim)
data_val = TruncatedSentenceIter(dev_sets, batch_size, init_states,
truncate_len=truncate_len, feat_dim=feat_dim,
do_shuffling=False, pad_zeros=True)
sym = lstm_unroll(num_lstm_layer, truncate_len, feat_dim, num_hidden=num_hidden,
num_label=label_dim, output_states=True, num_hidden_proj=num_hidden_proj)
data_names = [x[0] for x in data_train.provide_data]
label_names = [x[0] for x in data_train.provide_label]
module = mx.mod.Module(sym, context=contexts, data_names=data_names,
label_names=label_names)
do_training(training_method, args, module, data_train, data_val)
else:
raise RuntimeError('Unknown training method: %s' % training_method)
print("="*80)
print("Finished Training")
print("="*80)
args.config.write(sys.stdout)
| apache-2.0 | -3,840,874,098,969,915,000 | 41.446483 | 147 | 0.596037 | false |
tectronics/open-ihm | src/openihm/gui/interface/frmproject_configure_wildfoodincome.py | 3 | 6232 | #!/usr/bin/env python
"""
This file is part of open-ihm.
open-ihm is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
open-ihm is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with open-ihm. If not, see <http://www.gnu.org/licenses/>.
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from control.controller import Controller
from mixins import TableViewMixin
class WildfoodIncomeManager(TableViewMixin):
def getProjectWildfoods(self):
incomes = []
row = 0
while (self.tblSelectedWildfoods.model().item(row,0)):
val = self.tblSelectedWildfoods.model().item(row,0).text()
incomes.append(val)
row = row + 1
return incomes
def displayAvailableWildfoods(self):
''' Retrieve and display available wildfood '''
incomes = self.project.getFoodIncomes("wildfoods")
model = QStandardItemModel(1,1)
# set model headers
model.setHorizontalHeaderItem(0,QStandardItem('Income Source'))
# add data rows
num = 0
for income in incomes:
qtIncome = QStandardItem( income)
model.setItem( num, 0, qtIncome )
num = num + 1
self.tblAvailableWildfoods.setModel(model)
self.tblAvailableWildfoods.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.tblAvailableWildfoods.resizeColumnsToContents()
def displaySelectedWildfoods(self):
''' Retrieve and display Project Wildfood Incomes'''
incomes = self.project.getIncomeSources("wildfoods")
model = QStandardItemModel(1,1)
# set model headers
model.setHorizontalHeaderItem(0,QStandardItem('Income Source'))
# add data rows
num = 0
for income in incomes:
qtIncome = QStandardItem( income.name )
model.setItem( num, 0, qtIncome )
num = num + 1
self.tblSelectedWildfoods.setModel(model)
self.tblSelectedWildfoods.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.tblSelectedWildfoods.resizeColumnsToContents()
def moveAllWildfoods(self):
''' Add all available wildfoods to Project'''
row = 0
while( self.tblAvailableWildfoods.model().item(row,0)):
income = self.tblAvailableWildfoods.model().item(row,0).text()
currentProjectWildfoods = self.getProjectWildfoods()
if income not in currentProjectWildfoods:
self.project.addIncomeSource(income, "wildfoods")
else:
msg = "The income source labelled, %s, has already been added to project" % (income)
QMessageBox.information(self,"Project Configuration",msg)
row = row + 1
self.displaySelectedWildfoods()
def removeAllWildfoods(self):
''' remove all listed household or person characteristics from Project'''
msg = "Are you sure you want to remove all selected wildfoods from this project?"
ret = QMessageBox.question(self,"Confirm Deletion", msg, QMessageBox.Yes|QMessageBox.No)
# if deletion is rejected return without deleting
if ret == QMessageBox.No:
return
incomes = self.getProjectWildfoods()
self.project.deleteIncomeSources( incomes )
self.displaySelectedWildfoods()
def moveSelectedWildfoods(self):
''' Add selected available wildfoods to Project'''
numSelected = self.countRowsSelected(self.tblAvailableWildfoods)
if numSelected != 0:
selectedRows = self.getSelectedRows(self.tblAvailableWildfoods)
for row in selectedRows:
income = self.tblAvailableWildfoods.model().item(row,0).text()
currentProjectWildfoods = self.getProjectWildfoods()
if income not in currentProjectWildfoods:
self.project.addIncomeSource(income, "wildfoods")
else:
msg = "The income source labelled, %s, has already been added to project" % (income)
QMessageBox.information(self,"Project Configuration",msg)
self.displaySelectedWildfoods()
else:
msg = "Please select the wildfoods you want to add."
QMessageBox.information(self,"Project Configuration",msg)
def removeSelectedWildfoods(self):
''' remove selected wildfoods from Project'''
numSelected = self.countRowsSelected(self.tblSelectedWildfoods)
if numSelected != 0:
msg = "Are you sure you want to remove the selected wildfood(s) from this project?"
ret = QMessageBox.question(self,"Confirm Deletion", msg, QMessageBox.Yes|QMessageBox.No)
# if deletion is rejected return without deleting
if ret == QMessageBox.No:
return
selectedRows = self.getSelectedRows(self.tblSelectedWildfoods)
incomes = []
for row in selectedRows:
income = self.tblSelectedWildfoods.model().item(row,0).text()
incomes.append(income)
self.project.deleteIncomeSources( incomes )
self.displaySelectedWildfoods()
else:
msg = "Please select the wildfoods you want to remove."
QMessageBox.information(self,"Project Configuration",msg)
| lgpl-3.0 | -8,747,574,203,397,628,000 | 39.825503 | 105 | 0.608633 | false |
c2theg/DDoS_Information_Sharing | libraries/suds-jurko-0.6/suds/serviceproxy.py | 18 | 2838 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The service proxy provides access to web services.
Replaced by: L{client.Client}
"""
from suds import *
from suds.client import Client
class ServiceProxy(UnicodeMixin):
"""
A lightweight soap based web service proxy.
@ivar __client__: A client.
Everything is delegated to the 2nd generation API.
@type __client__: L{Client}
@note: Deprecated, replaced by L{Client}.
"""
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@keyword faults: Raise faults raised by server (default:True),
else return tuple from service method invocation as (http code, object).
@type faults: boolean
@keyword proxy: An http proxy to be specified on requests (default:{}).
The proxy is defined as {protocol:proxy,}
@type proxy: dict
"""
client = Client(url, **kwargs)
self.__client__ = client
def get_instance(self, name):
"""
Get an instance of a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: An instance on success, else None
@rtype: L{sudsobject.Object}
"""
return self.__client__.factory.create(name)
def get_enum(self, name):
"""
Get an instance of an enumeration defined in the WSDL by name.
@param name: The name of a enumeration defined in the WSDL.
@type name: str
@return: An instance on success, else None
@rtype: L{sudsobject.Object}
"""
return self.__client__.factory.create(name)
def __unicode__(self):
return unicode(self.__client__)
def __getattr__(self, name):
builtin = name.startswith('__') and name.endswith('__')
if builtin:
return self.__dict__[name]
else:
return getattr(self.__client__.service, name)
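# Illustrative usage sketch (the WSDL URL and operation name are assumptions
# for the example, not part of suds):
#
#   proxy = ServiceProxy('http://example.com/service?wsdl')
#   result = proxy.SomeOperation('arg')
#
# New code should use the 2nd generation API that this class delegates to:
#
#   from suds.client import Client
#   client = Client('http://example.com/service?wsdl')
#   result = client.service.SomeOperation('arg')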
| mit | 1,272,644,993,432,399,600 | 34.475 | 88 | 0.636716 | false |
grupoprog3/proyecto_final | proyecto/flask/Lib/shutil.py | 1 | 41006 | """Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
import fnmatch
import collections
import errno
import tarfile
try:
import bz2
del bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
import lzma
del lzma
_LZMA_SUPPORTED = True
except ImportError:
_LZMA_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive",
"ignore_patterns", "chown", "which", "get_terminal_size",
"SameFileError"]
# disk_usage is added later, if available on the platform
class Error(OSError):
pass
class SameFileError(Error):
"""Raised when source and destination are the same file."""
class SpecialFileError(OSError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(OSError):
"""Raised when a command could not be executed"""
class ReadError(OSError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
and unpacking registries fails"""
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst, *, follow_symlinks=True):
"""Copy data from src to dst.
If follow_symlinks is not set and src is a symbolic link, a new
symlink will be created instead of copying the file it points to.
"""
if _samefile(src, dst):
raise SameFileError("{!r} and {!r} are the same file".format(src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
return dst
def copymode(src, dst, *, follow_symlinks=True):
"""Copy mode bits from src to dst.
If follow_symlinks is not set, symlinks aren't followed if and only
if both `src` and `dst` are symlinks. If `lchmod` isn't available
(e.g. Linux) this method does nothing.
"""
if not follow_symlinks and os.path.islink(src) and os.path.islink(dst):
if hasattr(os, 'lchmod'):
stat_func, chmod_func = os.lstat, os.lchmod
else:
return
elif hasattr(os, 'chmod'):
stat_func, chmod_func = os.stat, os.chmod
else:
return
st = stat_func(src)
chmod_func(dst, stat.S_IMODE(st.st_mode))
if hasattr(os, 'listxattr'):
def _copyxattr(src, dst, *, follow_symlinks=True):
"""Copy extended filesystem attributes from `src` to `dst`.
Overwrite existing attributes.
If `follow_symlinks` is false, symlinks won't be followed.
"""
try:
names = os.listxattr(src, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.ENOTSUP, errno.ENODATA):
raise
return
for name in names:
try:
value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
raise
else:
def _copyxattr(*args, **kwargs):
pass
def copystat(src, dst, *, follow_symlinks=True):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst.
If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and
only if both `src` and `dst` are symlinks.
"""
def _nop(*args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
return getattr(os, name, _nop)
else:
# use the real function only if it exists
# *and* it supports follow_symlinks
def lookup(name):
fn = getattr(os, name, _nop)
if fn in os.supports_follow_symlinks:
return fn
return _nop
st = lookup("stat")(src, follow_symlinks=follow)
mode = stat.S_IMODE(st.st_mode)
lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
follow_symlinks=follow)
try:
lookup("chmod")(dst, mode, follow_symlinks=follow)
except NotImplementedError:
# if we got a NotImplementedError, it's because
# * follow_symlinks=False,
# * lchown() is unavailable, and
# * either
# * fchownat() is unavailable or
# * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW.
# (it returned ENOSUP.)
# therefore we're out of options--we simply cannot chown the
# symlink. give up, suppress the error.
# (which is what shutil always did in this circumstance.)
pass
if hasattr(st, 'st_flags'):
try:
lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
except OSError as why:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and why.errno == getattr(errno, err):
break
else:
raise
_copyxattr(src, dst, follow_symlinks=follow)
def copy(src, dst, *, follow_symlinks=True):
"""Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
If source and destination are the same file, a SameFileError will be
raised.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copymode(src, dst, follow_symlinks=follow_symlinks)
return dst
def copy2(src, dst, *, follow_symlinks=True):
"""Copy data and all stat info ("cp -p src dst"). Return the file's
destination."
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copystat(src, dst, follow_symlinks=follow_symlinks)
return dst
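# Illustrative usage (not part of the original module; the paths are
# hypothetical): copy2() preserves timestamps and other stat info, while
# copy() only preserves mode bits.
#   copy2('report.txt', '/tmp/backup/')   # -> '/tmp/backup/report.txt'
#   copy('report.txt', '/tmp/plain/')     # mtime/atime not preserved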
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
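# Illustrative usage (not part of the original module; the directory names are
# made up):
#   copytree('src_dir', 'dst_dir', ignore=ignore_patterns('*.pyc', 'tmp*'))
# would skip every *.pyc file and every name starting with "tmp".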
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed to by the symlink doesn't
exist, an exception will be added to the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occur; copy2 will raise an error
if os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore,
copy_function)
else:
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except OSError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
# Copying file access times may fail on Windows
if getattr(why, 'winerror', None) is None:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
return dst
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except OSError:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
os.unlink(fullname)
except OSError:
onerror(os.unlink, fullname, sys.exc_info())
try:
os.rmdir(path)
except OSError:
onerror(os.rmdir, path, sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
names = []
try:
names = os.listdir(topfd)
except OSError as err:
err.filename = path
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
mode = orig_st.st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
try:
dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
except OSError:
onerror(os.open, fullname, sys.exc_info())
else:
try:
if os.path.samestat(orig_st, os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
os.rmdir(name, dir_fd=topfd)
except OSError:
onerror(os.rmdir, fullname, sys.exc_info())
else:
try:
# This can only happen if someone replaces
# a directory with a symlink after the call to
# stat.S_ISDIR above.
raise OSError("Cannot call rmtree on a symbolic "
"link")
except OSError:
onerror(os.path.islink, fullname, sys.exc_info())
finally:
os.close(dirfd)
else:
try:
os.unlink(name, dir_fd=topfd)
except OSError:
onerror(os.unlink, fullname, sys.exc_info())
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.listdir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = os.lstat(path)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
if os.path.samestat(orig_st, os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
os.rmdir(path)
except OSError:
onerror(os.rmdir, path, sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
finally:
os.close(fd)
else:
return _rmtree_unsafe(path, onerror)
# Allow introspection of whether or not the hardening against symlink
# attacks is supported on the current platform
rmtree.avoids_symlink_attacks = _use_fd_functions
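# Illustrative usage (not part of the original module; 'path_to_remove' is a
# hypothetical path): an onerror hook that clears a read-only bit and retries.
#   def _force_remove(func, path, exc_info):
#       os.chmod(path, stat.S_IWRITE)
#       func(path)
#   rmtree('path_to_remove', onerror=_force_remove)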
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
sep = os.path.sep + (os.path.altsep or '')
return os.path.basename(path.rstrip(sep))
def move(src, dst, copy_function=copy2):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command. Return the file or directory's
destination.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed. Symlinks are
recreated under the new name if os.rename() fails because of cross
filesystem renames.
The optional `copy_function` argument is a callable that will be used
to copy the source or it will be delegated to `copytree`.
By default, copy2() is used, but any function that supports the same
signature (like copy()) can be used.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, real_dst)
os.unlink(src)
elif os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself"
" '%s'." % (src, dst))
copytree(src, real_dst, copy_function=copy_function,
symlinks=True)
rmtree(src)
else:
copy_function(src, real_dst)
os.unlink(src)
return real_dst
def _destinsrc(src, dst):
src = os.path.abspath(src)
dst = os.path.abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", "xz", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", ".bz2", or ".xz").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
if _LZMA_SUPPORTED:
tar_compression['xz'] = 'xz'
compress_ext['xz'] = '.xz'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if archive_dir and not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
import zipfile
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if archive_dir and not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
with zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED) as zf:
path = os.path.normpath(base_dir)
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
if _LZMA_SUPPORTED:
_ARCHIVE_FORMATS['xztar'] = (_make_tarball, [('compress', 'xz')],
"xz'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not callable(function):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) != 2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
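# Illustrative usage (not part of the original module; the directories are
# hypothetical):
#   make_archive('/tmp/site-backup', 'gztar', root_dir='/var/www', base_dir='site')
# creates /tmp/site-backup.tar.gz containing the 'site' directory.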
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not callable(function):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registery."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
if _LZMA_SUPPORTED:
_UNPACK_FORMATS['xztar'] = (['.tar.xz', '.txz'], _unpack_tarfile, [],
"xz'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
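# Illustrative usage (not part of the original module; the file name is made up):
#   unpack_archive('release.tar.gz', '/tmp/release')           # format inferred from extension
#   unpack_archive('release.tar.gz', '/tmp/release', 'gztar')  # explicit format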
if hasattr(os, 'statvfs'):
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt':
import nt
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
total, free = nt._getdiskusage(path)
used = total - free
return _ntuple_diskusage(total, used, free)
def chown(path, user=None, group=None):
"""Change owner user and group of the given path.
user and group can be the uid/gid or the user/group names, and in that case,
they are converted to their respective uid/gid.
"""
if user is None and group is None:
raise ValueError("user and/or group must be set")
_user = user
_group = group
# -1 means don't change it
if user is None:
_user = -1
# user can either be an int (the uid) or a string (the system username)
elif isinstance(user, str):
_user = _get_uid(user)
if _user is None:
raise LookupError("no such user: {!r}".format(user))
if group is None:
_group = -1
elif not isinstance(group, int):
_group = _get_gid(group)
if _group is None:
raise LookupError("no such group: {!r}".format(group))
os.chown(path, _user, _group)
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# columns, lines are the working values
try:
columns = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ['LINES'])
except (KeyError, ValueError):
lines = 0
# only query if necessary
if columns <= 0 or lines <= 0:
try:
size = os.get_terminal_size(sys.__stdout__.fileno())
except (AttributeError, ValueError, OSError):
# stdout is None, closed, detached, or not a terminal, or
# os.get_terminal_size() is unsupported
size = os.terminal_size(fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return os.terminal_size((columns, lines))
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
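# Illustrative usage (not part of the original module; the result depends on
# the local PATH): which('python') might return '/usr/bin/python', and
# which('no-such-tool') returns None.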
| apache-2.0 | -8,391,137,584,083,190,000 | 33.875766 | 86 | 0.583256 | false |
ChrisBeaumont/brut | bubbly/hyperopt.py | 2 | 2563 | """
A simple interface for random exploration of hyperparameter space
"""
import random
import numpy as np
from scipy import stats
from sklearn.metrics import auc
from sklearn import metrics as met
class Choice(object):
"""Randomly select from a list"""
def __init__(self, *choices):
self._choices = choices
def rvs(self):
return random.choice(self._choices)
class Space(object):
"""
Spaces gather and randomly sample
collections of hyperparameters.
Any class with an rvs method is a valid hyperparameter
(e.g., anything in scipy.stats is a hyperparameter)
"""
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
def __iter__(self):
while True:
yield {k: v.rvs() for k, v in self._hyperparams.items()}
def auc_below_fpos(y, yp, fpos):
"""
Variant on the area under the ROC curve score
Only integrate the portion of the curve
to the left of a threshold in fpos
"""
fp, tp, th = met.roc_curve(y, yp)
good = (fp <= fpos)
return auc(fp[good], tp[good])
def fmin(objective, space, threshold=np.inf):
"""
Generator that randomly samples a space,
and yields whenever a new minimum is encountered
Parameters
----------
objective : A function which takes hyperparameters
as input, and computes an objective function and classifier
as output
space : the Space to sample
threshold : A threshold in the objective function values.
If provided, will not yield anything until
the objective function falls below threshold
Yields
------
Tuples of (objective function, parameter dict, classifier)
"""
best = threshold
try:
for p in space:
f, clf = objective(**p)
if f < best:
best = f
yield best, p, clf
except KeyboardInterrupt:
pass
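# Illustrative usage (not part of the original module; 'my_objective' is a
# hypothetical callable returning (score, classifier)):
#   for score, params, clf in fmin(my_objective, gb_space, threshold=0.5):
#       print(score, params)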
#default space for Gradient Boosted Decision trees
gb_space = Space(learning_rate = stats.uniform(1e-3, 1 - 1.01e-3),
n_estimators = Choice(50, 100, 200),
max_depth = Choice(1, 2, 3),
subsample = stats.uniform(1e-3, 1 - 1.01e-3))
#default space for WiseRF random forests
rf_space = Space(n_estimators = Choice(200, 400, 800, 1600),
min_samples_split = Choice(1, 2, 4),
criterion = Choice('gini', 'gainratio', 'infogain'),
max_features = Choice('auto'),
n_jobs = Choice(2))
| mit | -6,405,223,350,560,046,000 | 25.978947 | 75 | 0.60359 | false |
Zhongqilong/mykbengineer | kbe/src/lib/python/Tools/scripts/fixdiv.py | 94 | 13938 | #! /usr/bin/env python3
"""fixdiv - tool to fix division operators.
To use this tool, first run `python -Qwarnall yourscript.py 2>warnings'.
This runs the script `yourscript.py' while writing warning messages
about all uses of the classic division operator to the file
`warnings'. The warnings look like this:
<file>:<line>: DeprecationWarning: classic <type> division
The warnings are written to stderr, so you must use `2>' for the I/O
redirect. I know of no way to redirect stderr on Windows in a DOS
box, so you will have to modify the script to set sys.stderr to some
kind of log file if you want to do this on Windows.
The warnings are not limited to the script; modules imported by the
script may also trigger warnings. In fact a useful technique is to
write a test script specifically intended to exercise all code in a
particular module or set of modules.
Then run `python fixdiv.py warnings'. This first reads the warnings,
looking for classic division warnings, and sorts them by file name and
line number. Then, for each file that received at least one warning,
it parses the file and tries to match the warnings up to the division
operators found in the source code. If it is successful, it writes
its findings to stdout, preceded by a line of dashes and a line of the
form:
Index: <file>
If the only findings found are suggestions to change a / operator into
a // operator, the output is acceptable input for the Unix 'patch'
program.
Here are the possible messages on stdout (N stands for a line number):
- A plain-diff-style change ('NcN', a line marked by '<', a line
containing '---', and a line marked by '>'):
A / operator was found that should be changed to //. This is the
recommendation when only int and/or long arguments were seen.
- 'True division / operator at line N' and a line marked by '=':
A / operator was found that can remain unchanged. This is the
recommendation when only float and/or complex arguments were seen.
- 'Ambiguous / operator (..., ...) at line N', line marked by '?':
A / operator was found for which int or long as well as float or
complex arguments were seen. This is highly unlikely; if it occurs,
you may have to restructure the code to keep the classic semantics,
or maybe you don't care about the classic semantics.
- 'No conclusive evidence on line N', line marked by '*':
A / operator was found for which no warnings were seen. This could
be code that was never executed, or code that was only executed
with user-defined objects as arguments. You will have to
investigate further. Note that // can be overloaded separately from
/, using __floordiv__. True division can also be separately
overloaded, using __truediv__. Classic division should be the same
as either of those. (XXX should I add a warning for division on
user-defined objects, to disambiguate this case from code that was
never executed?)
- 'Phantom ... warnings for line N', line marked by '*':
A warning was seen for a line not containing a / operator. The most
likely cause is a warning about code executed by 'exec' or eval()
(see note below), or an indirect invocation of the / operator, for
example via the div() function in the operator module. It could
also be caused by a change to the file between the time the test
script was run to collect warnings and the time fixdiv was run.
- 'More than one / operator in line N'; or
'More than one / operator per statement in lines N-N':
The scanner found more than one / operator on a single line, or in a
statement split across multiple lines. Because the warnings
framework doesn't (and can't) show the offset within the line, and
the code generator doesn't always give the correct line number for
operations in a multi-line statement, we can't be sure whether all
operators in the statement were executed. To be on the safe side,
by default a warning is issued about this case. In practice, these
cases are usually safe, and the -m option suppresses these warning.
- 'Can't find the / operator in line N', line marked by '*':
This really shouldn't happen. It means that the tokenize module
reported a '/' operator but the line it returns didn't contain a '/'
character at the indicated position.
- 'Bad warning for line N: XYZ', line marked by '*':
This really shouldn't happen. It means that a 'classic XYZ
division' warning was read with XYZ being something other than
'int', 'long', 'float', or 'complex'.
Notes:
- The augmented assignment operator /= is handled the same way as the
/ operator.
- This tool never looks at the // operator; no warnings are ever
generated for use of this operator.
- This tool never looks at the / operator when a future division
statement is in effect; no warnings are generated in this case, and
because the tool only looks at files for which at least one classic
division warning was seen, it will never look at files containing a
future division statement.
- Warnings may be issued for code not read from a file, but executed
using the exec() or eval() functions. These may have
<string> in the filename position, in which case the fixdiv script
will attempt and fail to open a file named '<string>' and issue a
warning about this failure; or these may be reported as 'Phantom'
warnings (see above). You're on your own to deal with these. You
could make all recommended changes and add a future division
statement to all affected files, and then re-run the test script; it
should not issue any warnings. If there are any, and you have a
hard time tracking down where they are generated, you can use the
-Werror option to force an error instead of a first warning,
generating a traceback.
- The tool should be run from the same directory as that from which
the original script was run, otherwise it won't be able to open
files given by relative pathnames.
"""
import sys
import getopt
import re
import tokenize
multi_ok = 0
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hm")
except getopt.error as msg:
usage(msg)
return 2
for o, a in opts:
if o == "-h":
print(__doc__)
return
if o == "-m":
global multi_ok
multi_ok = 1
if not args:
usage("at least one file argument is required")
return 2
if args[1:]:
sys.stderr.write("%s: extra file arguments ignored\n", sys.argv[0])
warnings = readwarnings(args[0])
if warnings is None:
return 1
files = list(warnings.keys())
if not files:
print("No classic division warnings read from", args[0])
return
files.sort()
exit = None
for filename in files:
x = process(filename, warnings[filename])
exit = exit or x
return exit
def usage(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Usage: %s [-m] warnings\n" % sys.argv[0])
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
PATTERN = ("^(.+?):(\d+): DeprecationWarning: "
"classic (int|long|float|complex) division$")
def readwarnings(warningsfile):
prog = re.compile(PATTERN)
try:
f = open(warningsfile)
except IOError as msg:
sys.stderr.write("can't open: %s\n" % msg)
return
warnings = {}
while 1:
line = f.readline()
if not line:
break
m = prog.match(line)
if not m:
if line.find("division") >= 0:
sys.stderr.write("Warning: ignored input " + line)
continue
filename, lineno, what = m.groups()
list = warnings.get(filename)
if list is None:
warnings[filename] = list = []
list.append((int(lineno), sys.intern(what)))
f.close()
return warnings
def process(filename, list):
print("-"*70)
assert list # if this fails, readwarnings() is broken
try:
fp = open(filename)
except IOError as msg:
sys.stderr.write("can't open: %s\n" % msg)
return 1
print("Index:", filename)
f = FileContext(fp)
list.sort()
index = 0 # list[:index] has been processed, list[index:] is still to do
g = tokenize.generate_tokens(f.readline)
while 1:
startlineno, endlineno, slashes = lineinfo = scanline(g)
if startlineno is None:
break
assert startlineno <= endlineno is not None
orphans = []
while index < len(list) and list[index][0] < startlineno:
orphans.append(list[index])
index += 1
if orphans:
reportphantomwarnings(orphans, f)
warnings = []
while index < len(list) and list[index][0] <= endlineno:
warnings.append(list[index])
index += 1
if not slashes and not warnings:
pass
elif slashes and not warnings:
report(slashes, "No conclusive evidence")
elif warnings and not slashes:
reportphantomwarnings(warnings, f)
else:
if len(slashes) > 1:
if not multi_ok:
rows = []
lastrow = None
for (row, col), line in slashes:
if row == lastrow:
continue
rows.append(row)
lastrow = row
assert rows
if len(rows) == 1:
print("*** More than one / operator in line", rows[0])
else:
print("*** More than one / operator per statement", end=' ')
print("in lines %d-%d" % (rows[0], rows[-1]))
intlong = []
floatcomplex = []
bad = []
for lineno, what in warnings:
if what in ("int", "long"):
intlong.append(what)
elif what in ("float", "complex"):
floatcomplex.append(what)
else:
bad.append(what)
lastrow = None
for (row, col), line in slashes:
if row == lastrow:
continue
lastrow = row
line = chop(line)
if line[col:col+1] != "/":
print("*** Can't find the / operator in line %d:" % row)
print("*", line)
continue
if bad:
print("*** Bad warning for line %d:" % row, bad)
print("*", line)
elif intlong and not floatcomplex:
print("%dc%d" % (row, row))
print("<", line)
print("---")
print(">", line[:col] + "/" + line[col:])
elif floatcomplex and not intlong:
print("True division / operator at line %d:" % row)
print("=", line)
elif intlong and floatcomplex:
print("*** Ambiguous / operator (%s, %s) at line %d:" % (
"|".join(intlong), "|".join(floatcomplex), row))
print("?", line)
fp.close()
def reportphantomwarnings(warnings, f):
blocks = []
lastrow = None
lastblock = None
for row, what in warnings:
if row != lastrow:
lastblock = [row]
blocks.append(lastblock)
lastblock.append(what)
for block in blocks:
row = block[0]
whats = "/".join(block[1:])
print("*** Phantom %s warnings for line %d:" % (whats, row))
f.report(row, mark="*")
def report(slashes, message):
lastrow = None
for (row, col), line in slashes:
if row != lastrow:
print("*** %s on line %d:" % (message, row))
print("*", chop(line))
lastrow = row
class FileContext:
def __init__(self, fp, window=5, lineno=1):
self.fp = fp
self.window = 5
self.lineno = 1
self.eoflookahead = 0
self.lookahead = []
self.buffer = []
def fill(self):
while len(self.lookahead) < self.window and not self.eoflookahead:
line = self.fp.readline()
if not line:
self.eoflookahead = 1
break
self.lookahead.append(line)
def readline(self):
self.fill()
if not self.lookahead:
return ""
line = self.lookahead.pop(0)
self.buffer.append(line)
self.lineno += 1
return line
def truncate(self):
del self.buffer[-self.window:]
def __getitem__(self, index):
self.fill()
bufstart = self.lineno - len(self.buffer)
lookend = self.lineno + len(self.lookahead)
if bufstart <= index < self.lineno:
return self.buffer[index - bufstart]
if self.lineno <= index < lookend:
return self.lookahead[index - self.lineno]
raise KeyError
def report(self, first, last=None, mark="*"):
if last is None:
last = first
for i in range(first, last+1):
try:
line = self[i]
except KeyError:
line = "<missing line>"
print(mark, chop(line))
def scanline(g):
slashes = []
startlineno = None
endlineno = None
for type, token, start, end, line in g:
endlineno = end[0]
if startlineno is None:
startlineno = endlineno
if token in ("/", "/="):
slashes.append((start, line))
if type == tokenize.NEWLINE:
break
return startlineno, endlineno, slashes
def chop(line):
if line.endswith("\n"):
return line[:-1]
else:
return line
if __name__ == "__main__":
sys.exit(main())
| lgpl-3.0 | -8,438,709,539,807,844,000 | 35.678947 | 84 | 0.600947 | false |
wbbeyourself/cn-deep-learning | ipnd-neural-network/NN.py | 6 | 2597 | import numpy as np
class NeuralNetwork(object):
def sigmoid(self, x):
return 1/(1 + np.exp(-x))
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
# Activation function is the sigmoid function
self.activation_function = self.sigmoid
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array, column vector
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
#Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
#Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
#### Implement the backward pass here ####
### Backward pass ###
# 1 is the gradient of f'(x) where f(x) = x
output_delta = (targets - final_outputs) * 1
hidden_delta = np.dot(self.weights_hidden_to_output.T, output_delta) * hidden_outputs * (1-hidden_outputs)
# TODO: Update the weights
self.weights_hidden_to_output += self.lr * np.dot(output_delta, hidden_outputs.T)
self.weights_input_to_hidden += self.lr * np.dot(hidden_delta, inputs.T)
#predict with a inputs_list
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
#Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
#Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
return final_outputs
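# Illustrative usage (not part of the original module; the hyperparameters and
# data are arbitrary):
#   nn = NeuralNetwork(input_nodes=3, hidden_nodes=2, output_nodes=1, learning_rate=0.1)
#   nn.train([0.5, -0.2, 0.1], [0.4])
#   prediction = nn.run([0.5, -0.2, 0.1])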
| mit | 2,306,840,412,373,380,000 | 38.348485 | 114 | 0.592992 | false |
EvilKanoa/ardupilot | libraries/AP_OpticalFlow/examples/ADNS3080ImageGrabber/ADNS3080ImageGrabber.py | 53 | 6246 | # File: ADNS3080ImageGrabber.py
import serial
import string
import math
import time
from Tkinter import *
from threading import Timer
comPort = 'COM8' #default com port
comPortBaud = 115200
class App:
grid_size = 15
num_pixels = 30
image_started = FALSE
image_current_row = 0;
ser = serial.Serial()
pixel_dictionary = {}
def __init__(self, master):
# set main window's title
master.title("ADNS3080ImageGrabber")
frame = Frame(master)
frame.grid(row=0,column=0)
self.comPortStr = StringVar()
self.comPort = Entry(frame,textvariable=self.comPortStr)
self.comPort.grid(row=0,column=0)
self.comPort.delete(0, END)
self.comPort.insert(0,comPort)
self.button = Button(frame, text="Open", fg="red", command=self.open_serial)
self.button.grid(row=0,column=1)
self.entryStr = StringVar()
self.entry = Entry(frame,textvariable=self.entryStr)
self.entry.grid(row=0,column=2)
self.entry.delete(0, END)
self.entry.insert(0,"I")
self.send_button = Button(frame, text="Send", command=self.send_to_serial)
self.send_button.grid(row=0,column=3)
self.canvas = Canvas(master, width=self.grid_size*self.num_pixels, height=self.grid_size*self.num_pixels)
self.canvas.grid(row=1)
## start attempts to read from serial port
self.read_loop()
def __del__(self):
self.stop_read_loop()
def open_serial(self):
# close the serial port
if( self.ser.isOpen() ):
try:
self.ser.close()
except:
pass # do nothing
# open the serial port
try:
self.ser = serial.Serial(port=self.comPortStr.get(),baudrate=comPortBaud, timeout=1)
print("serial port '" + self.comPortStr.get() + "' opened!")
except:
print("failed to open serial port '" + self.comPortStr.get() + "'")
def send_to_serial(self):
if self.ser.isOpen():
self.ser.write(self.entryStr.get())
print "sent '" + self.entryStr.get() + "' to " + self.ser.portstr
else:
print "Serial port not open!"
def read_loop(self):
try:
self.t.cancel()
except:
pass # do nothing
#print("reading")
if( self.ser.isOpen() ) :
self.read_from_serial();
self.t = Timer(0.0,self.read_loop)
self.t.start()
def stop_read_loop(self):
try:
self.t.cancel()
except:
print("failed to cancel timer")
# do nothing
def read_from_serial(self):
if( self.ser.isOpen() ):
while( self.ser.inWaiting() > 0 ):
self.line_processed = FALSE
line = self.ser.readline()
# process the line read
if( line.find("-------------------------") == 0 ):
self.line_processed = TRUE
self.image_started = FALSE
self.image_current_row = 0
if( self.image_started == TRUE ):
if( self.image_current_row >= self.num_pixels ):
self.image_started = FALSE
else:
words = string.split(line,",")
if len(words) >= 30:
self.line_processed = TRUE
x = 0
for v in words:
try:
colour = int(v)
except:
colour = 0;
#self.display_pixel(x,self.image_current_row,colour)
self.display_pixel(self.num_pixels-1-self.image_current_row,self.num_pixels-1-x,colour)
x += 1
self.image_current_row += 1
else:
print("line " + str(self.image_current_row) + "incomplete (" + str(len(words)) + " of " + str(self.num_pixels) + "), ignoring")
#print("bad line: " + line);
if( line.find("image data") >= 0 ):
self.line_processed = TRUE
self.image_started = TRUE
self.image_current_row = 0
# clear canvas
#self.canvas.delete(ALL) # remove all items
#display the line if we couldn't understand it
if( self.line_processed == FALSE ):
print( line )
def display_default_image(self):
# display the grid
for x in range(0, self.num_pixels-1):
for y in range(0, self.num_pixels-1):
colour = x * y / 3.53
self.display_pixel(x,y,colour)
def display_pixel(self, x, y, colour):
if( x >= 0 and x < self.num_pixels and y >= 0 and y < self.num_pixels ) :
#find the old pixel if it exists and delete it
if self.pixel_dictionary.has_key(x+y*self.num_pixels) :
self.old_pixel = self.pixel_dictionary[x+y*self.num_pixels]
self.canvas.delete(self.old_pixel)
del(self.old_pixel)
fillColour = "#%02x%02x%02x" % (colour, colour, colour)
#draw a new pixel and add to pixel_array
self.new_pixel = self.canvas.create_rectangle(x*self.grid_size, y*self.grid_size, (x+1)*self.grid_size, (y+1)*self.grid_size, fill=fillColour)
self.pixel_dictionary[x+y*self.num_pixels] = self.new_pixel
## main loop ##
root = Tk()
#root.withdraw()
#serPort = SerialHandler(comPort,comPortBaud)
# create main display
app = App(root)
app.display_default_image()
print("entering main loop!")
root.mainloop()
app.stop_read_loop()
print("exiting")
| gpl-3.0 | 5,385,279,253,711,020,000 | 32.7 | 155 | 0.493276 | false |
CentOS-PaaS-SIG/linchpin | linchpin/provision/action_plugins/gcp_compute_network.py | 3 | 1255 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
import linchpin.MockUtils.MockUtils as mock_utils
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
"""
Simple action plugin that returns the mocked output
when linchpin_mock is True
"""
super(ActionModule, self).run(tmp, task_vars)
# contains all the module arguments
module_args = self._task.args.copy()
# task_vars['vars'].keys() contains all the variables available to the task.
# When linchpin_mock is passed as an extra_var key/value pair, this plugin
# returns the mocked output of the named module instead of executing it.
# print(task_vars['vars'].keys())
# print(task_vars['vars'].get('linchpin_mock', False))
linchpin_mock = task_vars['vars'].get('linchpin_mock',
False)
if linchpin_mock:
return mock_utils.get_mock_data(module_args,
"gcp_compute_network")
module_return = self._execute_module(module_args=module_args,
task_vars=task_vars, tmp=tmp)
return module_return
| gpl-3.0 | -188,484,870,798,165,470 | 40.833333 | 74 | 0.588845 | false |
ubc/edx-ora2 | openassessment/assessment/worker/training.py | 10 | 12547 | """
Asynchronous tasks for training classifiers from examples.
"""
import datetime
from collections import defaultdict
from celery import task
from celery.utils.log import get_task_logger
from dogapi import dog_stats_api
from django.conf import settings
from django.db import DatabaseError
from openassessment.assessment.api import ai_worker as ai_worker_api
from openassessment.assessment.errors import AIError, ANTICIPATED_CELERY_ERRORS
from .algorithm import AIAlgorithm, AIAlgorithmError
from .grading import reschedule_grading_tasks
from openassessment.assessment.errors.ai import AIGradingInternalError
from openassessment.assessment.models.ai import AITrainingWorkflow
MAX_RETRIES = 2
logger = get_task_logger(__name__)
# If the Django settings define a low-priority queue, use that.
# Otherwise, use the default queue.
TRAINING_TASK_QUEUE = getattr(settings, 'LOW_PRIORITY_QUEUE', None)
RESCHEDULE_TASK_QUEUE = getattr(settings, 'LOW_PRIORITY_QUEUE', None)
class InvalidExample(Exception):
"""
The example retrieved from the AI API had an invalid format.
"""
def __init__(self, example_dict, msg):
err_msg = u"Training example \"{example}\" is not valid: {msg}".format(
example=example_dict,
msg=msg
)
super(InvalidExample, self).__init__(err_msg)
@task(queue=TRAINING_TASK_QUEUE, max_retries=MAX_RETRIES) # pylint: disable=E1102
@dog_stats_api.timed('openassessment.assessment.ai.train_classifiers.time')
def train_classifiers(workflow_uuid):
"""
Asynchronous task to train classifiers for AI grading.
This task uses the AI API to retrieve task parameters
(algorithm ID and training examples) and upload
the trained classifiers.
If the task could not be completed successfully,
it is retried a few times. If it continues to fail,
it is left incomplete. Since the AI API tracks all
training tasks in the database, incomplete tasks
can always be rescheduled manually later.
Args:
workflow_uuid (str): The UUID of the workflow associated
with this training task.
Returns:
None
Raises:
AIError: An error occurred during a request to the AI API.
AIAlgorithmError: An error occurred while training the AI classifiers.
InvalidExample: The training examples provided by the AI API were not valid.
"""
# Short-circuit if the workflow is already marked complete
# This is an optimization, but training tasks could still
# execute multiple times depending on when they get picked
# up by workers and marked complete.
try:
if ai_worker_api.is_training_workflow_complete(workflow_uuid):
return
except AIError:
msg = (
u"An unexpected error occurred while checking the "
u"completion of training workflow with UUID {uuid}"
).format(uuid=workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
# Retrieve task parameters
try:
params = ai_worker_api.get_training_task_params(workflow_uuid)
examples = params['training_examples']
algorithm_id = params['algorithm_id']
course_id = params['course_id']
item_id = params['item_id']
except (AIError, KeyError):
msg = (
u"An error occurred while retrieving AI training "
u"task parameters for the workflow with UUID {}"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
# Retrieve the ML algorithm to use for training
# (based on task params and worker configuration)
try:
algorithm = AIAlgorithm.algorithm_for_id(algorithm_id)
except AIAlgorithmError:
msg = (
u"An error occurred while loading the "
u"AI algorithm (training workflow UUID {})"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
except AIError:
msg = (
u"An error occurred while retrieving "
u"the algorithm ID (training workflow UUID {})"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
# Train a classifier for each criterion
# The AIAlgorithm subclass is responsible for ensuring that
# the trained classifiers are JSON-serializable.
try:
classifier_set = {
criterion_name: algorithm.train_classifier(examples_dict)
for criterion_name, examples_dict
in _examples_by_criterion(examples).iteritems()
}
except InvalidExample:
msg = (
u"Training example format was not valid "
u"(training workflow UUID {})"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
except AIAlgorithmError:
msg = (
u"An error occurred while training AI classifiers "
u"(training workflow UUID {})"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
# Upload the classifiers
# (implicitly marks the workflow complete)
try:
ai_worker_api.create_classifiers(workflow_uuid, classifier_set)
except AIError:
msg = (
u"An error occurred while uploading trained classifiers "
u"(training workflow UUID {})"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
# Upon successful completion of the creation of classifiers, we will try to automatically schedule any
# grading tasks for the same item.
try:
reschedule_grading_tasks.apply_async(args=[course_id, item_id])
except AIGradingInternalError as ex:
msg = (
u"An error occured while trying to regrade all ungraded assignments"
u"after classifiers were trained successfully: {}"
).format(ex)
logger.exception(msg)
# Here we don't retry, because they will already retry once in the grading task.
raise
@task(queue=RESCHEDULE_TASK_QUEUE, max_retries=MAX_RETRIES) #pylint: disable=E1102
@dog_stats_api.timed('openassessment.assessment.ai.reschedule_training_tasks.time')
def reschedule_training_tasks(course_id, item_id):
"""
Reschedules all incomplete training tasks
Args:
course_id (unicode): The course that we are going to search for unfinished training workflows
item_id (unicode): The specific item within that course that we will reschedule unfinished workflows for
Raises:
AIReschedulingInternalError
DatabaseError
"""
# Starts logging the details of the rescheduling
_log_start_reschedule_training(course_id=course_id, item_id=item_id)
start_time = datetime.datetime.now()
# Run a query to find the incomplete training workflows
try:
training_workflows = AITrainingWorkflow.get_incomplete_workflows(course_id, item_id)
except (DatabaseError, AITrainingWorkflow.DoesNotExist) as ex:
msg = (
u"An unexpected error occurred while retrieving all incomplete "
u"training tasks for course_id: {cid} and item_id: {iid}: {ex}"
).format(cid=course_id, iid=item_id, ex=ex)
logger.exception(msg)
raise reschedule_training_tasks.retry()
# Tries to train every workflow that has not completed.
for target_workflow in training_workflows:
try:
train_classifiers.apply_async(args=[target_workflow.uuid])
logger.info(
u"Rescheduling of training was successful for workflow with uuid{}".format(target_workflow.uuid)
)
except ANTICIPATED_CELERY_ERRORS as ex:
msg = (
u"An unexpected error occurred while scheduling the task for training workflow with UUID {id}: {ex}"
).format(id=target_workflow.uuid, ex=ex)
logger.exception(msg)
time_delta = datetime.datetime.now() - start_time
_log_complete_reschedule_training(
course_id=course_id, item_id=item_id, seconds=time_delta.total_seconds(), success=False
)
raise reschedule_training_tasks.retry()
# Logs the total time to reschedule all training of classifiers if not logged beforehand by exception.
time_delta = datetime.datetime.now() - start_time
_log_complete_reschedule_training(
course_id=course_id, item_id=item_id, seconds=time_delta.total_seconds(), success=True
)
def _examples_by_criterion(examples):
"""
Transform the examples returned by the AI API into our internal format.
Args:
examples (list): Training examples of the form returned by the AI API.
Each element of the list should be a dictionary with keys
'text' (the essay text) and 'scores' (a dictionary mapping
criterion names to numeric scores).
Returns:
dict: keys are the criteria names, and each value is list of `AIAlgorithm.ExampleEssay`s
Raises:
InvalidExample: The provided training examples are not in a valid format.
"""
internal_examples = defaultdict(list)
prev_criteria = None
for example_dict in examples:
# Check that the example contains the expected keys
try:
scores_dict = example_dict['scores']
text = unicode(example_dict['text'])
except KeyError:
raise InvalidExample(example_dict, u'Example dict must have keys "scores" and "text"')
# Check that the criteria names are consistent across examples
if prev_criteria is None:
prev_criteria = set(scores_dict.keys())
else:
if prev_criteria != set(scores_dict.keys()):
msg = (
u"Example criteria do not match "
u"the previous example: {criteria}"
).format(criteria=prev_criteria)
raise InvalidExample(example_dict, msg)
for criterion_name, score in scores_dict.iteritems():
try:
score = int(score)
except ValueError:
raise InvalidExample(example_dict, u"Example score is not an integer")
else:
internal_ex = AIAlgorithm.ExampleEssay(text, score)
internal_examples[criterion_name].append(internal_ex)
return internal_examples
def _log_start_reschedule_training(course_id=None, item_id=None):
"""
Sends data about the rescheduling_training task to datadog
Args:
course_id (unicode): the course id to associate with the log start
item_id (unicode): the item id to tag with the log start
"""
tags = [
u"course_id:{}".format(course_id),
u"item_id:{}".format(item_id),
]
dog_stats_api.increment('openassessment.assessment.ai_task.AIRescheduleTraining.scheduled_count', tags)
msg = u"Rescheduling of incomplete training tasks began for course_id={cid} and item_id={iid}"
logger.info(msg.format(cid=course_id, iid=item_id))
def _log_complete_reschedule_training(course_id=None, item_id=None, seconds=-1, success=False):
"""
Sends the total time the rescheduling of training tasks took to datadog
Note that this function may be invoked multiple times per call to reschedule_training_tasks,
    because the elapsed time is measured for EACH ATTEMPT (i.e. if scheduling fails with an error,
    we log the time elapsed before trying again).
Args:
course_id (unicode): the course_id to tag the task with
item_id (unicode): the item_id to tag the task with
seconds (int): the number of seconds that elapsed during the rescheduling task.
success (bool): indicates whether or not all attempts to reschedule were successful
"""
tags = [
u"course_id:{}".format(course_id),
u"item_id:{}".format(item_id),
u"success:{}".format(success)
]
    dog_stats_api.histogram('openassessment.assessment.ai_task.AIRescheduleTraining.turnaround_time', seconds, tags)
dog_stats_api.increment('openassessment.assessment.ai_task.AIRescheduleTraining.completed_count', tags)
msg = u"Rescheduling of incomplete training tasks for course_id={cid} and item_id={iid} completed in {s} seconds."
if not success:
msg += u" At least one rescheduling task failed due to internal error."
    msg = msg.format(cid=course_id, iid=item_id, s=seconds)
logger.info(msg)
| agpl-3.0 | -6,288,596,473,735,781,000 | 38.831746 | 118 | 0.665657 | false |
westinedu/sovleit | django/test/simple.py | 150 | 15012 | import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner, TestCase
from django.utils import unittest
try:
all
except NameError:
from django.utils.itercompat import all
__all__ = ('DjangoTestRunner', 'DjangoTestSuiteRunner', 'run_tests')
# The module name for tests outside models.py
TEST_MODULE = 'tests'
doctestOutputChecker = OutputChecker()
class DjangoTestRunner(unittest.TextTestRunner):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn(
"DjangoTestRunner is deprecated; it's functionality is indistinguishable from TextTestRunner",
PendingDeprecationWarning
)
super(DjangoTestRunner, self).__init__(*args, **kwargs)
def get_tests(app_module):
try:
app_path = app_module.__name__.split('.')[:-1]
test_module = __import__('.'.join(app_path + [TEST_MODULE]), {}, {}, TEST_MODULE)
except ImportError, e:
# Couldn't import tests.py. Was it due to a missing file, or
# due to an import error in a tests.py that actually exists?
import os.path
from imp import find_module
try:
mod = find_module(TEST_MODULE, [os.path.dirname(app_module.__file__)])
except ImportError:
# 'tests' module doesn't exist. Move on.
test_module = None
else:
# The module exists, so there must be an import error in the
# test module itself. We don't need the module; so if the
# module was a single file module (i.e., tests.py), close the file
# handle returned by find_module. Otherwise, the test module
# is a directory, and there is nothing to close.
if mod[0]:
mod[0].close()
raise
return test_module
def build_suite(app_module):
"Create a complete Django test suite for the provided application module"
suite = unittest.TestSuite()
# Load unit and doctests in the models.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(app_module, 'suite'):
suite.addTest(app_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(app_module))
try:
suite.addTest(doctest.DocTestSuite(app_module,
checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in models.py
pass
# Check to see if a separate 'tests' module exists parallel to the
# models module
test_module = get_tests(app_module)
if test_module:
# Load unit and doctests in the tests.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(test_module, 'suite'):
suite.addTest(test_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_module))
try:
suite.addTest(doctest.DocTestSuite(test_module,
checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in tests.py
pass
return suite
def build_test(label):
"""Construct a test case with the specified label. Label should be of the
form model.TestClass or model.TestClass.test_method. Returns an
instantiated test or test suite corresponding to the label provided.
"""
parts = label.split('.')
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Test label '%s' should be of the form app.TestCase or app.TestCase.test_method" % label)
#
# First, look for TestCase instances with a name that matches
#
app_module = get_app(parts[0])
test_module = get_tests(app_module)
TestClass = getattr(app_module, parts[1], None)
# Couldn't find the test class in models.py; look in tests.py
if TestClass is None:
if test_module:
TestClass = getattr(test_module, parts[1], None)
try:
if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
if len(parts) == 2: # label is app.TestClass
try:
return unittest.TestLoader().loadTestsFromTestCase(TestClass)
except TypeError:
raise ValueError("Test label '%s' does not refer to a test class" % label)
else: # label is app.TestClass.test_method
return TestClass(parts[2])
except TypeError:
# TestClass isn't a TestClass - it must be a method or normal class
pass
#
# If there isn't a TestCase, look for a doctest that matches
#
tests = []
for module in app_module, test_module:
try:
doctests = doctest.DocTestSuite(module,
checker=doctestOutputChecker,
runner=DocTestRunner)
# Now iterate over the suite, looking for doctests whose name
# matches the pattern that was given
for test in doctests:
if test._dt_test.name in (
'%s.%s' % (module.__name__, '.'.join(parts[1:])),
'%s.__test__.%s' % (module.__name__, '.'.join(parts[1:]))):
tests.append(test)
except ValueError:
# No doctests found.
pass
# If no tests were found, then we were given a bad test label.
if not tests:
raise ValueError("Test label '%s' does not refer to a test" % label)
# Construct a suite out of the tests that matched.
return unittest.TestSuite(tests)
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
for test in suite:
if isinstance(test, unittest.TestSuite):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def reorder_suite(suite, classes):
"""
Reorders a test suite by test type.
classes is a sequence of types
    All tests of type classes[0] are placed first, then tests of type classes[1], etc.
Tests with no match in classes are placed last.
"""
class_count = len(classes)
bins = [unittest.TestSuite() for i in range(class_count+1)]
partition_suite(suite, classes, bins)
for i in range(class_count):
bins[0].addTests(bins[i+1])
return bins[0]
def dependency_ordered(test_databases, dependencies):
"""Reorder test_databases into an order that honors the dependencies
described in TEST_DEPENDENCIES.
"""
ordered_test_databases = []
resolved_databases = set()
while test_databases:
changed = False
deferred = []
while test_databases:
signature, (db_name, aliases) = test_databases.pop()
dependencies_satisfied = True
for alias in aliases:
if alias in dependencies:
if all(a in resolved_databases for a in dependencies[alias]):
# all dependencies for this alias are satisfied
dependencies.pop(alias)
resolved_databases.add(alias)
else:
dependencies_satisfied = False
else:
resolved_databases.add(alias)
if dependencies_satisfied:
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured("Circular dependency in TEST_DEPENDENCIES")
test_databases = deferred
return ordered_test_databases
class DjangoTestSuiteRunner(object):
def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels, extra_tests=None, **kwargs):
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
if extra_tests:
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, (TestCase,))
def setup_databases(self, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
for alias in connections:
connection = connections[alias]
if connection.settings_dict['TEST_MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = connection.settings_dict['TEST_MIRROR']
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], [])
)
item[1].append(alias)
if 'TEST_DEPENDENCIES' in connection.settings_dict:
dependencies[alias] = connection.settings_dict['TEST_DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS:
dependencies[alias] = connection.settings_dict.get('TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(test_databases.items(), dependencies):
# Actually create the database for the first connection
connection = connections[aliases[0]]
old_names.append((connection, db_name, True))
test_db_name = connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
for alias in aliases[1:]:
connection = connections[alias]
if db_name:
old_names.append((connection, db_name, False))
connection.settings_dict['NAME'] = test_db_name
else:
# If settings_dict['NAME'] isn't defined, we have a backend where
# the name isn't important -- e.g., SQLite, which uses :memory:.
# Force create the database instead of assuming it's a duplicate.
old_names.append((connection, db_name, True))
connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].settings_dict['NAME'] = connections[mirror_alias].settings_dict['NAME']
return old_names, mirrors
def run_suite(self, suite, **kwargs):
return unittest.TextTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)
def teardown_databases(self, old_config, **kwargs):
from django.db import connections
old_names, mirrors = old_config
# Point all the mirrors back to the originals
for alias, old_name in mirrors:
connections[alias].settings_dict['NAME'] = old_name
# Destroy all the non-mirror databases
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity)
else:
connection.settings_dict['NAME'] = old_name
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
def run_tests(test_labels, verbosity=1, interactive=True, failfast=False, extra_tests=None):
import warnings
warnings.warn(
'The run_tests() test runner has been deprecated in favor of DjangoTestSuiteRunner.',
DeprecationWarning
)
test_runner = DjangoTestSuiteRunner(verbosity=verbosity, interactive=interactive, failfast=failfast)
return test_runner.run_tests(test_labels, extra_tests=extra_tests)
| bsd-3-clause | 7,682,456,547,900,616,000 | 39.354839 | 115 | 0.600853 | false |
jrabbit/compose | tests/unit/cli/command_test.py | 9 | 3080 | # ~*~ encoding: utf-8 ~*~
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import pytest
import six
from compose.cli.command import get_config_path_from_options
from compose.config.environment import Environment
from compose.const import IS_WINDOWS_PLATFORM
from tests import mock
class TestGetConfigPathFromOptions(object):
def test_path_from_options(self):
paths = ['one.yml', 'two.yml']
opts = {'--file': paths}
environment = Environment.from_env_file('.')
assert get_config_path_from_options('.', opts, environment) == paths
def test_single_path_from_env(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_FILE'] = 'one.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options('.', {}, environment) == ['one.yml']
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix separator')
def test_multiple_path_from_env(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_FILE'] = 'one.yml:two.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', {}, environment
) == ['one.yml', 'two.yml']
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows separator')
def test_multiple_path_from_env_windows(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_FILE'] = 'one.yml;two.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', {}, environment
) == ['one.yml', 'two.yml']
def test_multiple_path_from_env_custom_separator(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_PATH_SEPARATOR'] = '^'
os.environ['COMPOSE_FILE'] = 'c:\\one.yml^.\\semi;colon.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', {}, environment
) == ['c:\\one.yml', '.\\semi;colon.yml']
def test_no_path(self):
environment = Environment.from_env_file('.')
assert not get_config_path_from_options('.', {}, environment)
def test_unicode_path_from_options(self):
paths = [b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml']
opts = {'--file': paths}
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', opts, environment
) == ['就吃饭/docker-compose.yml']
@pytest.mark.skipif(six.PY3, reason='Env values in Python 3 are already Unicode')
def test_unicode_path_from_env(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_FILE'] = b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', {}, environment
) == ['就吃饭/docker-compose.yml']
| apache-2.0 | -7,774,408,549,648,078,000 | 39.368421 | 99 | 0.603977 | false |
thingsinjars/electron | script/dump-symbols.py | 144 | 1962 | #!/usr/bin/env python
import os
import sys
from lib.config import PLATFORM
from lib.util import atom_gyp, execute, rm_rf
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
CHROMIUM_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
'download', 'libchromiumcontent', 'static_library')
def main(destination):
# if PLATFORM == 'win32':
# register_required_dll()
rm_rf(destination)
(project_name, product_name) = get_names_from_gyp()
if PLATFORM in ['darwin', 'linux']:
generate_breakpad_symbols = os.path.join(SOURCE_ROOT, 'tools', 'posix',
'generate_breakpad_symbols.py')
if PLATFORM == 'darwin':
start = os.path.join(OUT_DIR, '{0}.app'.format(product_name), 'Contents',
'MacOS', product_name)
else:
start = os.path.join(OUT_DIR, project_name)
args = [
'--build-dir={0}'.format(OUT_DIR),
'--binary={0}'.format(start),
'--symbols-dir={0}'.format(destination),
'--libchromiumcontent-dir={0}'.format(CHROMIUM_DIR),
'--clear',
'--jobs=16',
]
else:
generate_breakpad_symbols = os.path.join(SOURCE_ROOT, 'tools', 'win',
'generate_breakpad_symbols.py')
args = [
'--symbols-dir={0}'.format(destination),
'--jobs=16',
os.path.relpath(OUT_DIR),
]
execute([sys.executable, generate_breakpad_symbols] + args)
def register_required_dll():
register = os.path.join(SOURCE_ROOT, 'tools', 'win',
'register_msdia80_dll.js')
  execute(['node.exe', os.path.relpath(register)])
def get_names_from_gyp():
variables = atom_gyp()
return (variables['project_name%'], variables['product_name%'])
if __name__ == '__main__':
sys.exit(main(sys.argv[1]))
| mit | 4,067,839,435,665,491,500 | 29.65625 | 79 | 0.585627 | false |
tima/ansible | contrib/inventory/vmware.py | 92 | 18476 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
VMware Inventory Script
=======================
Retrieve information about virtual machines from a vCenter server or
standalone ESX host. When `guests_only=false` (in the INI file), host systems
are also returned in addition to VMs.
This script will attempt to read configuration from an INI file with the same
base filename if present, or `vmware.ini` if not. It is possible to create
symlinks to the inventory script to support multiple configurations, e.g.:
* `vmware.py` (this script)
* `vmware.ini` (default configuration, will be read by `vmware.py`)
* `vmware_test.py` (symlink to `vmware.py`)
* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`)
* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no
`vmware_other.ini` exists)
The path to an INI file may also be specified via the `VMWARE_INI` environment
variable, in which case the filename matching rules above will not apply.
Host and authentication parameters may be specified via the `VMWARE_HOST`,
`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will
take precedence over options present in the INI file. An INI file is not
required if these options are specified using environment variables.
'''
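# A minimal INI file might look like the following (illustrative values only;
# the option names are the ones read by the code below, the values are
# hypothetical):
#
#     [auth]
#     host = vcenter.example.com
#     user = admin
#     password = secret
#     sslcheck = no
#
#     [defaults]
#     guests_only = yes
#     cache_dir = ~/.cache/vmware
#     cache_max_age = 300
#     clusters = cluster1,cluster2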
from __future__ import print_function
import collections
import json
import logging
import optparse
import os
import ssl
import sys
import time
from six import integer_types, text_type, string_types
from six.moves import configparser
# Disable logging messages triggered by pSphere/suds.
try:
from logging import NullHandler
except ImportError:
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
logging.getLogger('psphere').addHandler(NullHandler())
logging.getLogger('suds').addHandler(NullHandler())
from psphere.client import Client
from psphere.errors import ObjectNotFoundError
from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network, ClusterComputeResource
from suds.sudsobject import Object as SudsObject
class VMwareInventory(object):
def __init__(self, guests_only=None):
self.config = configparser.SafeConfigParser()
if os.environ.get('VMWARE_INI', ''):
config_files = [os.environ['VMWARE_INI']]
else:
config_files = [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini']
for config_file in config_files:
if os.path.exists(config_file):
self.config.read(config_file)
break
# Retrieve only guest VMs, or include host systems?
if guests_only is not None:
self.guests_only = guests_only
elif self.config.has_option('defaults', 'guests_only'):
self.guests_only = self.config.getboolean('defaults', 'guests_only')
else:
self.guests_only = True
# Read authentication information from VMware environment variables
# (if set), otherwise from INI file.
auth_host = os.environ.get('VMWARE_HOST')
if not auth_host and self.config.has_option('auth', 'host'):
auth_host = self.config.get('auth', 'host')
auth_user = os.environ.get('VMWARE_USER')
if not auth_user and self.config.has_option('auth', 'user'):
auth_user = self.config.get('auth', 'user')
auth_password = os.environ.get('VMWARE_PASSWORD')
if not auth_password and self.config.has_option('auth', 'password'):
auth_password = self.config.get('auth', 'password')
sslcheck = os.environ.get('VMWARE_SSLCHECK')
if not sslcheck and self.config.has_option('auth', 'sslcheck'):
sslcheck = self.config.get('auth', 'sslcheck')
if not sslcheck:
sslcheck = True
else:
if sslcheck.lower() in ['no', 'false']:
sslcheck = False
else:
sslcheck = True
# Limit the clusters being scanned
self.filter_clusters = os.environ.get('VMWARE_CLUSTERS')
if not self.filter_clusters and self.config.has_option('defaults', 'clusters'):
self.filter_clusters = self.config.get('defaults', 'clusters')
if self.filter_clusters:
self.filter_clusters = [x.strip() for x in self.filter_clusters.split(',') if x.strip()]
# Override certificate checks
if not sslcheck:
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# Create the VMware client connection.
self.client = Client(auth_host, auth_user, auth_password)
def _put_cache(self, name, value):
'''
Saves the value to cache with the name given.
'''
if self.config.has_option('defaults', 'cache_dir'):
cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_file = os.path.join(cache_dir, name)
with open(cache_file, 'w') as cache:
json.dump(value, cache)
def _get_cache(self, name, default=None):
'''
Retrieves the value from cache for the given name.
'''
if self.config.has_option('defaults', 'cache_dir'):
cache_dir = self.config.get('defaults', 'cache_dir')
cache_file = os.path.join(cache_dir, name)
if os.path.exists(cache_file):
if self.config.has_option('defaults', 'cache_max_age'):
cache_max_age = self.config.getint('defaults', 'cache_max_age')
else:
cache_max_age = 0
cache_stat = os.stat(cache_file)
if (cache_stat.st_mtime + cache_max_age) >= time.time():
with open(cache_file) as cache:
return json.load(cache)
return default
def _flatten_dict(self, d, parent_key='', sep='_'):
'''
Flatten nested dicts by combining keys with a separator. Lists with
only string items are included as is; any other lists are discarded.
'''
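        # Illustrative example (hypothetical data, not from the original source):
        #     {'summary': {'config': {'name': 'vm1'}}, 'networks': ['net-a', 'net-b']}
        # flattens to
        #     {'summary_config_name': 'vm1', 'networks': ['net-a', 'net-b']}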
items = []
for k, v in d.items():
if k.startswith('_'):
continue
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(self._flatten_dict(v, new_key, sep).items())
elif isinstance(v, (list, tuple)):
if all([isinstance(x, string_types) for x in v]):
items.append((new_key, v))
            else:
                # Scalar values are kept as-is.
                items.append((new_key, v))
return dict(items)
def _get_obj_info(self, obj, depth=99, seen=None):
'''
Recursively build a data structure for the given pSphere object (depth
only applies to ManagedObject instances).
'''
seen = seen or set()
if isinstance(obj, ManagedObject):
try:
obj_unicode = text_type(getattr(obj, 'name'))
except AttributeError:
obj_unicode = ()
if obj in seen:
return obj_unicode
seen.add(obj)
if depth <= 0:
return obj_unicode
d = {}
for attr in dir(obj):
if attr.startswith('_'):
continue
try:
val = getattr(obj, attr)
obj_info = self._get_obj_info(val, depth - 1, seen)
if obj_info != ():
d[attr] = obj_info
except Exception as e:
pass
return d
elif isinstance(obj, SudsObject):
d = {}
for key, val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
d[key] = obj_info
return d
elif isinstance(obj, (list, tuple)):
l = []
for val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
l.append(obj_info)
return l
elif isinstance(obj, (type(None), bool, float) + string_types + integer_types):
return obj
else:
return ()
def _get_host_info(self, host, prefix='vmware'):
'''
Return a flattened dict with info about the given host system.
'''
host_info = {
'name': host.name,
}
for attr in ('datastore', 'network', 'vm'):
try:
value = getattr(host, attr)
host_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
host_info['%ss' % attr] = []
for k, v in self._get_obj_info(host.summary, depth=0).items():
if isinstance(v, collections.MutableMapping):
for k2, v2 in v.items():
host_info[k2] = v2
elif k != 'host':
host_info[k] = v
try:
host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress
except Exception as e:
print(e, file=sys.stderr)
host_info = self._flatten_dict(host_info, prefix)
if ('%s_ipAddress' % prefix) in host_info:
host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix]
return host_info
def _get_vm_info(self, vm, prefix='vmware'):
'''
Return a flattened dict with info about the given virtual machine.
'''
vm_info = {
'name': vm.name,
}
for attr in ('datastore', 'network'):
try:
value = getattr(vm, attr)
vm_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
vm_info['%ss' % attr] = []
try:
vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0)
except AttributeError:
vm_info['resourcePool'] = ''
try:
vm_info['guestState'] = vm.guest.guestState
except AttributeError:
vm_info['guestState'] = ''
for k, v in self._get_obj_info(vm.summary, depth=0).items():
if isinstance(v, collections.MutableMapping):
for k2, v2 in v.items():
if k2 == 'host':
k2 = 'hostSystem'
vm_info[k2] = v2
elif k != 'vm':
vm_info[k] = v
vm_info = self._flatten_dict(vm_info, prefix)
if ('%s_ipAddress' % prefix) in vm_info:
vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix]
return vm_info
def _add_host(self, inv, parent_group, host_name):
'''
Add the host to the parent group in the given inventory.
'''
p_group = inv.setdefault(parent_group, [])
if isinstance(p_group, dict):
group_hosts = p_group.setdefault('hosts', [])
else:
group_hosts = p_group
if host_name not in group_hosts:
group_hosts.append(host_name)
def _add_child(self, inv, parent_group, child_group):
'''
Add a child group to a parent group in the given inventory.
'''
if parent_group != 'all':
p_group = inv.setdefault(parent_group, {})
if not isinstance(p_group, dict):
inv[parent_group] = {'hosts': p_group}
p_group = inv[parent_group]
group_children = p_group.setdefault('children', [])
if child_group not in group_children:
group_children.append(child_group)
inv.setdefault(child_group, [])
def get_inventory(self, meta_hostvars=True):
'''
Reads the inventory from cache or VMware API via pSphere.
'''
# Use different cache names for guests only vs. all hosts.
if self.guests_only:
cache_name = '__inventory_guests__'
else:
cache_name = '__inventory_all__'
inv = self._get_cache(cache_name, None)
if inv is not None:
return inv
inv = {'all': {'hosts': []}}
if meta_hostvars:
inv['_meta'] = {'hostvars': {}}
default_group = os.path.basename(sys.argv[0]).rstrip('.py')
if not self.guests_only:
if self.config.has_option('defaults', 'hw_group'):
hw_group = self.config.get('defaults', 'hw_group')
else:
hw_group = default_group + '_hw'
if self.config.has_option('defaults', 'vm_group'):
vm_group = self.config.get('defaults', 'vm_group')
else:
vm_group = default_group + '_vm'
if self.config.has_option('defaults', 'prefix_filter'):
prefix_filter = self.config.get('defaults', 'prefix_filter')
else:
prefix_filter = None
if self.filter_clusters:
# Loop through clusters and find hosts:
hosts = []
for cluster in ClusterComputeResource.all(self.client):
if cluster.name in self.filter_clusters:
for host in cluster.host:
hosts.append(host)
else:
# Get list of all physical hosts
hosts = HostSystem.all(self.client)
# Loop through physical hosts:
for host in hosts:
if not self.guests_only:
self._add_host(inv, 'all', host.name)
self._add_host(inv, hw_group, host.name)
host_info = self._get_host_info(host)
if meta_hostvars:
inv['_meta']['hostvars'][host.name] = host_info
self._put_cache(host.name, host_info)
# Loop through all VMs on physical host.
for vm in host.vm:
if prefix_filter:
if vm.name.startswith(prefix_filter):
continue
self._add_host(inv, 'all', vm.name)
self._add_host(inv, vm_group, vm.name)
vm_info = self._get_vm_info(vm)
if meta_hostvars:
inv['_meta']['hostvars'][vm.name] = vm_info
self._put_cache(vm.name, vm_info)
# Group by resource pool.
vm_resourcePool = vm_info.get('vmware_resourcePool', None)
if vm_resourcePool:
self._add_child(inv, vm_group, 'resource_pools')
self._add_child(inv, 'resource_pools', vm_resourcePool)
self._add_host(inv, vm_resourcePool, vm.name)
# Group by datastore.
for vm_datastore in vm_info.get('vmware_datastores', []):
self._add_child(inv, vm_group, 'datastores')
self._add_child(inv, 'datastores', vm_datastore)
self._add_host(inv, vm_datastore, vm.name)
# Group by network.
for vm_network in vm_info.get('vmware_networks', []):
self._add_child(inv, vm_group, 'networks')
self._add_child(inv, 'networks', vm_network)
self._add_host(inv, vm_network, vm.name)
# Group by guest OS.
vm_guestId = vm_info.get('vmware_guestId', None)
if vm_guestId:
self._add_child(inv, vm_group, 'guests')
self._add_child(inv, 'guests', vm_guestId)
self._add_host(inv, vm_guestId, vm.name)
# Group all VM templates.
vm_template = vm_info.get('vmware_template', False)
if vm_template:
self._add_child(inv, vm_group, 'templates')
self._add_host(inv, 'templates', vm.name)
self._put_cache(cache_name, inv)
return inv
def get_host(self, hostname):
'''
Read info about a specific host or VM from cache or VMware API.
'''
inv = self._get_cache(hostname, None)
if inv is not None:
return inv
if not self.guests_only:
try:
host = HostSystem.get(self.client, name=hostname)
inv = self._get_host_info(host)
except ObjectNotFoundError:
pass
if inv is None:
try:
vm = VirtualMachine.get(self.client, name=hostname)
inv = self._get_vm_info(vm)
except ObjectNotFoundError:
pass
if inv is not None:
self._put_cache(hostname, inv)
return inv or {}
def main():
parser = optparse.OptionParser()
parser.add_option('--list', action='store_true', dest='list',
default=False, help='Output inventory groups and hosts')
parser.add_option('--host', dest='host', default=None, metavar='HOST',
help='Output variables only for the given hostname')
# Additional options for use when running the script standalone, but never
# used by Ansible.
parser.add_option('--pretty', action='store_true', dest='pretty',
default=False, help='Output nicely-formatted JSON')
parser.add_option('--include-host-systems', action='store_true',
dest='include_host_systems', default=False,
help='Include host systems in addition to VMs')
parser.add_option('--no-meta-hostvars', action='store_false',
dest='meta_hostvars', default=True,
help='Exclude [\'_meta\'][\'hostvars\'] with --list')
options, args = parser.parse_args()
if options.include_host_systems:
vmware_inventory = VMwareInventory(guests_only=False)
else:
vmware_inventory = VMwareInventory()
if options.host is not None:
inventory = vmware_inventory.get_host(options.host)
else:
inventory = vmware_inventory.get_inventory(options.meta_hostvars)
json_kwargs = {}
if options.pretty:
json_kwargs.update({'indent': 4, 'sort_keys': True})
json.dump(inventory, sys.stdout, **json_kwargs)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,765,515,105,445,990,000 | 38.144068 | 109 | 0.551472 | false |
Nachtfeuer/concept-py | tests/test_vector_2d.py | 1 | 5600 | """
=======
License
=======
Copyright (c) 2017 Thomas Lehmann
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# pylint: disable=R0201
import math
import unittest
from hamcrest import assert_that, equal_to
from concept.math.vector import Vector2d
class TestVector2d(unittest.TestCase):
""" Testing math 2d vector. """
def test_init(self):
"""Testing of method Vector2d.__init__."""
assert_that(Vector2d(1.0, 2.0).x, equal_to(1.0))
assert_that(Vector2d(1.0, 2.0).y, equal_to(2.0))
assert_that(Vector2d(), equal_to(Vector2d(0.0, 0.0)))
def test_repr(self):
"""Testing of method Vector2d.__repr__."""
assert_that(str(Vector2d(1.2, 3.4)), equal_to("Vector2d(x=1.2, y=3.4)"))
def test_add(self):
"""Testing of method Vector2d.__add__."""
assert_that(Vector2d(1.0, 2.0) + Vector2d(3.0, 4.0), equal_to(Vector2d(4.0, 6.0)))
def test_sub(self):
"""Testing of method Vector2d.__sub__."""
assert_that(Vector2d(1.0, 5.0) - Vector2d(3.0, 4.0), equal_to(Vector2d(-2.0, 1.0)))
def test_scalar_product(self):
"""Testing of method Vector2d.scalar_product."""
assert_that(Vector2d(2.0, 5.0).scalar_product(Vector2d(3.0, 4.0)), equal_to(26))
def test_length(self):
"""Testing of method Vector2d.length."""
assert_that(Vector2d(3.0, 4.0).length(), equal_to(5.0))
def test_scaled(self):
"""Testing of method Vector2d.scaled."""
vec = Vector2d(3.0, 4.0)
assert_that(vec.scaled(2), equal_to(Vector2d(6.0, 8.0)))
assert_that(vec, equal_to(Vector2d(3.0, 4.0)))
def test_scale(self):
"""Testing of method Vector2d.scale."""
vec = Vector2d(3.0, 4.0)
vec.scale(2.0)
assert_that(vec, equal_to(Vector2d(6.0, 8.0)))
def test_rotated(self):
"""Testing of method Vector2d.rotated."""
vec_a = Vector2d(1.0, 0.0)
vec_b = vec_a.rotated(math.pi / 180.0 * 90)
assert_that(abs(vec_b.x) < 1e-10, equal_to(True))
assert_that(abs(vec_b.y - 1.0) < 1e-10, equal_to(True))
def test_turned_left(self):
"""Testing of method Vector2d.turned_left."""
assert_that(Vector2d(1.0, 0.0).turned_left(), equal_to(Vector2d(0.0, 1.0)))
assert_that(Vector2d(0.0, 1.0).turned_left(), equal_to(Vector2d(-1.0, 0.0)))
assert_that(Vector2d(-1.0, 0.0).turned_left(), equal_to(Vector2d(0.0, -1.0)))
assert_that(Vector2d(0.0, -1.0).turned_left(), equal_to(Vector2d(1.0, 0.0)))
def test_turned_right(self):
"""Testing of method Vector2d.turned_right."""
assert_that(Vector2d(1.0, 0.0).turned_right(), equal_to(Vector2d(0.0, -1.0)))
assert_that(Vector2d(0.0, -1.0).turned_right(), equal_to(Vector2d(-1.0, 0.0)))
assert_that(Vector2d(-1.0, 0.0).turned_right(), equal_to(Vector2d(0.0, 1.0)))
assert_that(Vector2d(0.0, 1.0).turned_right(), equal_to(Vector2d(1.0, 0.0)))
def test_angle(self):
"""Testing of method Vector2d.angle."""
angle_a = Vector2d(0.0, 1.0).angle(Vector2d(1.0, 0.0)) * 180.0 / math.pi
angle_b = Vector2d(1.0, 0.0).angle(Vector2d(0.0, 1.0)) * 180.0 / math.pi
assert_that(abs(angle_a - 90.0) <= 1e-10, equal_to(True))
assert_that(abs(angle_b + 90.0) <= 1e-10, equal_to(True))
def test_normalized(self):
"""Testing of method Vector2d.normalized."""
normalized_vec_a = Vector2d(10.0, 0).normalized()
normalized_vec_b = Vector2d(0.0, 10.0).normalized()
assert_that(normalized_vec_a, equal_to(Vector2d(1.0, 0.0)))
assert_that(normalized_vec_b, equal_to(Vector2d(0.0, 1.0)))
def test_cross_product(self):
"""Testing of method Vector2d.cross_product."""
assert_that(Vector2d(2.0, 5.0).cross_product(Vector2d(3.0, 4.0)), equal_to(-7.0))
def test_eq(self):
"""Testing of method Vector2d.__eq__."""
assert_that(Vector2d(1.2, 3.4), equal_to(Vector2d(1.2, 3.4)))
assert_that(Vector2d(1.2, 3.4).__eq__(1234), equal_to(False))
def test_neg(self):
"""Testing negating a vector."""
assert_that(-Vector2d(1.0, 2.0), equal_to(Vector2d(-1.0, -2.0)))
def test_is_perpendicular(self):
"""Testing method Vector2d.is_perpendicular."""
assert_that(Vector2d(0.0, 1.0).is_perpendicular(Vector2d(1.0, 0.0)), equal_to(True))
assert_that(Vector2d(1.0, 1.0).is_perpendicular(Vector2d(1.0, 0.0)), equal_to(False))
assert_that(Vector2d(1.0, 1.0).is_perpendicular("hello world"), equal_to(False))
| mit | 1,367,165,466,636,440,300 | 43.8 | 93 | 0.629643 | false |
kickstandproject/python-ripcordclient | ripcordclient/tests/v1/test_subscriber.py | 1 | 2502 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 PolyBeacon, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from ripcordclient.tests import utils
from ripcordclient.v1 import subscriber
SUBSCRIBER = {
'username': 'alice',
'uuid': 'b5142338-d88a-403e-bb14-e1fba0a318d2',
}
CREATE_SUBSCRIBER = {
'username': 'alice',
}
FIXTURES = {
'/v1/subscribers': {
'GET': (
{},
[SUBSCRIBER],
),
'POST': (
{},
SUBSCRIBER,
),
},
'/v1/subscribers/%s' % SUBSCRIBER['uuid']: {
'GET': (
{},
SUBSCRIBER,
),
'DELETE': (
{},
None,
),
},
}
class SubscriberManagerTest(testtools.TestCase):
def setUp(self):
super(SubscriberManagerTest, self).setUp()
self.api = utils.FakeAPI(FIXTURES)
self.manager = subscriber.SubscriberManager(self.api)
def test_create(self):
res = self.manager.create(**CREATE_SUBSCRIBER)
expect = [
('POST', '/v1/subscribers', {}, CREATE_SUBSCRIBER),
]
self.assertEqual(self.api.calls, expect)
self.assertTrue(res)
def test_delete(self):
res = self.manager.delete(uuid=SUBSCRIBER['uuid'])
expect = [
('DELETE', '/v1/subscribers/%s' % SUBSCRIBER['uuid'], {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(res, None)
def test_list(self):
res = self.manager.list()
expect = [
('GET', '/v1/subscribers', {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(len(res), 1)
def test_show(self):
res = self.manager.get(uuid=SUBSCRIBER['uuid'])
expect = [
('GET', '/v1/subscribers/%s' % SUBSCRIBER['uuid'], {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(res.uuid, SUBSCRIBER['uuid'])
| apache-2.0 | 3,949,065,709,225,430,500 | 26.494505 | 76 | 0.581535 | false |
OpenDataNode/ckanext-odn-ic2pc-sync | ckanext/commands/publishing_cmd.py | 1 | 5849 | '''
Created on 30.10.2014
@author: mvi
'''
from ckan.lib.cli import CkanCommand
import sys
import logging
from ckanext.model.external_catalog import external_catalog_table,\
migrate_to_v0_3, migrate_to_v0_4, migrate_to_v0_6
log = logging.getLogger('ckanext')
class PublishingCmd(CkanCommand):
'''Pushes datasets from one ckan to another
    requires these properties to be set in the provided config file:
odn.ic2pc.src.ckan.url - source ckan from which we are harvesting datasets
odn.ic2pc.dst.ckan.url - destination ckan to which we are pushing the datasets
odn.ic2pc.dst.ckan.api.key - destination ckan api key needed for authentication
odn.ic2pc.package.extras.whitelist - package extras allowed to be synchronized
odn.ic2pc.resource.extras.whitelist - resource extras allowed to be synchronized
The whitelist properties have a blank space as delimiter
Usage:
publishing_cmd test
- start test that writes source and destination ckan url that are
set in provided config file
publishing_cmd run
- starts pushing datasets
publishing_cmd initdb
- initializes DB tables needed for THIS extension
publishing_cmd migrate_to_v0.3.0
- updates db model from v0.2.x to v0.3.0
publishing_cmd migrate_to_v0.4.0
- updates db model from v0.3.x to v0.4
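        publishing_cmd migrate_to_v0.6.0
            - updates db model to v0.6.0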
publishing_cmd uninstall
- drops tables in DB needed for THIS extension
'''
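    # A minimal illustrative configuration snippet for the properties listed in
    # the docstring above (hypothetical values, not from the original source):
    #
    #     odn.ic2pc.src.ckan.url = http://source-ckan.example.com
    #     odn.ic2pc.dst.ckan.url = http://destination-ckan.example.com
    #     odn.ic2pc.dst.ckan.api.key = 00000000-0000-0000-0000-000000000000
    #     odn.ic2pc.package.extras.whitelist = spatial theme
    #     odn.ic2pc.resource.extras.whitelist = format_note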
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 5
min_args = 0
def __init__(self, name):
super(PublishingCmd, self).__init__(name)
def command(self):
self._load_config()
if len(self.args) == 0:
self.parser.print_usage()
sys.exit(1)
cmd = self.args[0]
if cmd == 'test':
log.info('Starting [PublishingCmd test]')
conf = self._get_config()
src_ckan_url = conf.get('odn.ic2pc.src.ckan.url')
dst_ckan_url = conf.get('odn.ic2pc.dst.ckan.url')
dst_ckan_api_key = conf.get('odn.ic2pc.dst.ckan.api.key')
package_extras_whitelist = conf.get('odn.ic2pc.package.extras.whitelist')
resource_extras_whitelist = conf.get('odn.ic2pc.resource.extras.whitelist')
log.info('source ckan url: %s' % (src_ckan_url,))
log.info('destination ckan url: %s' % (dst_ckan_url,))
log.info('destination api key: %s' % (dst_ckan_api_key,))
log.info('package extras whitelist: {0}'.format(package_extras_whitelist))
log.info('resource extras whitelist: {0}'.format(resource_extras_whitelist))
elif cmd == 'run':
log.info('Starting [PublishingCmd run]')
from ckanext.publishing.ckan_sync import CkanSync
from odn_ckancommons.ckan_helper import CkanAPIWrapper
conf = self._get_config()
src_ckan_url = conf.get('odn.ic2pc.src.ckan.url')
dst_ckan_url = conf.get('odn.ic2pc.dst.ckan.url')
dst_ckan_api_key = conf.get('odn.ic2pc.dst.ckan.api.key')
package_extras_whitelist = conf.get('odn.ic2pc.package.extras.whitelist', "")
resource_extras_whitelist = conf.get('odn.ic2pc.resource.extras.whitelist', "")
package_extras_whitelist = package_extras_whitelist.split(' ')
resource_extras_whitelist = resource_extras_whitelist.split(' ')
assert src_ckan_url
assert dst_ckan_url
assert dst_ckan_api_key
src_ckan = CkanAPIWrapper(src_ckan_url, None)
dst_ckan = CkanAPIWrapper(dst_ckan_url, dst_ckan_api_key)
pusher = CkanSync()
pusher.push(src_ckan, dst_ckan, whitelist_package_extras=package_extras_whitelist,
whitelist_resource_extras=resource_extras_whitelist)
log.info('End of [PublishingCmd run]')
elif cmd == 'initdb':
log.info('Starting db initialization')
if not external_catalog_table.exists():
log.info("creating external_catalog table")
external_catalog_table.create()
log.info("external_catalog table created successfully")
else:
log.info("external_catalog table already exists")
log.info('End of db initialization')
elif cmd == 'migrate_to_v0.3.0':
log.info('Starting migration of DB to v0.3.0')
migrate_to_v0_3()
log.info('End of migration of DB to v0.3.0')
elif cmd == 'migrate_to_v0.4.0':
log.info('Starting migration of DB to v0.4.0')
migrate_to_v0_4()
log.info('End of migration of DB to v0.4.0')
elif cmd == 'migrate_to_v0.6.0':
log.info('Starting migration of DB to v0.6.0')
migrate_to_v0_6()
log.info('End of migration of DB to v0.6.0')
elif cmd == 'uninstall':
log.info('Starting uninstall command')
if external_catalog_table.exists():
log.info("dropping external_catalog table")
external_catalog_table.drop()
log.info("dropped external_catalog table successfully")
else:
log.info("Table external_catalog doesn't exist")
log.info('End of uninstall command')
else:
log.info('No command with name \'{0}\''.format(cmd))
def _load_config(self):
super(PublishingCmd, self)._load_config() | agpl-3.0 | 6,183,822,080,887,370,000 | 37.486842 | 94 | 0.574115 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/webapps/tool_shed/model/migrate/versions/0020_add_repository_type_column.py | 1 | 1608 | """Migration script to add the type column to the repository table."""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
import sys, logging
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
def upgrade( migrate_engine ):
print __doc__
metadata.bind = migrate_engine
metadata.reflect()
Repository_table = Table( "repository", metadata, autoload=True )
c = Column( "type", TrimmedString( 255 ), index=True )
try:
# Create
c.create( Repository_table, index_name="ix_repository_type" )
assert c is Repository_table.c.type
except Exception, e:
print "Adding type column to the repository table failed: %s" % str( e )
# Update the type column to have the default unrestricted value.
cmd = "UPDATE repository SET type = 'unrestricted'"
migrate_engine.execute( cmd )
def downgrade( migrate_engine ):
metadata.bind = migrate_engine
metadata.reflect()
# Drop type column from repository table.
Repository_table = Table( "repository", metadata, autoload=True )
try:
Repository_table.c.type.drop()
except Exception, e:
print "Dropping column type from the repository table failed: %s" % str( e )
| gpl-3.0 | 7,634,642,149,066,470,000 | 33.956522 | 84 | 0.698383 | false |