repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
nicholascapo/powerline-shell | lib/colortrans.py | 32 | 8246 | #! /usr/bin/env python
"""
Code is modified (fairly heavily) by [email protected] from
https://gist.github.com/MicahElliott/719710
Convert values between RGB tuples and xterm-256 color codes.
Nice long listing of all 256 colors and their codes. Useful for
developing console color themes, or even script output schemes.
Resources:
* http://en.wikipedia.org/wiki/8-bit_color
* http://en.wikipedia.org/wiki/ANSI_escape_code
* /usr/share/X11/rgb.txt
I'm not sure what inspired this script. I think I must have
written it from scratch, though it's been several years now.
"""
__author__ = 'Micah Elliott http://MicahElliott.com'
__version__ = '0.1'
__copyright__ = 'Copyright (C) 2011 Micah Elliott. All rights reserved.'
__license__ = 'WTFPL http://sam.zoy.org/wtfpl/'
#---------------------------------------------------------------------
def hexstr2num(hexstr):
return int(hexstr, 16)
def rgbstring2tuple(s):
return tuple([hexstr2num(h) for h in (s[:2], s[2:4], s[4:])])
RGB2SHORT_DICT = {
(0, 0, 0): 16,
(0, 0, 95): 17,
(0, 0, 128): 4,
(0, 0, 135): 18,
(0, 0, 175): 19,
(0, 0, 215): 20,
(0, 0, 255): 12,
(0, 95, 0): 22,
(0, 95, 95): 23,
(0, 95, 135): 24,
(0, 95, 175): 25,
(0, 95, 215): 26,
(0, 95, 255): 27,
(0, 128, 0): 2,
(0, 128, 128): 6,
(0, 135, 0): 28,
(0, 135, 95): 29,
(0, 135, 135): 30,
(0, 135, 175): 31,
(0, 135, 215): 32,
(0, 135, 255): 33,
(0, 175, 0): 34,
(0, 175, 95): 35,
(0, 175, 135): 36,
(0, 175, 175): 37,
(0, 175, 215): 38,
(0, 175, 255): 39,
(0, 215, 0): 40,
(0, 215, 95): 41,
(0, 215, 135): 42,
(0, 215, 175): 43,
(0, 215, 215): 44,
(0, 215, 255): 45,
(0, 255, 0): 46,
(0, 255, 95): 47,
(0, 255, 135): 48,
(0, 255, 175): 49,
(0, 255, 215): 50,
(0, 255, 255): 14,
(8, 8, 8): 232,
(18, 18, 18): 233,
(28, 28, 28): 234,
(38, 38, 38): 235,
(48, 48, 48): 236,
(58, 58, 58): 237,
(68, 68, 68): 238,
(78, 78, 78): 239,
(88, 88, 88): 240,
(95, 0, 0): 52,
(95, 0, 95): 53,
(95, 0, 135): 54,
(95, 0, 175): 55,
(95, 0, 215): 56,
(95, 0, 255): 57,
(95, 95, 0): 58,
(95, 95, 95): 59,
(95, 95, 135): 60,
(95, 95, 175): 61,
(95, 95, 215): 62,
(95, 95, 255): 63,
(95, 135, 0): 64,
(95, 135, 95): 65,
(95, 135, 135): 66,
(95, 135, 175): 67,
(95, 135, 215): 68,
(95, 135, 255): 69,
(95, 175, 0): 70,
(95, 175, 95) : 71,
(95, 175, 135): 72,
(95, 175, 175): 73,
(95, 175, 215): 74,
(95, 175, 255): 75,
(95, 215, 0): 76,
(95, 215, 95) : 77,
(95, 215, 135): 78,
(95, 215, 175): 79,
(95, 215, 215): 80,
(95, 215, 255): 81,
(95, 255, 0): 82,
(95, 255, 95) : 83,
(95, 255, 135): 84,
(95, 255, 175): 85,
(95, 255, 215): 86,
(95, 255, 255): 87,
(98, 98, 98): 241,
(108, 108, 108): 242,
(118, 118, 118): 243,
(128, 0, 0): 1,
(128, 0, 128): 5,
(128, 128, 0): 3,
(128, 128, 128): 244,
(135, 0, 0): 88,
(135, 0, 95): 89,
(135, 0, 135): 90,
(135, 0, 175): 91,
(135, 0, 215): 92,
(135, 0, 255): 93,
(135, 95, 0): 94,
(135, 95, 95): 95,
(135, 95, 135): 96,
(135, 95, 175): 97,
(135, 95, 215): 98,
(135, 95, 255): 99,
(135, 135, 0): 100,
(135, 135, 95): 101,
(135, 135, 135): 102,
(135, 135, 175): 103,
(135, 135, 215): 104,
(135, 135, 255): 105,
(135, 175, 0): 106,
(135, 175, 95): 107,
(135, 175, 135): 108,
(135, 175, 175): 109,
(135, 175, 215): 110,
(135, 175, 255): 111,
(135, 215, 0): 112,
(135, 215, 95): 113,
(135, 215, 135): 114,
(135, 215, 175): 115,
(135, 215, 215): 116,
(135, 215, 255): 117,
(135, 255, 0): 118,
(135, 255, 95): 119,
(135, 255, 135): 120,
(135, 255, 175): 121,
(135, 255, 215): 122,
(135, 255, 255): 123,
(138, 138, 138): 245,
(148, 148, 148): 246,
(158, 158, 158): 247,
(168, 168, 168): 248,
(175, 0, 0): 124,
(175, 0, 95): 125,
(175, 0, 135): 126,
(175, 0, 175): 127,
(175, 0, 215): 128,
(175, 0, 255): 129,
(175, 95, 0): 130,
(175, 95, 95): 131,
(175, 95, 135): 132,
(175, 95, 175): 133,
(175, 95, 215): 134,
(175, 95, 255): 135,
(175, 135, 0): 136,
(175, 135, 95): 137,
(175, 135, 135): 138,
(175, 135, 175): 139,
(175, 135, 215): 140,
(175, 135, 255): 141,
(175, 175, 0): 142,
(175, 175, 95): 143,
(175, 175, 135): 144,
(175, 175, 175): 145,
(175, 175, 215): 146,
(175, 175, 255): 147,
(175, 215, 0): 148,
(175, 215, 95): 149,
(175, 215, 135): 150,
(175, 215, 175): 151,
(175, 215, 215): 152,
(175, 215, 255): 153,
(175, 255, 0): 154,
(175, 255, 95): 155,
(175, 255, 135): 156,
(175, 255, 175): 157,
(175, 255, 215): 158,
(175, 255, 255): 159,
(178, 178, 178): 249,
(188, 188, 188): 250,
(192, 192, 192): 7,
(198, 198, 198): 251,
(208, 208, 208): 252,
(215, 0, 0): 160,
(215, 0, 95): 161,
(215, 0, 135): 162,
(215, 0, 175): 163,
(215, 0, 215): 164,
(215, 0, 255): 165,
(215, 95, 0): 166,
(215, 95, 95): 167,
(215, 95, 135): 168,
(215, 95, 175): 169,
(215, 95, 215): 170,
(215, 95, 255): 171,
(215, 135, 0): 172,
(215, 135, 95): 173,
(215, 135, 135): 174,
(215, 135, 175): 175,
(215, 135, 215): 176,
(215, 135, 255): 177,
(215, 175, 0): 178,
(215, 175, 95): 179,
(215, 175, 135): 180,
(215, 175, 175): 181,
(215, 175, 215): 182,
(215, 175, 255): 183,
(215, 215, 0): 184,
(215, 215, 95): 185,
(215, 215, 135): 186,
(215, 215, 175): 187,
(215, 215, 215): 188,
(215, 215, 255): 189,
(215, 255, 0): 190,
(215, 255, 95): 191,
(215, 255, 135): 192,
(215, 255, 175): 193,
(215, 255, 215): 194,
(215, 255, 255): 195,
(218, 218, 218): 253,
(228, 228, 228): 254,
(238, 238, 238): 255,
(255, 0, 0): 196,
(255, 0, 95): 197,
(255, 0, 135): 198,
(255, 0, 175): 199,
(255, 0, 215): 200,
(255, 0, 255): 13,
(255, 95, 0): 202,
(255, 95, 95): 203,
(255, 95, 135): 204,
(255, 95, 175): 205,
(255, 95, 215): 206,
(255, 95, 255): 207,
(255, 135, 0): 208,
(255, 135, 95): 209,
(255, 135, 135): 210,
(255, 135, 175): 211,
(255, 135, 215): 212,
(255, 135, 255): 213,
(255, 175, 0): 214,
(255, 175, 95): 215,
(255, 175, 135): 216,
(255, 175, 175): 217,
(255, 175, 215): 218,
(255, 175, 255): 219,
(255, 215, 0): 220,
(255, 215, 95): 221,
(255, 215, 135): 222,
(255, 215, 175): 223,
(255, 215, 215): 224,
(255, 215, 255): 225,
(255, 255, 0): 11,
(255, 255, 95): 227,
(255, 255, 135): 228,
(255, 255, 175): 229,
(255, 255, 215): 230,
(255, 255, 255): 231}
def hexstr2num(hexstr):
return int(hexstr, 16)
def rgb2short(r, g, b):
""" Find the closest xterm-256 approximation to the given RGB value.
@param r,g,b: each is a number between 0-255 for the Red, Green, and Blue values
@returns: integer between 0 and 255, compatible with xterm.
>>> rgb2short(18, 52, 86)
23
>>> rgb2short(255, 255, 255)
231
>>> rgb2short(13, 173, 214) # vimeo logo
38
"""
incs = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
# Snap each of the three channel values to the closest xterm cube increment.
parts = [ r, g, b]
res = []
for part in parts:
i = 0
while i < len(incs)-1:
s, b = incs[i], incs[i+1] # smaller, bigger
if s <= part <= b:
s1 = abs(s - part)
b1 = abs(b - part)
if s1 < b1: closest = s
else: closest = b
res.append(closest)
break
i += 1
#print '***', res
return RGB2SHORT_DICT[tuple(res)]
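# A minimal usage sketch of the two helpers above; the sample hex string
# '1234ff' is an arbitrary illustration, not a value used elsewhere.
def _demo_usage():
    rgb = rgbstring2tuple('1234ff')   # -> (18, 52, 255)
    return rgb2short(*rgb)            # -> 27, a bright blue from the 6x6x6 cube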
#---------------------------------------------------------------------
if __name__ == '__main__':
import doctest
doctest.testmod()
| mit |
HuaweiSwitch/ansible | lib/ansible/modules/network/dellos6/dellos6_command.py | 46 | 7467 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <[email protected]>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos6_command
version_added: "2.2"
author: "Abirami N (@abirami-n)"
short_description: Run commands on remote devices running Dell OS6
description:
- Sends arbitrary commands to a Dell OS6 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos6_config) to configure Dell OS6 devices.
extends_documentation_fragment: dellos6
options:
commands:
description:
- List of commands to send to the remote dellos6 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of I(retries) has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
- Specifies the number of times a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos6_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains Dell
dellos6_command:
commands: show version
wait_for: result[0] contains Dell
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos6_command:
commands:
- show version
- show interfaces
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos6_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Dell
- result[1] contains Access
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.dellos6 import run_commands
from ansible.module_utils.dellos6 import dellos6_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.netcli import Conditional
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='dellos6_command does not support running config mode '
'commands. Please use dellos6_config instead'
)
return commands
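# For reference: ComplexList accepts each entry of module.params['commands'] as
# either a plain string or a dict, and normalises it to a dict keyed on
# 'command' with optional 'prompt'/'answer' fields, e.g. 'show version' becomes
# roughly {'command': 'show version', 'prompt': None, 'answer': None}. This is
# a sketch of the assumed behaviour of the shared module_utils helper.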
def main():
"""main entry point for module execution
"""
argument_spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(dellos6_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
rugk/threema-msgapi-sdk-python | threema/gateway/key.py | 1 | 3942 | """
Contains functions to decode, encode and generate keys.
"""
import enum
import hashlib
import hmac
import libnacl.public
import libnacl.encode
__all__ = ('HMAC', 'Key')
class HMAC(object):
"""
A collection of HMAC functions used for the gateway service.
"""
keys = {
'email': b'\x30\xa5\x50\x0f\xed\x97\x01\xfa\x6d\xef\xdb\x61\x08\x41\x90\x0f'
b'\xeb\xb8\xe4\x30\x88\x1f\x7a\xd8\x16\x82\x62\x64\xec\x09\xba\xd7',
'phone': b'\x85\xad\xf8\x22\x69\x53\xf3\xd9\x6c\xfd\x5d\x09\xbf\x29\x55\x5e'
b'\xb9\x55\xfc\xd8\xaa\x5e\xc4\xf9\xfc\xd8\x69\xe2\x58\x37\x07\x23'
}
@staticmethod
def hash(message, hash_type):
"""
Generate the hash for a message type.
Arguments:
- `message`: A message.
- `hash_type`: `email` or `phone`.
Return a :class:`hmac.HMAC` instance.
"""
return hmac.new(HMAC.keys[hash_type], message.encode('ascii'), hashlib.sha256)
class Key(object):
"""
Encode or decode a key.
"""
separator = ':'
@enum.unique
class Type(enum.Enum):
"""
The type of a key.
"""
private = 'private'
public = 'public'
@staticmethod
def decode(encoded_key, expected_type):
"""
Decode a key and check its type if required.
Arguments:
- `encoded_key`: The encoded key.
- `expected_type`: One of the types of :class:`Key.Type`.
Return the key as an :class:`libnacl.public.SecretKey` or
:class:`libnacl.public.PublicKey` instance.
"""
# Split key
try:
type_, key = encoded_key.split(Key.separator)
except ValueError as exc:
raise KeyError('Invalid key format') from exc
type_ = Key.Type(type_)
# Check type
if type_ != expected_type:
raise KeyError('Invalid key type: {}, expected: {}'.format(
type_, expected_type
))
# De-hexlify
key = libnacl.encode.hex_decode(key)
# Convert to SecretKey or PublicKey
if type_ == Key.Type.private:
key = libnacl.public.SecretKey(key)
elif type_ == Key.Type.public:
key = libnacl.public.PublicKey(key)
return key
@staticmethod
def encode(libnacl_key):
"""
Encode a key.
Arguments:
- `libnacl_key`: An instance of either a
:class:`libnacl.public.SecretKey` or a
:class:`libnacl.public.PublicKey`.
Return the encoded key.
"""
# Detect key type and hexlify
if isinstance(libnacl_key, libnacl.public.SecretKey):
type_ = Key.Type.private
key = libnacl_key.hex_sk()
elif isinstance(libnacl_key, libnacl.public.PublicKey):
type_ = Key.Type.public
key = libnacl.encode.hex_encode(libnacl_key.pk)
else:
raise KeyError('Unknown key type: {}'.format(libnacl_key))
# Encode key
return Key.separator.join((type_.value, key.decode('utf-8')))
@staticmethod
def generate_pair():
"""
Generate a new key pair.
Return the key pair as a tuple of a
:class:`libnacl.public.SecretKey` instance and a
:class:`libnacl.public.PublicKey` instance.
"""
private_key = libnacl.public.SecretKey()
public_key = libnacl.public.PublicKey(private_key.pk)
return private_key, public_key
@staticmethod
def derive_public(private_key):
"""
Derive a public key from a :class:`libnacl.public.SecretKey`
instance.
Arguments:
- `private_key`: A :class:`libnacl.public.SecretKey`
instance.
Return the :class:`libnacl.public.PublicKey` instance.
"""
return libnacl.public.PublicKey(private_key.pk)
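# A minimal round-trip sketch of the helpers above; the variable names are
# illustrative only.
def _example_round_trip():
    private_key, public_key = Key.generate_pair()
    encoded = Key.encode(private_key)                  # 'private:<hex>'
    decoded = Key.decode(encoded, Key.Type.private)    # back to a SecretKey
    return decoded.sk == private_key.sk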
| mit |
delta2323/chainer | tests/chainer_tests/functions_tests/activation_tests/test_log_softmax.py | 3 | 3936 | import unittest
import mock
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
'shape': [None, (2, 3), (2, 2, 3), (2, 2, 2, 3)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestLogSoftmax(unittest.TestCase):
def setUp(self):
if self.shape is None:
# For checking numerical stability
value = -5 if self.dtype == numpy.float16 else -1000
self.x = numpy.array([[value, 1]], dtype=self.dtype)
else:
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.x.shape).astype(self.dtype)
self.check_forward_options = {}
self.check_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-4, 'rtol': 5e-3}
def check_forward(self, x_data, use_cudnn='always'):
x = chainer.Variable(x_data)
with chainer.using_config('use_cudnn', use_cudnn):
y = functions.log_softmax(x)
self.assertEqual(y.data.dtype, self.dtype)
log_z = numpy.ufunc.reduce(
numpy.logaddexp, self.x, axis=1, keepdims=True)
y_expect = self.x - log_z
testing.assert_allclose(
y_expect, y.data, **self.check_forward_options)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), 'never')
def check_backward(self, x_data, gy_data, use_cudnn='always'):
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_backward(
functions.LogSoftmax(), x_data, gy_data,
**self.check_backward_options)
@condition.retry(10)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(10)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(10)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestLogSoftmaxCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('>=auto', 3000)
def forward(self):
x = chainer.Variable(self.x)
return functions.log_softmax(x)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with mock.patch('cupy.cudnn.cudnn.softmaxForward') as func:
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
with mock.patch('cupy.cudnn.cudnn.softmaxBackward') as func:
y.backward()
self.assertEqual(func.called, self.expect)
testing.run_module(__name__, __file__)
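# A minimal NumPy sketch of the identity the forward check above relies on,
# log_softmax(x) = x - logsumexp(x) along the class axis (input is arbitrary).
def _log_softmax_reference(x):
    log_z = numpy.ufunc.reduce(numpy.logaddexp, x, axis=1, keepdims=True)
    return x - log_z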
| mit |
jonathan-beard/edx-platform | common/test/acceptance/pages/xblock/acid.py | 172 | 3445 | """
PageObjects related to the AcidBlock
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, BrokenPromise
from .utils import wait_for_xblock_initialization
class AcidView(PageObject):
"""
A :class:`.PageObject` representing the rendered view of the :class:`.AcidBlock`.
"""
url = None
def __init__(self, browser, context_selector):
"""
Args:
browser (selenium.webdriver): The Selenium-controlled browser that this page is loaded in.
context_selector (str): The selector that identifies where this :class:`.AcidBlock` view
is on the page.
"""
super(AcidView, self).__init__(browser)
if isinstance(context_selector, unicode):
context_selector = context_selector.encode('utf-8')
self.context_selector = context_selector
def is_browser_on_page(self):
# First make sure that an element with the view-container class is present on the page,
# and then wait to make sure that the xblock has finished initializing.
return (
self.q(css='{} .acid-block'.format(self.context_selector)).present and
wait_for_xblock_initialization(self, self.context_selector) and
self._ajax_finished()
)
def _ajax_finished(self):
try:
EmptyPromise(
lambda: self.browser.execute_script("return jQuery.active") == 0,
"AcidBlock tests still running",
timeout=240
).fulfill()
except BrokenPromise:
return False
else:
return True
def test_passed(self, test_selector):
"""
Return whether a particular :class:`.AcidBlock` test passed.
"""
selector = '{} .acid-block {} .pass'.format(self.context_selector, test_selector)
return bool(self.q(css=selector).results)
def child_test_passed(self, test_selector):
"""
Return whether a particular :class:`.AcidParentBlock` test passed.
"""
selector = '{} .acid-parent-block {} .pass'.format(self.context_selector, test_selector)
return bool(self.q(css=selector).execute(try_interval=0.1, timeout=3))
@property
def init_fn_passed(self):
"""
Whether the init-fn test passed in this view of the :class:`.AcidBlock`.
"""
return self.test_passed('.js-init-run')
@property
def child_tests_passed(self):
"""
Whether the tests of children passed
"""
return all([
self.child_test_passed('.child-counts-match'),
self.child_test_passed('.child-values-match')
])
@property
def resource_url_passed(self):
"""
Whether the resource-url test passed in this view of the :class:`.AcidBlock`.
"""
return self.test_passed('.local-resource-test')
def scope_passed(self, scope):
return all(
self.test_passed('.scope-storage-test.scope-{} {}'.format(scope, test))
for test in (
".server-storage-test-returned",
".server-storage-test-succeeded",
".client-storage-test-returned",
".client-storage-test-succeeded",
)
)
def __repr__(self):
return "{}(<browser>, {!r})".format(self.__class__.__name__, self.context_selector)
| agpl-3.0 |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.1.54/external/webkit/Tools/QueueStatusServer/__init__.py | 15 | 1155 | # Required for Python to search this directory for module files
# This __init__.py makes unit testing easier by allowing us to treat the entire server as one big module.
# This file is only accessed when not on AppEngine itself.
# Make sure that this module will load in that case by including paths to
# the default Google AppEngine install.
def fix_sys_path():
import sys
import os
# AppEngine imports a bunch of google-specific modules. Thankfully the dev_appserver
# knows how to do the same. Re-use the dev_appserver fix_sys_path logic to import
# all the google.appengine.* stuff so we can run under test-webkitpy
sys.path.append("/usr/local/google_appengine")
import dev_appserver
dev_appserver.fix_sys_path()
# test-webkitpy adds $WEBKIT/WebKitTools to the sys.path and imports
# QueueStatusServer to run all the tests. However, when AppEngine runs
# our code QueueStatusServer is the root (and thus in the path).
# Emulate that here for test-webkitpy so that we can import "model."
# not "QueueStatusServer.model.", etc.
sys.path.append(os.path.dirname(__file__))
fix_sys_path()
| gpl-2.0 |
mkaluza/external_chromium_org | chrome/test/mini_installer/test_installer.py | 27 | 9487 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script tests the installer with test cases specified in the config file.
For each test case, it checks that the machine states after the execution of
each command match the expected machine states. For more details, take a look at
the design documentation at http://goo.gl/Q0rGM6
"""
import json
import optparse
import os
import subprocess
import sys
import unittest
from variable_expander import VariableExpander
import verifier_runner
class Config:
"""Describes the machine states, actions, and test cases.
Attributes:
states: A dictionary where each key is a state name and the associated value
is a property dictionary describing that state.
actions: A dictionary where each key is an action name and the associated
value is the action's command.
tests: An array of test cases.
"""
def __init__(self):
self.states = {}
self.actions = {}
self.tests = []
class InstallerTest(unittest.TestCase):
"""Tests a test case in the config file."""
def __init__(self, test, config, variable_expander):
"""Constructor.
Args:
test: An array of alternating state names and action names, starting and
ending with state names.
config: The Config object.
variable_expander: A VariableExpander object.
"""
super(InstallerTest, self).__init__()
self._test = test
self._config = config
self._variable_expander = variable_expander
self._verifier_runner = verifier_runner.VerifierRunner()
self._clean_on_teardown = True
def __str__(self):
"""Returns a string representing the test case.
Returns:
A string created by joining state names and action names together with
' -> ', for example, 'Test: clean -> install chrome -> chrome_installed'.
"""
return 'Test: %s\n' % (' -> '.join(self._test))
def runTest(self):
"""Run the test case."""
# |test| is an array of alternating state names and action names, starting
# and ending with state names. Therefore, its length must be odd.
self.assertEqual(1, len(self._test) % 2,
'The length of test array must be odd')
state = self._test[0]
self._VerifyState(state)
# Starting at index 1, we loop through pairs of (action, state).
for i in range(1, len(self._test), 2):
action = self._test[i]
RunCommand(self._config.actions[action], self._variable_expander)
state = self._test[i + 1]
self._VerifyState(state)
# If the test makes it here, it means it was successful, because RunCommand
# and _VerifyState throw an exception on failure.
self._clean_on_teardown = False
def tearDown(self):
"""Cleans up the machine if the test case fails."""
if self._clean_on_teardown:
RunCleanCommand(True, self._variable_expander)
def shortDescription(self):
"""Overridden from unittest.TestCase.
We return None as the short description to suppress its printing.
The default implementation of this method returns the docstring of the
runTest method, which is not useful since it's the same for every test case.
The description from the __str__ method is informative enough.
"""
return None
def _VerifyState(self, state):
"""Verifies that the current machine state matches a given state.
Args:
state: A state name.
"""
try:
self._verifier_runner.VerifyAll(self._config.states[state],
self._variable_expander)
except AssertionError as e:
# If an AssertionError occurs, we intercept it and add the state name
# to the error message so that we know where the test fails.
raise AssertionError("In state '%s', %s" % (state, e))
def RunCommand(command, variable_expander):
"""Runs the given command from the current file's directory.
This function throws an Exception if the command returns with non-zero exit
status.
Args:
command: A command to run. It is expanded using Expand.
variable_expander: A VariableExpander object.
"""
expanded_command = variable_expander.Expand(command)
script_dir = os.path.dirname(os.path.abspath(__file__))
exit_status = subprocess.call(expanded_command, shell=True, cwd=script_dir)
if exit_status != 0:
raise Exception('Command %s returned non-zero exit status %s' % (
expanded_command, exit_status))
def RunCleanCommand(force_clean, variable_expander):
"""Puts the machine in the clean state (i.e. Chrome not installed).
Args:
force_clean: A boolean indicating whether to force cleaning existing
installations.
variable_expander: A VariableExpander object.
"""
# TODO(sukolsak): Read the clean state from the config file and clean
# the machine according to it.
# TODO(sukolsak): Handle Chrome SxS installs.
commands = []
interactive_option = '--interactive' if not force_clean else ''
for level_option in ('', '--system-level'):
commands.append('python uninstall_chrome.py '
'--chrome-long-name="$CHROME_LONG_NAME" '
'--no-error-if-absent %s %s' %
(level_option, interactive_option))
RunCommand(' && '.join(commands), variable_expander)
def MergePropertyDictionaries(current_property, new_property):
"""Merges the new property dictionary into the current property dictionary.
This is different from general dictionary merging in that, in case there are
keys with the same name, we merge values together in the first level, and we
override earlier values in the second level. For more details, take a look at
http://goo.gl/uE0RoR
Args:
current_property: The property dictionary to be modified.
new_property: The new property dictionary.
"""
for key, value in new_property.iteritems():
if key not in current_property:
current_property[key] = value
else:
assert(isinstance(current_property[key], dict) and
isinstance(value, dict))
# This merges two dictionaries together. In case there are keys with
# the same name, the latter will override the former.
current_property[key] = dict(
current_property[key].items() + value.items())
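# A small worked example of the merge semantics above (values are illustrative):
# merging {'Files': {'a': 1}} into {'Files': {'a': 0, 'b': 2}} yields
# {'Files': {'a': 1, 'b': 2}} -- first-level keys are merged together, while
# colliding second-level values from the new dictionary win.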
def ParsePropertyFiles(directory, filenames):
"""Parses an array of .prop files.
Args:
directory: The directory where the Config file and all Property files
reside.
filenames: An array of Property filenames.
Returns:
A property dictionary created by merging all property dictionaries specified
in the array.
"""
current_property = {}
for filename in filenames:
path = os.path.join(directory, filename)
new_property = json.load(open(path))
MergePropertyDictionaries(current_property, new_property)
return current_property
def ParseConfigFile(filename):
"""Parses a .config file.
Args:
filename: A Config filename.
Returns:
A Config object.
"""
config_data = json.load(open(filename, 'r'))
directory = os.path.dirname(os.path.abspath(filename))
config = Config()
config.tests = config_data['tests']
for state_name, state_property_filenames in config_data['states']:
config.states[state_name] = ParsePropertyFiles(directory,
state_property_filenames)
for action_name, action_command in config_data['actions']:
config.actions[action_name] = action_command
return config
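# Shape of the config file implied by the parser above; the field names follow
# the code, while the sample values are assumptions for illustration only:
#
# {
#   "tests": [["clean", "install chrome", "chrome_installed"]],
#   "states": [["clean", ["clean.prop"]],
#              ["chrome_installed", ["chrome_installed.prop"]]],
#   "actions": [["install chrome", "<command string, expanded by VariableExpander>"]]
# }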
def RunTests(mini_installer_path, config, force_clean):
"""Tests the installer using the given Config object.
Args:
mini_installer_path: The path to mini_installer.exe.
config: A Config object.
force_clean: A boolean indicating whether to force cleaning existing
installations.
Returns:
True if all the tests passed, or False otherwise.
"""
suite = unittest.TestSuite()
variable_expander = VariableExpander(mini_installer_path)
RunCleanCommand(force_clean, variable_expander)
for test in config.tests:
suite.addTest(InstallerTest(test, config, variable_expander))
result = unittest.TextTestRunner(verbosity=2).run(suite)
return result.wasSuccessful()
def main():
usage = 'usage: %prog [options] config_filename'
parser = optparse.OptionParser(usage, description='Test the installer.')
parser.add_option('--build-dir', default='out',
help='Path to main build directory (the parent of the '
'Release or Debug directory)')
parser.add_option('--target', default='Release',
help='Build target (Release or Debug)')
parser.add_option('--force-clean', action='store_true', dest='force_clean',
default=False, help='Force cleaning existing installations')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Incorrect number of arguments.')
config_filename = args[0]
mini_installer_path = os.path.join(options.build_dir, options.target,
'mini_installer.exe')
assert os.path.exists(mini_installer_path), ('Could not find file %s' %
mini_installer_path)
config = ParseConfigFile(config_filename)
if not RunTests(mini_installer_path, config, options.force_clean):
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
amian84/commiecc | commiecc/slavelib/utils.py | 1 | 2819 | # -*- coding: utf-8 -*-
# vim: ts=4
###
#
# CommieCC is the legal property of J. Félix Ontañón <[email protected]>
# Copyright (c) 2009 J. Félix Ontañón
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
###
import dbus
import threading
import logging
LOG_MAIN = 'main_log'
LOG_FILE = '/var/log/commiecc-slave.log'
LOG_LEVEL = logging.DEBUG
LOG_FORMAT = '%(asctime)s %(levelname)s - %(message)s'
LOG_DATE_FORMAT = '%H:%M:%S'
def setup_main_logger():
import logging.handlers
main_logger = logging.getLogger(LOG_MAIN)
main_logger.setLevel(LOG_LEVEL)
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=300000,
backupCount=5)
handler.setFormatter(logging.Formatter(LOG_FORMAT, datefmt=LOG_DATE_FORMAT))
main_logger.addHandler(handler)
class log():
def __init__(self, msg, level=logging.DEBUG):
self.msg = msg
self.level = level
self.logger = logging.getLogger(LOG_MAIN)
def __call__(self, f):
def wrapped_f(*args):
self.logger.log(self.level, self.msg)
return f(*args)
return wrapped_f
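# A minimal usage sketch of the decorator above; the decorated function is
# purely illustrative and not used elsewhere in the module.
@log('example action ran', level=logging.INFO)
def _example_action():
    pass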
import PAM
# Fully based on the TcosMonitor TcosPAM.py module file by Mario Izquierdo
# http://wiki.tcosproject.org/Utils/TcosMonitor/
def pam_auth(user, password):
class AuthConv:
def __init__(self, password):
self.password = password
def __call__(self, auth, query_list, userData):
resp = []
for query, qt in query_list:
if qt == PAM.PAM_PROMPT_ECHO_ON:
resp.append((self.password, 0))
elif qt == PAM.PAM_PROMPT_ECHO_OFF:
resp.append((self.password, 0))
elif qt == PAM.PAM_PROMPT_ERROR_MSG or qt == PAM.PAM_PROMPT_TEXT_INFO:
print query
resp.append(('', 0))
else:
return None
return resp
auth = PAM.pam()
auth.start("passwd")
auth.set_item(PAM.PAM_USER, user)
auth.set_item(PAM.PAM_CONV, AuthConv(password))
try:
auth.authenticate()
auth.acct_mgmt()
return True
except PAM.error, resp:
return False
| gpl-3.0 |
greencoder/mybitly | Test Server/libraries/jinja2/compiler.py | 335 | 63846 | # -*- coding: utf-8 -*-
"""
jinja2.compiler
~~~~~~~~~~~~~~~
Compiles nodes into python code.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from itertools import chain
from copy import deepcopy
from keyword import iskeyword as is_python_keyword
from jinja2 import nodes
from jinja2.nodes import EvalContext
from jinja2.visitor import NodeVisitor
from jinja2.exceptions import TemplateAssertionError
from jinja2.utils import Markup, concat, escape
from jinja2._compat import range_type, text_type, string_types, \
iteritems, NativeStringIO, imap
operators = {
'eq': '==',
'ne': '!=',
'gt': '>',
'gteq': '>=',
'lt': '<',
'lteq': '<=',
'in': 'in',
'notin': 'not in'
}
# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, 'iteritems'):
dict_item_iter = 'iteritems'
else:
dict_item_iter = 'items'
# does if 0: dummy(x) get us x into the scope?
def unoptimize_before_dead_code():
x = 42
def f():
if 0: dummy(x)
return f
# The getattr is necessary for pypy which does not set this attribute if
# no closure is on the function
unoptimize_before_dead_code = bool(
getattr(unoptimize_before_dead_code(), '__closure__', None))
def generate(node, environment, name, filename, stream=None,
defer_init=False):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
generator = environment.code_generator_class(environment, name, filename,
stream, defer_init)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
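# A minimal sketch of how this entry point can be driven (normally it is called
# indirectly via Environment.compile); the template source is an arbitrary
# illustration.
#
#     from jinja2 import Environment
#     env = Environment()
#     source = generate(env.parse('Hello {{ name }}!'), env, 'example',
#                       'example.html')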
def has_safe_repr(value):
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if isinstance(value, (bool, int, float, complex, range_type,
Markup) + string_types):
return True
if isinstance(value, (tuple, list, set, frozenset)):
for item in value:
if not has_safe_repr(item):
return False
return True
elif isinstance(value, dict):
for key, value in iteritems(value):
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
class Identifiers(object):
"""Tracks the status of identifiers in frames."""
def __init__(self):
# variables that are known to be declared (probably from outer
# frames or because they are special for the frame)
self.declared = set()
# undeclared variables from outer scopes
self.outer_undeclared = set()
# names that are accessed without being explicitly declared by
# this one or any of the outer scopes. Names can appear both in
# declared and undeclared.
self.undeclared = set()
# names that are declared locally
self.declared_locally = set()
# names that are declared by parameters
self.declared_parameter = set()
def add_special(self, name):
"""Register a special name like `loop`."""
self.undeclared.discard(name)
self.declared.add(name)
def is_declared(self, name):
"""Check if a name is declared in this or an outer scope."""
if name in self.declared_locally or name in self.declared_parameter:
return True
return name in self.declared
def copy(self):
return deepcopy(self)
class Frame(object):
"""Holds compile time information for us."""
def __init__(self, eval_ctx, parent=None):
self.eval_ctx = eval_ctx
self.identifiers = Identifiers()
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = parent and parent.require_output_check
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer = None
# the name of the block we're in, otherwise None.
self.block = parent and parent.block or None
# a set of actually assigned names
self.assigned_names = set()
# the parent of this frame
self.parent = parent
if parent is not None:
self.identifiers.declared.update(
parent.identifiers.declared |
parent.identifiers.declared_parameter |
parent.assigned_names
)
self.identifiers.outer_undeclared.update(
parent.identifiers.undeclared -
self.identifiers.declared
)
self.buffer = parent.buffer
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.identifiers = object.__new__(self.identifiers.__class__)
rv.identifiers.__dict__.update(self.identifiers.__dict__)
return rv
def inspect(self, nodes):
"""Walk the node and check for identifiers. If the scope is hard (eg:
enforced on a python level) overrides from outer scopes are tracked
differently.
"""
visitor = FrameIdentifierVisitor(self.identifiers)
for node in nodes:
visitor.visit(node)
def find_shadowed(self, extra=()):
"""Find all the shadowed names. extra is an iterable of variables
that may be defined with `add_special` which may occour scoped.
"""
i = self.identifiers
return (i.declared | i.outer_undeclared) & \
(i.declared_locally | i.declared_parameter) | \
set(x for x in extra if i.is_declared(x))
def inner(self):
"""Return an inner frame."""
return Frame(self.eval_ctx, self)
def soft(self):
"""Return a soft frame. A soft frame may not be modified as
standalone thing as it shares the resources with the frame it
was created of, but it's not a rootlevel frame any longer.
"""
rv = self.copy()
rv.rootlevel = False
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self):
self.filters = set()
self.tests = set()
def visit_Filter(self, node):
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node):
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node):
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if node.ctx == 'load' and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
def visit_Block(self, node):
"""Stop visiting a blocks."""
class FrameIdentifierVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, identifiers):
self.identifiers = identifiers
def visit_Name(self, node):
"""All assignments to names go through this function."""
if node.ctx == 'store':
self.identifiers.declared_locally.add(node.name)
elif node.ctx == 'param':
self.identifiers.declared_parameter.add(node.name)
elif node.ctx == 'load' and not \
self.identifiers.is_declared(node.name):
self.identifiers.undeclared.add(node.name)
def visit_If(self, node):
self.visit(node.test)
real_identifiers = self.identifiers
old_names = real_identifiers.declared_locally | \
real_identifiers.declared_parameter
def inner_visit(nodes):
if not nodes:
return set()
self.identifiers = real_identifiers.copy()
for subnode in nodes:
self.visit(subnode)
rv = self.identifiers.declared_locally - old_names
# we have to remember the undeclared variables of this branch
# because we will have to pull them.
real_identifiers.undeclared.update(self.identifiers.undeclared)
self.identifiers = real_identifiers
return rv
body = inner_visit(node.body)
else_ = inner_visit(node.else_ or ())
# the differences between the two branches are also pulled as
# undeclared variables
real_identifiers.undeclared.update(body.symmetric_difference(else_) -
real_identifiers.declared)
# remember those that are declared.
real_identifiers.declared_locally.update(body | else_)
def visit_Macro(self, node):
self.identifiers.declared_locally.add(node.name)
def visit_Import(self, node):
self.generic_visit(node)
self.identifiers.declared_locally.add(node.target)
def visit_FromImport(self, node):
self.generic_visit(node)
for name in node.names:
if isinstance(name, tuple):
self.identifiers.declared_locally.add(name[1])
else:
self.identifiers.declared_locally.add(name)
def visit_Assign(self, node):
"""Visit assignments in the correct order."""
self.visit(node.node)
self.visit(node.target)
def visit_For(self, node):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter)
def visit_CallBlock(self, node):
self.visit(node.call)
def visit_FilterBlock(self, node):
self.visit(node.filter)
def visit_AssignBlock(self, node):
"""Stop visiting at block assigns."""
def visit_Scope(self, node):
"""Stop visiting at scopes."""
def visit_Block(self, node):
"""Stop visiting at blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(self, environment, name, filename, stream=None,
defer_init=False):
if stream is None:
stream = NativeStringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
# aliases for imports
self.import_aliases = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests = {}
self.filters = {}
# the debug information
self.debug_info = []
self._write_debug_info = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# -- Various compilation helpers
def fail(self, msg, lineno):
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return 't_%d' % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline('%s = []' % frame.buffer)
def return_buffer_contents(self, frame):
"""Return the buffer contents of the frame."""
if frame.eval_ctx.volatile:
self.writeline('if context.eval_ctx.autoescape:')
self.indent()
self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
self.writeline('else:')
self.indent()
self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
elif frame.eval_ctx.autoescape:
self.writeline('return Markup(concat(%s))' % frame.buffer)
else:
self.writeline('return concat(%s)' % frame.buffer)
def indent(self):
"""Indent by one."""
self._indentation += 1
def outdent(self, step=1):
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline('yield ', node)
else:
self.writeline('%s.append(' % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(')')
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes, frame):
"""Visit a list of nodes as block in a frame. If the current frame
has no buffer, a dummy ``if 0: yield None`` is written automatically
unless the force_generator parameter is set to False.
"""
if frame.buffer is None:
self.writeline('if 0: yield None')
else:
self.writeline('pass')
try:
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info,
self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords otherwise a syntax
error could occur. The extra keyword arguments should be given
as python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write(', %s=%s' % (key, value))
if node.dyn_args:
self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(', **dict({')
else:
self.write(', **{')
for kwarg in node.kwargs:
self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
self.write(', ')
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
self.write('}, **')
self.visit(node.dyn_kwargs, frame)
self.write(')')
else:
self.write('}')
elif node.dyn_kwargs is not None:
self.write(', **')
self.visit(node.dyn_kwargs, frame)
def pull_locals(self, frame):
"""Pull all the references identifiers into the local scope."""
for name in frame.identifiers.undeclared:
self.writeline('l_%s = context.resolve(%r)' % (name, name))
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name))
def unoptimize_scope(self, frame):
"""Disable Python optimizations for the frame."""
# XXX: this is not that nice but it has no real overhead. It
# mainly works because python finds the locals before dead code
# is removed. If that breaks we have to add a dummy function
# that just accepts the arguments and does nothing.
if frame.identifiers.declared:
self.writeline('%sdummy(%s)' % (
unoptimize_before_dead_code and 'if 0: ' or '',
', '.join('l_' + name for name in frame.identifiers.declared)
))
def push_scope(self, frame, extra_vars=()):
"""This function returns all the shadowed variables in a dict
in the form name: alias and will write the required assignments
into the current scope. No indentation takes place.
This also predefines locally declared variables from the loop
body because under some circumstances it may be the case that
`extra_vars` is passed to `Frame.find_shadowed`.
"""
aliases = {}
for name in frame.find_shadowed(extra_vars):
aliases[name] = ident = self.temporary_identifier()
self.writeline('%s = l_%s' % (ident, name))
to_declare = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_declare.add('l_' + name)
if to_declare:
self.writeline(' = '.join(to_declare) + ' = missing')
return aliases
def pop_scope(self, aliases, frame):
"""Restore all aliases and delete unused variables."""
for name, alias in iteritems(aliases):
self.writeline('l_%s = %s' % (name, alias))
to_delete = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_delete.add('l_' + name)
if to_delete:
# we cannot use the del statement here because enclosed
# scopes can trigger a SyntaxError:
# a = 42; b = lambda: a; del a
self.writeline(' = '.join(to_delete) + ' = missing')
def function_scoping(self, node, frame, children=None,
find_special=True):
"""In Jinja a few statements require the help of anonymous
functions. Those are currently macros and call blocks and in
the future also recursive loops. As there is currently
technical limitation that doesn't allow reading and writing a
variable in a scope where the initial value is coming from an
outer scope, this function tries to fall back with a common
error message. Additionally the frame passed is modified so
that the arguments are collected and callers are looked up.
This will return the modified frame.
"""
# we have to iterate twice over it, make sure that works
if children is None:
children = node.iter_child_nodes()
children = list(children)
func_frame = frame.inner()
func_frame.inspect(children)
# variables that are undeclared (accessed before declaration) and
# declared locally *and* part of an outside scope raise a template
# assertion error. Reason: we can't generate reasonable code from
# it without aliasing all the variables.
# this could be fixed in Python 3 where we have the nonlocal
# keyword or if we switch to bytecode generation
overridden_closure_vars = (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared &
(func_frame.identifiers.declared_locally |
func_frame.identifiers.declared_parameter)
)
if overridden_closure_vars:
self.fail('It\'s not possible to set and access variables '
'derived from an outer scope! (affects: %s)' %
', '.join(sorted(overridden_closure_vars)), node.lineno)
# remove variables from a closure from the frame's undeclared
# identifiers.
func_frame.identifiers.undeclared -= (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared
)
# no special variables for this scope, abort early
if not find_special:
return func_frame
func_frame.accesses_kwargs = False
func_frame.accesses_varargs = False
func_frame.accesses_caller = False
func_frame.arguments = args = ['l_' + x.name for x in node.args]
undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
if 'caller' in undeclared:
func_frame.accesses_caller = True
func_frame.identifiers.add_special('caller')
args.append('l_caller')
if 'kwargs' in undeclared:
func_frame.accesses_kwargs = True
func_frame.identifiers.add_special('kwargs')
args.append('l_kwargs')
if 'varargs' in undeclared:
func_frame.accesses_varargs = True
func_frame.identifiers.add_special('varargs')
args.append('l_varargs')
return func_frame
def macro_body(self, node, frame, children=None):
"""Dump the function def of a macro or call block."""
frame = self.function_scoping(node, frame, children)
# macros are delayed, they never require output checks
frame.require_output_check = False
args = frame.arguments
# XXX: this is an ugly fix for the loop nesting bug
# (tests.test_old_bugs.test_loop_call_bug). This works around
# an identifier nesting problem we have in general. It's just more
# likely to happen in loops which is why we work around it. The
# real solution would be "nonlocal" all the identifiers that are
# leaking into a new python frame and might be used both unassigned
# and assigned.
if 'loop' in frame.identifiers.declared:
args = args + ['l_loop=l_loop']
self.writeline('def macro(%s):' % ', '.join(args), node)
self.indent()
self.buffer(frame)
self.pull_locals(frame)
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame)
self.outdent()
return frame
def macro_def(self, node, frame):
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ', '.join(repr(x.name) for x in node.args)
name = getattr(node, 'name', None)
if len(node.args) == 1:
arg_tuple += ','
self.write('Macro(environment, macro, %r, (%s), (' %
(name, arg_tuple))
for arg in node.defaults:
self.visit(arg, frame)
self.write(', ')
self.write('), %r, %r, %r)' % (
bool(frame.accesses_kwargs),
bool(frame.accesses_varargs),
bool(frame.accesses_caller)
))
def position(self, node):
"""Return a human readable position for the node."""
rv = 'line %d' % node.lineno
if self.name is not None:
rv += ' in ' + repr(self.name)
return rv
# -- Statement Visitors
def visit_Template(self, node, frame=None):
assert frame is None, 'no root frame allowed'
eval_ctx = EvalContext(self.environment, self.name)
from jinja2.runtime import __all__ as exported
self.writeline('from __future__ import division')
self.writeline('from jinja2.runtime import ' + ', '.join(exported))
if not unoptimize_before_dead_code:
self.writeline('dummy = lambda *x: None')
# if we want a deferred initialization we cannot move the
# environment into a local name
envenv = not self.defer_init and ', environment=environment' or ''
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
have_extends = node.find(nodes.Extends) is not None
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
self.fail('block %r defined twice' % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
for import_ in node.find_all(nodes.ImportedName):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
if '.' in imp:
module, obj = imp.rsplit('.', 1)
self.writeline('from %s import %s as %s' %
(module, obj, alias))
else:
self.writeline('import %s as %s' % (imp, alias))
# add the load name
self.writeline('name = %r' % self.name)
# generate the root render function.
self.writeline('def root(context%s):' % envenv, extra=1)
# process the root
frame = Frame(eval_ctx)
frame.inspect(node.body)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
self.indent()
if have_extends:
self.writeline('parent_template = None')
if 'self' in find_undeclared(node.body, ('self',)):
frame.identifiers.add_special('self')
self.writeline('l_self = TemplateReference(context)')
self.pull_locals(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
self.outdent()
# make sure that the parent root is called.
if have_extends:
if not self.has_known_extends:
self.indent()
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('for event in parent_template.'
'root_render_func(context):')
self.indent()
self.writeline('yield event')
self.outdent(2 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
block_frame = Frame(eval_ctx)
block_frame.inspect(block.body)
block_frame.block = name
self.writeline('def block_%s(context%s):' % (name, envenv),
block, 1)
self.indent()
undeclared = find_undeclared(block.body, ('self', 'super'))
if 'self' in undeclared:
block_frame.identifiers.add_special('self')
self.writeline('l_self = TemplateReference(context)')
if 'super' in undeclared:
block_frame.identifiers.add_special('super')
self.writeline('l_super = context.super(%r, '
'block_%s)' % (name, name))
self.pull_locals(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.outdent()
self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
for x in self.blocks),
extra=1)
# add a function that returns the debug info
self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
in self.debug_info))
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
level = 1
if frame.toplevel:
# if we know that we are a child template, there is no need to
# check if we are one
if self.has_known_extends:
return
if self.extends_so_far > 0:
self.writeline('if parent_template is None:')
self.indent()
level += 1
context = node.scoped and 'context.derived(locals())' or 'context'
self.writeline('for event in context.blocks[%r][0](%s):' % (
node.name, context), node)
self.indent()
self.simple_write('event', frame)
self.outdent(level)
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail('cannot use extend from a non top-level scope',
node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
            # time too, but we prefer not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('raise TemplateRuntimeError(%r)' %
'extended multiple times')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
else:
self.outdent()
self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
self.writeline('for name, parent_block in parent_template.'
'blocks.%s():' % dict_item_iter)
self.indent()
self.writeline('context.blocks.setdefault(name, []).'
'append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1
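    # For illustration (assumed, not spelled out in the original source): the
    # emitted code resolves the parent via ``environment.get_template(...)``
    # and appends each parent block to ``context.blocks``, so that the root
    # render function can later yield from
    # ``parent_template.root_render_func(context)``.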
def visit_Include(self, node, frame):
"""Handles includes."""
if node.with_context:
self.unoptimize_scope(frame)
if node.ignore_missing:
self.writeline('try:')
self.indent()
func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = 'select_template'
self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
self.writeline('except TemplateNotFound:')
self.indent()
self.writeline('pass')
self.outdent()
self.writeline('else:')
self.indent()
if node.with_context:
self.writeline('for event in template.root_render_func('
'template.new_context(context.parent, True, '
'locals())):')
else:
self.writeline('for event in template.module._body_stream:')
self.indent()
self.simple_write('event', frame)
self.outdent()
if node.ignore_missing:
self.outdent()
def visit_Import(self, node, frame):
"""Visit regular imports."""
if node.with_context:
self.unoptimize_scope(frame)
self.writeline('l_%s = ' % node.target, node)
if frame.toplevel:
self.write('context.vars[%r] = ' % node.target)
self.write('environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True, locals())')
else:
self.write('module')
if frame.toplevel and not node.target.startswith('_'):
self.writeline('context.exported_vars.discard(%r)' % node.target)
frame.assigned_names.add(node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
self.write('included_template = environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True)')
else:
self.write('module')
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline('l_%s = getattr(included_template, '
'%r, missing)' % (alias, name))
self.writeline('if l_%s is missing:' % alias)
self.indent()
self.writeline('l_%s = environment.undefined(%r %% '
'included_template.__name__, '
'name=%r)' %
(alias, 'the template %%r (imported on %s) does '
'not export the requested name %s' % (
self.position(node),
repr(name)
), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith('_'):
discarded_names.append(alias)
frame.assigned_names.add(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline('context.vars[%r] = l_%s' % (name, name))
else:
self.writeline('context.vars.update({%s})' % ', '.join(
'%r: l_%s' % (name, name) for name in var_names
))
if discarded_names:
if len(discarded_names) == 1:
self.writeline('context.exported_vars.discard(%r)' %
discarded_names[0])
else:
self.writeline('context.exported_vars.difference_'
'update((%s))' % ', '.join(imap(repr, discarded_names)))
def visit_For(self, node, frame):
# when calculating the nodes for the inner frame we have to exclude
# the iterator contents from it
children = node.iter_child_nodes(exclude=('iter',))
if node.recursive:
loop_frame = self.function_scoping(node, frame, children,
find_special=False)
else:
loop_frame = frame.inner()
loop_frame.inspect(children)
# try to figure out if we have an extended loop. An extended loop
        # is necessary if the loop is in recursive mode or if the special loop
# variable is accessed in the body.
extended_loop = node.recursive or 'loop' in \
find_undeclared(node.iter_child_nodes(
only=('body',)), ('loop',))
        # if we don't have a recursive loop we have to find the shadowed
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if not node.recursive:
aliases = self.push_scope(loop_frame, ('loop',))
# otherwise we set up a buffer and add a function def
else:
self.writeline('def loop(reciter, loop_render_func, depth=0):', node)
self.indent()
self.buffer(loop_frame)
aliases = {}
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
self.writeline('l_loop = missing')
loop_frame.identifiers.add_special('loop')
for name in node.find_all(nodes.Name):
if name.ctx == 'store' and name.name == 'loop':
self.fail('Can\'t assign to special loop variable '
'in for-loop target', name.lineno)
self.pull_locals(loop_frame)
if node.else_:
iteration_indicator = self.temporary_identifier()
self.writeline('%s = 1' % iteration_indicator)
# Create a fake parent loop if the else or test section of a
# loop is accessing the special loop variable and no parent loop
# exists.
if 'loop' not in aliases and 'loop' in find_undeclared(
node.iter_child_nodes(only=('else_', 'test')), ('loop',)):
self.writeline("l_loop = environment.undefined(%r, name='loop')" %
("'loop' is undefined. the filter section of a loop as well "
"as the else block don't have access to the special 'loop'"
" variable of the current loop. Because there is no parent "
"loop it's undefined. Happened in loop on %s" %
self.position(node)))
self.writeline('for ', node)
self.visit(node.target, loop_frame)
self.write(extended_loop and ', l_loop in LoopContext(' or ' in ')
        # if we have an extended loop and a node test, we filter in the
# "outer frame".
if extended_loop and node.test is not None:
self.write('(')
self.visit(node.target, loop_frame)
self.write(' for ')
self.visit(node.target, loop_frame)
self.write(' in ')
if node.recursive:
self.write('reciter')
else:
self.visit(node.iter, loop_frame)
self.write(' if (')
test_frame = loop_frame.copy()
self.visit(node.test, test_frame)
self.write('))')
elif node.recursive:
self.write('reciter')
else:
self.visit(node.iter, loop_frame)
if node.recursive:
self.write(', loop_render_func, depth):')
else:
self.write(extended_loop and '):' or ':')
        # tests in non-extended loops become a continue
if not extended_loop and node.test is not None:
self.indent()
self.writeline('if not ')
self.visit(node.test, loop_frame)
self.write(':')
self.indent()
self.writeline('continue')
self.outdent(2)
self.indent()
self.blockvisit(node.body, loop_frame)
if node.else_:
self.writeline('%s = 0' % iteration_indicator)
self.outdent()
if node.else_:
self.writeline('if %s:' % iteration_indicator)
self.indent()
self.blockvisit(node.else_, loop_frame)
self.outdent()
# reset the aliases if there are any.
if not node.recursive:
self.pop_scope(aliases, loop_frame)
# if the node was recursive we have to return the buffer contents
# and start the iteration code
if node.recursive:
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
self.write('loop(')
self.visit(node.iter, frame)
self.write(', loop)')
self.end_write(frame)
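    # For illustration (assumed, not spelled out in the original source): a
    # plain ``{% for item in seq %}`` compiles to ``for l_item in l_seq:``, an
    # extended loop to ``for l_item, l_loop in LoopContext(l_seq):``, and a
    # recursive loop is wrapped in a local ``def loop(reciter,
    # loop_render_func, depth=0)`` that is called with the iterable.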
def visit_If(self, node, frame):
if_frame = frame.soft()
self.writeline('if ', node)
self.visit(node.test, if_frame)
self.write(':')
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
if node.else_:
self.writeline('else:')
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
def visit_Macro(self, node, frame):
macro_frame = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith('_'):
self.write('context.exported_vars.add(%r)' % node.name)
self.writeline('context.vars[%r] = ' % node.name)
self.write('l_%s = ' % node.name)
self.macro_def(node, macro_frame)
frame.assigned_names.add(node.name)
def visit_CallBlock(self, node, frame):
children = node.iter_child_nodes(exclude=('call',))
call_frame = self.macro_body(node, frame, children)
self.writeline('caller = ')
self.macro_def(node, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, call_frame, forward_caller=True)
self.end_write(frame)
def visit_FilterBlock(self, node, frame):
filter_frame = frame.inner()
filter_frame.inspect(node.iter_child_nodes())
aliases = self.push_scope(filter_frame)
self.pull_locals(filter_frame)
self.buffer(filter_frame)
self.blockvisit(node.body, filter_frame)
self.start_write(frame, node)
self.visit_Filter(node.filter, filter_frame)
self.end_write(frame)
self.pop_scope(aliases, filter_frame)
def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
def visit_Output(self, node, frame):
# if we have a known extends statement, we don't output anything
# if we are in a require_output_check section
if self.has_known_extends and frame.require_output_check:
return
allow_constant_finalize = True
if self.environment.finalize:
func = self.environment.finalize
if getattr(func, 'contextfunction', False) or \
getattr(func, 'evalcontextfunction', False):
allow_constant_finalize = False
elif getattr(func, 'environmentfunction', False):
finalize = lambda x: text_type(
self.environment.finalize(self.environment, x))
else:
finalize = lambda x: text_type(self.environment.finalize(x))
else:
finalize = text_type
# if we are inside a frame that requires output checking, we do so
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# try to evaluate as many chunks as possible into a static
# string at compile time.
body = []
for child in node.nodes:
try:
if not allow_constant_finalize:
raise nodes.Impossible()
const = child.as_const(frame.eval_ctx)
except nodes.Impossible:
body.append(child)
continue
            # the frame can't be volatile here, because otherwise the
# as_const() function would raise an Impossible exception
# at that point.
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = finalize(const)
except Exception:
# if something goes wrong here we evaluate the node
# at runtime for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
        # if we have fewer than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ', ')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 1
if frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' escape or to_string)(')
elif frame.eval_ctx.autoescape:
self.write('escape(')
else:
self.write('to_string(')
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
"contextfunction", False):
self.write('context, ')
close += 1
self.visit(item, frame)
self.write(')' * close)
if frame.buffer is not None:
self.write(', ')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' escape or to_string)(')
close += 1
elif frame.eval_ctx.autoescape:
self.write('escape(')
close += 1
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
'contextfunction', False):
self.write('context, ')
elif getattr(self.environment.finalize,
'evalcontextfunction', False):
self.write('context.eval_ctx, ')
elif getattr(self.environment.finalize,
'environmentfunction', False):
self.write('environment, ')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent()
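    # For illustration (assumed, not spelled out in the original source): short
    # outputs are emitted as individual ``yield``/buffer writes, while longer
    # runs of constants and expressions are folded into a single
    # ``yield '<format string>' % (...)`` statement, as built above.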
def make_assignment_frame(self, frame):
# toplevel assignments however go into the local namespace and
# the current template's context. We create a copy of the frame
# here and add a set so that the Name visitor can add the assigned
# names here.
if not frame.toplevel:
return frame
assignment_frame = frame.copy()
assignment_frame.toplevel_assignments = set()
return assignment_frame
def export_assigned_vars(self, frame, assignment_frame):
if not frame.toplevel:
return
public_names = [x for x in assignment_frame.toplevel_assignments
if not x.startswith('_')]
if len(assignment_frame.toplevel_assignments) == 1:
name = next(iter(assignment_frame.toplevel_assignments))
self.writeline('context.vars[%r] = l_%s' % (name, name))
else:
self.writeline('context.vars.update({')
for idx, name in enumerate(assignment_frame.toplevel_assignments):
if idx:
self.write(', ')
self.write('%r: l_%s' % (name, name))
self.write('})')
if public_names:
if len(public_names) == 1:
self.writeline('context.exported_vars.add(%r)' %
public_names[0])
else:
self.writeline('context.exported_vars.update((%s))' %
', '.join(imap(repr, public_names)))
def visit_Assign(self, node, frame):
self.newline(node)
assignment_frame = self.make_assignment_frame(frame)
self.visit(node.target, assignment_frame)
self.write(' = ')
self.visit(node.node, frame)
self.export_assigned_vars(frame, assignment_frame)
def visit_AssignBlock(self, node, frame):
block_frame = frame.inner()
block_frame.inspect(node.body)
aliases = self.push_scope(block_frame)
self.pull_locals(block_frame)
self.buffer(block_frame)
self.blockvisit(node.body, block_frame)
self.pop_scope(aliases, block_frame)
assignment_frame = self.make_assignment_frame(frame)
self.newline(node)
self.visit(node.target, assignment_frame)
self.write(' = concat(%s)' % block_frame.buffer)
self.export_assigned_vars(frame, assignment_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
if node.ctx == 'store' and frame.toplevel:
frame.toplevel_assignments.add(node.name)
self.write('l_' + node.name)
frame.assigned_names.add(node.name)
def visit_Const(self, node, frame):
val = node.value
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)'
% node.data)
def visit_Tuple(self, node, frame):
self.write('(')
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(idx == 0 and ',)' or ')')
def visit_List(self, node, frame):
self.write('[')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(']')
def visit_Dict(self, node, frame):
self.write('{')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item.key, frame)
self.write(': ')
self.visit(item.value, frame)
self.write('}')
def binop(operator, interceptable=True):
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_binops:
self.write('environment.call_binop(context, %r, ' % operator)
self.visit(node.left, frame)
self.write(', ')
self.visit(node.right, frame)
else:
self.write('(')
self.visit(node.left, frame)
self.write(' %s ' % operator)
self.visit(node.right, frame)
self.write(')')
return visitor
def uaop(operator, interceptable=True):
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_unops:
self.write('environment.call_unop(context, %r, ' % operator)
self.visit(node.node, frame)
else:
self.write('(' + operator)
self.visit(node.node, frame)
self.write(')')
return visitor
visit_Add = binop('+')
visit_Sub = binop('-')
visit_Mul = binop('*')
visit_Div = binop('/')
visit_FloorDiv = binop('//')
visit_Pow = binop('**')
visit_Mod = binop('%')
visit_And = binop('and', interceptable=False)
visit_Or = binop('or', interceptable=False)
visit_Pos = uaop('+')
visit_Neg = uaop('-')
visit_Not = uaop('not ', interceptable=False)
del binop, uaop
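    # For illustration (assumed, not spelled out in the original source):
    # visit_Add renders ``a + b`` as ``(l_a + l_b)``; in a sandboxed
    # environment with an intercepted '+' it becomes
    # ``environment.call_binop(context, '+', l_a, l_b)``.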
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
func_name = '(context.eval_ctx.volatile and' \
' markup_join or unicode_join)'
elif frame.eval_ctx.autoescape:
func_name = 'markup_join'
else:
func_name = 'unicode_join'
self.write('%s((' % func_name)
for arg in node.nodes:
self.visit(arg, frame)
self.write(', ')
self.write('))')
def visit_Compare(self, node, frame):
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
def visit_Operand(self, node, frame):
self.write(' %s ' % operators[node.op])
self.visit(node.expr, frame)
def visit_Getattr(self, node, frame):
self.write('environment.getattr(')
self.visit(node.node, frame)
self.write(', %r)' % node.attr)
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write('[')
self.visit(node.arg, frame)
self.write(']')
else:
self.write('environment.getitem(')
self.visit(node.node, frame)
self.write(', ')
self.visit(node.arg, frame)
self.write(')')
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
self.write(':')
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
self.write(':')
self.visit(node.step, frame)
def visit_Filter(self, node, frame):
self.write(self.filters[node.name] + '(')
func = self.environment.filters.get(node.name)
if func is None:
self.fail('no filter named %r' % node.name, node.lineno)
if getattr(func, 'contextfilter', False):
self.write('context, ')
elif getattr(func, 'evalcontextfilter', False):
self.write('context.eval_ctx, ')
elif getattr(func, 'environmentfilter', False):
self.write('environment, ')
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' Markup(concat(%s)) or concat(%s))' %
(frame.buffer, frame.buffer))
elif frame.eval_ctx.autoescape:
self.write('Markup(concat(%s))' % frame.buffer)
else:
self.write('concat(%s)' % frame.buffer)
self.signature(node, frame)
self.write(')')
def visit_Test(self, node, frame):
self.write(self.tests[node.name] + '(')
if node.name not in self.environment.tests:
self.fail('no test named %r' % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
self.write(')')
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
self.write('environment.undefined(%r)' % ('the inline if-'
'expression on %s evaluated to false and '
'no else section was defined.' % self.position(node)))
self.write('(')
self.visit(node.expr1, frame)
self.write(' if ')
self.visit(node.test, frame)
self.write(' else ')
write_expr2()
self.write(')')
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.sandboxed:
self.write('environment.call(context, ')
else:
self.write('context.call(')
self.visit(node.node, frame)
extra_kwargs = forward_caller and {'caller': 'caller'} or None
self.signature(node, frame, extra_kwargs)
self.write(')')
def visit_Keyword(self, node, frame):
self.write(node.key + '=')
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
self.write('Markup(')
self.visit(node.expr, frame)
self.write(')')
def visit_MarkSafeIfAutoescape(self, node, frame):
self.write('(context.eval_ctx.autoescape and Markup or identity)(')
self.visit(node.expr, frame)
self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
self.write('environment.' + node.name)
def visit_ExtensionAttribute(self, node, frame):
self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
self.write('context')
def visit_Continue(self, node, frame):
self.writeline('continue', node)
def visit_Break(self, node, frame):
self.writeline('break', node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
scope_frame.inspect(node.iter_child_nodes())
aliases = self.push_scope(scope_frame)
self.pull_locals(scope_frame)
self.blockvisit(node.body, scope_frame)
self.pop_scope(aliases, scope_frame)
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
self.writeline('context.eval_ctx.%s = ' % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
except nodes.Impossible:
frame.eval_ctx.volatile = True
else:
setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
safed_ctx = frame.eval_ctx.save()
self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(safed_ctx)
self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
| mit |
wziyong/casperfpga | src/scroll.py | 1 | 9054 | """
Playing with ncurses in Python to scroll up and down, left and right, through a list of data
that is periodically refreshed.
Revs:
2010-12-11 JRM Added concat for status line to prevent bailing on small terminals.
Code cleanup to prevent modification of external variables.
Added left, right page controls
"""
import curses
def screen_teardown():
"""Restore sensible options to the terminal upon exit
"""
curses.nocbreak()
curses.echo()
curses.endwin()
class Screenline(object):
def __init__(self, data, xpos=-1, ypos=-1, absolute=False, attributes=curses.A_NORMAL):
assert type(data) == str
self.data = data
self.xpos = xpos
self.ypos = ypos
self.absolute = absolute
if not isinstance(attributes, list):
attributes = [attributes]
self.line_attributes = attributes
class Scroll(object):
"""Scrollable ncurses screen.
"""
def __init__(self, debug=False):
"""Constructor
"""
self._instruction_string = ''
self._offset_y = 0
self._offset_x = 0
self._screen = None
self._curr_y = 0
self._curr_x = 0
self._sbuffer = []
self._xmin = 0
self._xmax = 0
self._ymin = 0
self._ymax = 0
self._debugging = debug
# set up the screen
def screen_setup(self):
"""Set up a curses screen object and associated options
"""
self._screen = curses.initscr()
self._screen.keypad(1)
self._screen.nodelay(1)
curses.noecho()
curses.cbreak()
height, width = self._screen.getmaxyx()
self._ymax = height - 1
self._xmax = width
def on_keypress(self):
"""
Handle key presses.
"""
key = self._screen.getch()
if key > 0:
if key == 259:
self._offset_y -= 1 # up
elif key == 258:
self._offset_y += 1 # down
elif key == 261:
self._offset_x -= 1 # right
elif key == 260:
self._offset_x += 1 # left
elif chr(key) == 'q':
return [-1, 'q']
elif chr(key) == 'u':
self._offset_y -= self._ymax + 1
elif chr(key) == 'd':
self._offset_y += self._ymax + 1
elif chr(key) == 'l':
self._offset_x += self._xmax
elif chr(key) == 'r':
self._offset_x -= self._xmax
elif chr(key) == 'h':
self._offset_x = 0
self._offset_y = 0
try:
char = chr(key)
except ValueError:
char = '_'
return [key, char]
else:
return [0, '_']
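    # Note (assumed mapping): the raw key codes above correspond to the curses
    # constants KEY_DOWN (258), KEY_UP (259), KEY_LEFT (260) and KEY_RIGHT
    # (261); curses.KEY_UP etc. would be an equivalent, more readable spelling.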
def clear_screen(self):
"""Clear the ncurses screen.
"""
self._screen.clear()
def add_line(self, new_line, xpos=-1, ypos=-1, absolute=False, attributes=curses.A_NORMAL):
"""Add a text line to the screen buffer.
"""
if not isinstance(new_line, str):
raise TypeError('new_line must be a string!')
yposition = ypos
if yposition < 0:
yposition = self._curr_y
self._curr_y += 1
self._sbuffer.append(Screenline(new_line, xpos, yposition, absolute, attributes))
def get_current_line(self):
"""Return the current y position of the internal screen buffer.
"""
return self._curr_y
def set_current_line(self, linenum):
"""Set the current y position of the internal screen buffer.
"""
self._curr_y = linenum
def _load_buffer_from_list(self, screendata):
"""Load the internal screen buffer from a given mixed list of
strings and Screenlines.
"""
if not isinstance(screendata, list):
raise TypeError('Provided screen data must be a list!')
self._sbuffer = []
for line in screendata:
if not isinstance(line, Screenline):
line = Screenline(line)
self._sbuffer.append(line)
def _sbuffer_y_max(self):
"""Work out how many lines the sbuffer needs.
"""
maxy = 0
for sline in self._sbuffer:
if sline.ypos == -1:
maxy += 1
else:
maxy = max(maxy, sline.ypos)
return maxy
def _calculate_screen_pos(self, sline, yposition):
if sline.absolute:
strs = 0
stre = self._xmax
strx = sline.xpos
stry = sline.ypos
else:
stringx = max(sline.xpos, 0) + self._offset_x
if stringx < 0:
xpos = 0
strs = stringx * -1
else:
xpos = stringx
strs = 0
stre = strs + self._xmax
stry = sline.ypos
if stry == -1:
stry = yposition
yposition += 1
strx = xpos
stry -= self._offset_y
return strs, stre, strx, stry, yposition
def draw_screen(self, data=None):
"""
Draw the screen using the provided data
TODO: ylimits, xlimits, proper line counts in the status
"""
self._screen.clear()
if data is not None:
self._load_buffer_from_list(data)
num_lines_total = self._sbuffer_y_max()
yposition = 0
top_line = 0
for sline in self._sbuffer:
(strs, stre, strx, stry, yposition) = self._calculate_screen_pos(sline, yposition)
# if we want to put the string outside the right-hand edge of the terminal, bail
if strx >= self._xmax:
continue
drawstring = sline.data[strs:stre]
if self._debugging:
drawstring += '_(%d,%d,[%d:%d])' % (strx, stry, strs, stre)
try:
if sline.absolute:
self._screen.addstr(stry, strx, drawstring, *sline.line_attributes)
elif (stry >= self._ymin) and (stry < self._ymax):
self._screen.addstr(stry, strx, drawstring, *sline.line_attributes)
top_line = self._offset_y - yposition
except Exception, e:
e.args = ('strs(%d) stre(%d) strx(%d) stry(%d) xmax(%d) ymax(%d)_\'%s\' - ' % (strs, stre, strx, stry,
self._xmax, self._ymax,
drawstring) + e.args[0],)
raise
if yposition + self._offset_y >= self._ymax - 2:
break
if self._debugging:
self._screen.addstr(self._ymax - 2, 0, 'offsets(%d,%d) dims(%d,%d) sbuf_ymax(%d) xlim(%d,%d) ylim(%d,%d)' %
(self._offset_x, self._offset_y, self._xmax, self._ymax + 1,
num_lines_total, self._xmin, self._xmax, self._ymin, self._ymax))
stat_line = 'Showing line %i to %i of %i. Column offset %i. %s Scroll with arrow keys. \
u, d, l, r = page up, down, left and right. h = home, q = quit.' %\
(top_line, yposition, num_lines_total, self._offset_x, self._instruction_string)
self._screen.addstr(self._ymax, 0, stat_line, curses.A_REVERSE)
self._screen.refresh()
def clear_buffer(self):
self._sbuffer = []
self._curr_y = 0
self._curr_x = 0
def set_xlimits(self, xmin=-1, xmax=-1):
if xmin == -1 and xmax == -1:
return
if xmin > -1:
self._xmin = xmin
if xmax > -1:
self._xmax = xmax
def set_ylimits(self, ymin=-1, ymax=-1):
if ymin == -1 and ymax == -1:
return
if ymin > -1:
self._ymin = ymin
if ymax > -1:
self._ymax = ymax
def set_ypos(self, newpos):
self._curr_y = newpos
# set and get the instruction string at the bottom
def get_instruction_string(self):
return self._instruction_string
def set_instruction_string(self, new_string):
self._instruction_string = new_string
def draw_string(self, new_string, **kwargs):
"""Draw a new line to the screen, takes an argument as to whether the screen should be
immediately refreshed or not
"""
raise NotImplementedError
try:
refresh = kwargs.pop('refresh')
except KeyError:
refresh = False
self._screen.addstr(self._curr_y, self._curr_x, new_string, **kwargs)
if new_string.endswith('\n'):
self._curr_y += 1
self._curr_x = 0
else:
self._curr_x += len(new_string)
if refresh:
self._screen.refresh()
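# A minimal usage sketch (assumed, not part of the original module): fill the
# scroll buffer with a few lines and redraw until 'q' is pressed.
def _scroll_demo(num_lines=100):
    scroller = Scroll()
    scroller.screen_setup()
    try:
        scroller.set_instruction_string('Demo buffer.')
        for idx in range(num_lines):
            scroller.add_line('buffer line %d' % idx)
        while True:
            scroller.draw_screen()
            _, char = scroller.on_keypress()
            if char == 'q':
                break
            curses.napms(50)  # getch() is non-blocking here, avoid busy-waiting
    finally:
        screen_teardown()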
# end of file
| gpl-2.0 |
msassmann/texcavator | services/es.py | 1 | 21842 | # -*- coding: utf-8 -*-
"""Elasticsearch functionality"""
import json
import logging
import os
from collections import Counter, defaultdict
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch.client import indices
from django.conf import settings
from texcavator.utils import daterange2dates
logger = logging.getLogger(__name__)
_ES_RETURN_FIELDS = ('article_dc_title',
'paper_dcterms_temporal',
'paper_dcterms_spatial',
'paper_dc_title',
'paper_dc_date')
_KB_DISTRIBUTION_VALUES = {'sd_national': 'Landelijk',
'sd_regional': 'Regionaal/lokaal',
'sd_antilles': 'Nederlandse Antillen',
'sd_surinam': 'Suriname',
'sd_indonesia': 'Nederlands-Indië / Indonesië'}
_KB_ARTICLE_TYPE_VALUES = {'st_article': 'artikel',
'st_advert': 'advertentie',
'st_illust': 'illustratie met onderschrift',
'st_family': 'familiebericht'}
_DOCUMENT_TEXT_FIELD = 'text_content'
_DOCUMENT_TITLE_FIELD = 'article_dc_title'
_AGG_FIELD = _DOCUMENT_TEXT_FIELD
_STEMMING_ANALYZER = 'dutch_analyzer'
def _es():
"""Returns ElasticSearch instance."""
node = {'host': settings.ELASTICSEARCH_HOST,
'port': settings.ELASTICSEARCH_PORT}
if settings.ELASTICSEARCH_USERNAME:
node['http_auth'] = (settings.ELASTICSEARCH_USERNAME, settings.ELASTICSEARCH_PASSWORD)
node['use_ssl'] = settings.ELASTICSEARCH_USE_SSL
return Elasticsearch([node])
def do_search(idx, typ, query, start, num, date_ranges, exclude_distributions,
exclude_article_types, selected_pillars, return_source=False, sort_order='_score'):
"""Returns ElasticSearch search results.
Fetch all documents matching the query and return a list of
elasticsearch results.
This method accepts boolean queries in the Elasticsearch query string
syntax (see Elasticsearch reference).
Parameters:
idx : str
The name of the elasticsearch index
typ : str
The type of document requested
query : str
A query string in the Elasticsearch query string language
start : int
An integer representing the index of the first result to be
retrieved
num : int
The total number of results to be retrieved
date_ranges : list(dict)
A list of dictionaries containg the upper and lower dates of the
requested date ranges
exclude_distributions : list
A list of strings respresenting distributions that should be
excluded from the search
exclude_article_types : list
A list of strings representing article types that should be
excluded from the search
selected_pillars : list
A list of string representing pillars that should be included into
the search. Each pillar is linked to a list of newspapers.
return_source : boolean, optional
A boolean indicating whether the _source of ES documents should be
returned or a smaller selection of document fields. The smaller set
of document fields (stored in _ES_RETURN_FIELDS) is the default
sort_order: string, optional
The sort order for this query. Syntax is fieldname:order, multiple
sort orders can be separated by commas. Note that if the sort_order
doesn't contain _score, no scores will be returned.
Returns:
validity : boolean
A boolean indicating whether the input query string is valid.
results : list
A list of elasticsearch results or a message explaining why the
input query string is invalid.
"""
q = create_query(query, date_ranges, exclude_distributions,
exclude_article_types, selected_pillars)
valid_q = indices.IndicesClient(_es()).validate_query(index=idx,
doc_type=typ,
body=q,
explain=True)
if valid_q.get('valid'):
if return_source:
# for each document return the _source field that contains all
# document fields (no fields parameter in the ES call)
return True, _es().search(index=idx, doc_type=typ, body=q,
from_=start, size=num, sort=sort_order)
else:
            # for each document return the fields listed in _ES_RETURN_FIELDS
return True, _es().search(index=idx, doc_type=typ, body=q,
fields=_ES_RETURN_FIELDS, from_=start,
size=num, sort=sort_order)
return False, valid_q.get('explanations')[0].get('error')
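# A minimal usage sketch (assumed, not part of the original module): calling
# do_search() with the index/doctype from settings and unpacking the hits. The
# 0-based offset and the page size of 20 are assumptions about how callers
# translate the 'startRecord'/'maximumRecords' request parameters.
def _example_do_search(query_string, date_ranges):
    valid, result = do_search(settings.ES_INDEX, settings.ES_DOCTYPE,
                              query_string, 0, 20, date_ranges,
                              exclude_distributions=[], exclude_article_types=[],
                              selected_pillars=[])
    if not valid:
        # for an invalid query string, 'result' holds the ES error explanation
        raise ValueError(result)
    return [hit['_id'] for hit in result['hits']['hits']]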
def count_search_results(idx, typ, query, date_range, exclude_distributions,
exclude_article_types, selected_pillars):
"""Count the number of results for a query
"""
q = create_query(query, date_range, exclude_distributions,
exclude_article_types, selected_pillars)
return _es().count(index=idx, doc_type=typ, body=q)
def get_document(idx, typ, doc_id):
"""Return a document given its id.
Parameters:
idx : str
The name of the elasticsearch index
typ : str
The type of document requested
doc_id : str
The id of the document to be retrieved
"""
try:
result = _es().get(index=idx, doc_type=typ, id=doc_id)
except:
return None
return result['_source']
def create_query(query_str, date_ranges, exclude_distributions,
exclude_article_types, selected_pillars):
"""Create elasticsearch query from input string.
This method accepts boolean queries in the Elasticsearch query string
syntax (see Elasticsearch reference).
Returns a dict that represents the query in the elasticsearch query DSL.
"""
filter_must = []
filter_should = []
filter_must_not = []
for date_range in date_ranges:
filter_should.append(
{
'range': {
'paper_dc_date': {
'gte': date_range['lower'],
'lte': date_range['upper']
}
}
}
)
    # Filters on newspapers. This reads from a local file, as Celery can't read from the database.
newspaper_ids = []
if selected_pillars:
try:
with open(os.path.join(settings.PROJECT_PARENT, 'newspapers.txt'), 'rb') as in_file:
categorization = json.load(in_file)
for pillar, n_ids in categorization.iteritems():
if int(pillar) in selected_pillars:
newspaper_ids.extend(n_ids)
except IOError:
logging.warning('No newspaper classification found. Continuing without filter on newspapers.')
if newspaper_ids:
filter_must.append({'terms': {'paper_dc_identifier': newspaper_ids}})
for ds in exclude_distributions:
filter_must_not.append(
{"term": {"paper_dcterms_spatial": _KB_DISTRIBUTION_VALUES[ds]}})
for typ in exclude_article_types:
filter_must_not.append(
{"term": {"article_dc_subject": _KB_ARTICLE_TYPE_VALUES[typ]}})
query = {
'query': {
'filtered': {
'filter': {
'bool': {
'must': filter_must,
'should': filter_should,
'must_not': filter_must_not
}
}
}
}
}
# Add the query string part.
if query_str:
# Temporary hotfix for duplicate newspapers, see #73.
if getattr(settings, 'KB_HOTFIX_DUPLICATE_NEWSPAPERS', True):
query_str += ' -identifier:ddd\:11*'
alw = getattr(settings, 'QUERY_ALLOW_LEADING_WILDCARD', True)
query['query']['filtered']['query'] = {'query_string': {'query': query_str, 'allow_leading_wildcard': alw}}
return query
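# For illustration (assumed example, not part of the original module): for
# create_query('water', [{'lower': '1900-01-01', 'upper': '1920-12-31'}], [], [], [])
# the returned dict has roughly this shape (depending on settings, the
# duplicate-newspaper hotfix may also append '-identifier:ddd\:11*' to the
# query string):
#
# {'query': {'filtered': {
#     'filter': {'bool': {'must': [], 'must_not': [],
#                         'should': [{'range': {'paper_dc_date':
#                             {'gte': '1900-01-01', 'lte': '1920-12-31'}}}]}},
#     'query': {'query_string': {'query': 'water',
#                                'allow_leading_wildcard': True}}}}}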
def create_ids_query(ids):
"""Returns an Elasticsearch ids query.
Create Elasticsearch query that returns documents based on a list of
ids.
Parameters:
ids : list
A list containing document ids
Returns:
query : dict
A dictionary representing an ES ids query
"""
query = {
'query': {
'filtered': {
'filter': {
'ids': {
'type': settings.ES_DOCTYPE,
'values': ids
}
}
}
}
}
return query
def create_day_statistics_query(date_range, agg_name):
"""Create ES query to gather day statistics for the given date range.
This function is used by the gatherstatistics management command.
"""
date_lower = datetime.strptime(date_range['lower'], '%Y-%m-%d').date()
date_upper = datetime.strptime(date_range['upper'], '%Y-%m-%d').date()
diff = date_upper-date_lower
num_days = diff.days
return {
'query': {
'filtered': {
'filter': {
'bool': {
'must': [
{
'range': {
'paper_dc_date': {
'gte': date_range['lower'],
'lte': date_range['upper']
}
}
}
]
}
},
'query': {
'match_all': {}
}
}
},
'aggs': {
agg_name: {
'terms': {
'field': 'paper_dc_date',
'size': num_days
}
}
},
'size': 0
}
def word_cloud_aggregation(agg_name, num_words=100):
"""Return aggragation part of terms aggregation (=word cloud) that can be
added to any Elasticsearch query."""
agg = {
agg_name: {
'terms': {
'field': _AGG_FIELD,
'size': num_words
}
}
}
return agg
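# For illustration (assumed, not part of the original module): with the
# defaults above, word_cloud_aggregation('words') returns
# {'words': {'terms': {'field': 'text_content', 'size': 100}}}.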
def single_document_word_cloud(idx, typ, doc_id, min_length=0, stopwords=[], stems=False):
"""Return data required to draw a word cloud for a single document.
Parameters:
idx : str
The name of the elasticsearch index
typ : str
The type of document requested
doc_id : str
The id of the document the word cloud should be created for
min_length : int, optional
The minimum length of words in the word cloud
stopwords : list, optional
A list of words that should be removed from the word cloud
stems : boolean, optional
Whether or not we should look at the stemmed columns
Returns:
dict : dict
A dictionary that contains word frequencies for all the terms in
the document.
.. code-block:: javascript
{
'status': 'ok',
'result':
{
term: count
},
...
}
"""
if not doc_id:
return {
'status': 'error',
'error': 'No document id provided.'
}
bdy = {
'fields': get_cloud_fields(stems)
}
t_vector = _es().termvector(index=idx, doc_type=typ, id=doc_id, body=bdy)
if t_vector.get('found', False):
wordcloud = Counter()
for field, data in t_vector.get('term_vectors').iteritems():
for term, count_dict in data.get('terms').iteritems():
if term not in stopwords and len(term) >= min_length:
wordcloud[term] += int(count_dict.get('term_freq'))
return {
'result': wordcloud,
'status': 'ok'
}
return {
'status': 'error',
'error': 'Document with id "{}" could not be found.'.format(doc_id)
}
def multiple_document_word_cloud(idx, typ, query, date_ranges, dist, art_types, pillars,
ids=None):
"""Return data required to draw a word cloud for multiple documents
This function generates word cloud data using terms aggregations in ES.
However, for newspaper articles this approach is not feasible; ES runs out
of memory very quickly. Therefore, another approach to generating word
cloud data was added: termvector_word_cloud
See also:
:func:`single_document_word_cloud` generate data for a single document
word cloud
:func:`termvector_word_cloud` generate word cloud data using termvector
approach
"""
if not ids:
ids = []
agg_name = 'words'
# word cloud based on query
if query:
q = create_query(query, date_ranges, dist, art_types, pillars)
q['aggs'] = word_cloud_aggregation(agg_name)
# word cloud based on document ids
elif not query and len(ids) > 0:
q = create_ids_query(ids)
q['aggs'] = word_cloud_aggregation(agg_name)
else:
return {
'status': 'error',
'error': 'No valid query provided for word cloud generation.'
}
aggr = _es().search(index=idx, doc_type=typ, body=q, size=0)
aggr_result_list = aggr.get('aggregations').get(agg_name).get('buckets')
max_count = aggr_result_list[0].get('doc_count')
result = []
for term in aggr_result_list:
result.append({
'term': term.get('key'),
'count': term.get('doc_count')
})
return {
'max_count': max_count,
'result': result,
'status': 'ok',
'took': aggr.get('took', 0)
}
def termvector_wordcloud(idx, typ, doc_ids, min_length=0, stems=False, add_freqs=True):
"""Return word frequencies in a set of documents.
Return data required to draw a word cloud for multiple documents by
'manually' merging termvectors.
The counter returned by this method can be transformed into the input
expected by the interface by passing it to the normalize_cloud
method.
Parameters:
idx : str
The name of the elasticsearch index
typ : str
The type of document requested
doc_ids : list(str)
The requested documents
min_length : int, optional
The minimum length of words in the word cloud
stems : boolean, optional
Whether or not we should look at the stemmed columns
add_freqs : boolean, optional
Whether or not we should count total occurrences
See also
:func:`single_document_word_cloud` generate data for a single document
word cloud
:func:`multiple_document_word_cloud` generate word cloud data using
terms aggregation approach
"""
wordcloud = Counter()
# If no documents are provided, return an empty counter.
if not doc_ids:
return wordcloud
bdy = {
'ids': doc_ids,
'parameters': {
'fields': get_cloud_fields(stems),
'term_statistics': False,
'field_statistics': False,
'offsets': False,
'payloads': False,
'positions': False
}
}
t_vectors = _es().mtermvectors(index=idx, doc_type=typ, body=bdy)
for doc in t_vectors.get('docs'):
temp = defaultdict(int) if add_freqs else set()
for field, data in doc.get('term_vectors').iteritems():
for term, details in data.get('terms').iteritems():
if len(term) >= min_length:
if add_freqs:
temp[term] += int(details['term_freq'])
else:
temp.add(term) # only count individual occurrences
wordcloud.update(temp)
return wordcloud
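# A usage sketch (assumed, not part of the original module): build one merged
# Counter for all results of a query by combining this function with the
# document_id_chunks() generator defined further down; the chunk size and
# minimum word length are arbitrary choices.
def _example_query_wordcloud(idx, typ, query, date_ranges):
    totals = Counter()
    for chunk in document_id_chunks(1000, idx, typ, query, date_ranges):
        totals.update(termvector_wordcloud(idx, typ, chunk, min_length=3))
    return totals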
def get_search_parameters(req_dict):
"""Return a tuple of search parameters extracted from a dictionary
Parameters:
req_dict : dict
A Django request dictionary
Returns:
dict : dict
A dictionary that contains query metadata
"""
query_str = req_dict.get('query', None)
start = int(req_dict.get('startRecord', 1))
result_size = int(req_dict.get('maximumRecords', 20))
date_range_str = req_dict.get('dateRange', settings.TEXCAVATOR_DATE_RANGE)
dates = daterange2dates(date_range_str)
distributions = []
for ds in _KB_DISTRIBUTION_VALUES.keys():
use_ds = json.loads(req_dict.get(ds, "true"))
if not use_ds:
distributions.append(ds)
article_types = []
for typ in _KB_ARTICLE_TYPE_VALUES:
use_type = json.loads(req_dict.get(typ, "true"))
if not use_type:
article_types.append(typ)
pillars = [int(x) for x in req_dict.getlist('pillars')]
collection = req_dict.get('collection', settings.ES_INDEX)
sort_order = req_dict.get('sort_order', '_score')
return {
'query': query_str,
'start': start,
'result_size': result_size,
'dates': dates,
'distributions': distributions,
'article_types': article_types,
'pillars': pillars,
'collection': collection,
'sort_order': sort_order
}
def get_document_ids(idx, typ, query, date_ranges, exclude_distributions=[],
exclude_article_types=[], selected_pillars=[]):
"""Return a list of document ids and dates for a query
"""
doc_ids = []
q = create_query(query, date_ranges, exclude_distributions,
exclude_article_types, selected_pillars)
date_field = 'paper_dc_date'
fields = [date_field]
get_more_docs = True
start = 0
num = 2500
while get_more_docs:
results = _es().search(index=idx, doc_type=typ, body=q, fields=fields,
from_=start, size=num)
for result in results['hits']['hits']:
doc_ids.append(
{
'identifier': result['_id'],
'date': datetime.strptime(result['fields'][date_field][0],
'%Y-%m-%d').date()
})
start += num
if len(results['hits']['hits']) < num:
get_more_docs = False
return doc_ids
def document_id_chunks(chunk_size, idx, typ, query, date_ranges, dist=[],
art_types=[], selected_pillars=[]):
"""Generator for retrieving document ids for all results of a query.
Used by the generate_tv_cloud task.
"""
q = create_query(query, date_ranges, dist, art_types, selected_pillars)
get_more_docs = True
start = 0
fields = []
while get_more_docs:
results = _es().search(index=idx, doc_type=typ, body=q, from_=start,
fields=fields, size=chunk_size)
yield [result['_id'] for result in results['hits']['hits']]
start = start + chunk_size
if len(results['hits']['hits']) < chunk_size:
get_more_docs = False
def day_statistics(idx, typ, date_range, agg_name):
"""Gather day statistics for all dates in the date range
This function is used by the gatherstatistics management command.
"""
q = create_day_statistics_query(date_range, agg_name)
results = _es().search(index=idx, doc_type=typ, body=q, size=0)
if 'took' in results:
return results
return None
def metadata_aggregation(idx, typ, query, date_ranges,
exclude_distributions, exclude_article_types, selected_pillars):
body = create_query(query, date_ranges,
exclude_distributions, exclude_article_types, selected_pillars)
body['aggs'] = metadata_dict()
return _es().search(index=idx, doc_type=typ, body=body, search_type='count')
def metadata_dict():
return {
"distribution": {
"terms": {
"field": "paper_dcterms_spatial"
}
},
"articletype": {
"terms": {
"field": "article_dc_subject"
}
},
"newspaper_ids": {
"terms": {
"field": "paper_dc_identifier"
}
},
"newspapers": {
"terms": {
"field": "paper_dc_title.raw",
"size": 10
}
}
}
def get_cloud_fields(stems=False):
"""
:param stems: Whether or not to use the stemmed versions of the fields.
:return: The fields on which the word cloud has to be created
"""
fields = [_DOCUMENT_TEXT_FIELD, _DOCUMENT_TITLE_FIELD]
if stems and settings.STEMMING_AVAILABLE:
fields = [f + '.stemmed' for f in fields]
return fields
def get_stemmed_form(idx, word):
"""
Returns the stemmed form of a word for this
Parameters:
idx : str
The name of the elasticsearch index
word : str
The input word
"""
result = indices.IndicesClient(_es()).analyze(index=idx, text=word, analyzer=_STEMMING_ANALYZER)
return result['tokens'][0]['token']
| apache-2.0 |
myerpengine/odoo | addons/account_budget/wizard/__init__.py | 444 | 1196 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_budget_crossovered_report
import account_budget_analytic
import account_budget_crossovered_summary_report
import account_budget_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chand3040/cloud_that | lms/djangoapps/verify_student/migrations/0009_auto__change_softwaresecurephotoverification_window_id_default_none.py | 84 | 9796 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    def forwards(self, orm):
# Changing field 'SoftwareSecurePhotoVerification.window'. Setting its default value to None
if db.backend_name == 'mysql':
db.execute('ALTER TABLE verify_student_softwaresecurephotoverification CHANGE `window_id` `window_id` int(11) DEFAULT NULL;')
def backwards(self, orm):
# Changing field 'SoftwareSecurePhotoVerification.window'
db.alter_column('verify_student_softwaresecurephotoverification', 'window_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['reverification.MidcourseReverificationWindow']))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reverification.midcoursereverificationwindow': {
'Meta': {'object_name': 'MidcourseReverificationWindow'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'verify_student.incoursereverificationconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'InCourseReverificationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'verify_student.skippedreverification': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'SkippedReverification'},
'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skipped_checkpoint'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'verify_student.softwaresecurephotoverification': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'display': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'42ae367f-f6eb-456b-84c8-a3fd2baf4208'", 'max_length': '255', 'db_index': 'True'}),
'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'window': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['reverification.MidcourseReverificationWindow']"})
},
'verify_student.verificationcheckpoint': {
'Meta': {'unique_together': "(('course_id', 'checkpoint_location'),)", 'object_name': 'VerificationCheckpoint'},
'checkpoint_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo_verification': ('django.db.models.fields.related.ManyToManyField', [], {'null': 'True', 'to': "orm['verify_student.SoftwareSecurePhotoVerification']"})
},
'verify_student.verificationstatus': {
'Meta': {'object_name': 'VerificationStatus'},
'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'checkpoint_status'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['verify_student']
| agpl-3.0 |
chongtianfeiyu/kbengine | kbe/tools/xlsx2py/xlsx2py/py2excel.py | 35 | 9561 | # -*- coding: utf-8 -*-
"""
*****************************************************************************************
使用方法:python py2excel pyfile(utf-8) readexcelfile, writeexcelfile
pyfile请使用utf-8,不支持ANSI, py中的应有字典datas, allDatas(py文件至少有datas)
readexcelfile为生成py文件的母表
writeexcelfile 为导出的xlsx文件
*****************************************************************************************
"""
import sys
import os
import xlsxtool
from ExcelTool import ExcelTool
from config import *
class Sheet(object):
"""
Lightweight wrapper around a single worksheet.
"""
@property
def sheet(self):
return self.__parentBook.getSheetByIndex(self.__index)
def __init__(self, parentBook, sheetIndex):
self.__parentBook = parentBook
self.__index = sheetIndex
def getIndex(self):
return self.__index
def getParentBook(self):
return self.__parentBook
def getColCount(self):
return self.__parentBook.getColCount(self.__index)
def getColValues(self, col):
return self.__parentBook.getColValues(self.sheet, col)
def getRowValues(self, row):
return self.__parentBook.getRowValues(self.sheet, row)
class py2excel(object):
"""
"""
def __init__(self, pyfile, sourcefile, dstfile):
"""
pyfile:py, sourcefile:source excel, excel:dest excel
"""
self.pyfile = os.path.abspath(pyfile)
if sourcefile == '':
self.sourcefile = sourcefile
else:
self.sourcefile = os.path.abspath(sourcefile)
self.dstfile = os.path.abspath(dstfile)
self.xlsx = None
self.xbook = None
self.sheetCNames = {}
self.sheetENames = {}
self.mapSheet = {}
#################### import the py module #######################
def importPyModule(self):
"""
import self.pyfile as python module
"""
self.pyModule = None
try:
sys.path.append(PY_MODULE_PATH)
except NameError:
pass
#try:
pyPath, filename = os.path.split(self.pyfile)
pypos = filename.strip().rfind(".py")
if pypos < 0:
print( "pypypypypypypypy")
else:
filename = filename[:pypos]
sys.path.append(pyPath)
#try:
self.pyModule = __import__(filename)
#except:
#print( 'import %s' %(self.pyfile))
#sys.exit(1)
sys.path.pop(-1)
sys.path.pop(-1)
def getSheetNameFromModule(self):
if hasattr(self.pyModule, 'allDatas'):
return self.pyModule.allDatas
else:
return None
############################ read information from the design workbook #######################################
def openXlsx(self):
if xlsxtool.checkExtName(self.sourcefile, '.xlsx') or xlsxtool.checkExtName(self.sourcefile, ".xls"):
self.xbook = ExcelTool(self.sourcefile)
if not self.xbook.getWorkbook():
print( "打开文件失败" )
return
self.xlsx = self.xbook.getXLSX()
def getSheetCNames(self):
allDatas = self.getSheetNameFromModule()
sheetCNames = {}
for index in range(1, self.xbook.getSheetCount() + 1):
sheetName = self.xbook.getSheetNameByIndex(index)
if sheetName.startswith(EXPORT_PREFIX_CHAR):
if allDatas is None:
sheetCNames[index] = sheetName
elif sheetName[1:].encode("utf-8") in allDatas: #py文件编码认为是utf-8
sheetCNames[index] = sheetName
if len(sheetCNames) == 0:
print( 'no sheet' )
self.xbook.close()
sys.exit(1)
if allDatas is None and len(sheetCNames) > 1: # mainly handles the case where allDatas is missing
for k,v in sheetCNames.iteritems():
print( "%d:%s"%(k,v) )
while True:
ii = raw_input('input your choice:')
try:
ii = int(ii)
except:
continue
if ii > 0 and ii < len(sheetCNames):
print( sheetCNames[ii] )
self.sheetCNames[ii] = sheetCNames[ii]
break
else:
self.sheetCNames = sheetCNames
def readXlsxHeader(self):
"""
Read the Chinese/English header mapping.
"""
if self.xlsx is None:
print( "no file opened" )
self.names = {} # sn: Chinese sheet name; engName/chnName: English (Chinese) name of a dict key
for si, sn in self.sheetCNames.iteritems(): #chinese name of sheetname, sheetindex
sheet = Sheet(self.xbook, si)
self.names[sn] = {}
tmpEInt = 1
tmpCInt = 1
for (engStruct, chnName) in zip(sheet.getRowValues(EXPORT_DEFINE_ROW -1), sheet.getRowValues(EXPORT_DEFINE_ROW)):
if engStruct.find('['):
engName = engStruct[:engStruct.find('[')]
else:
engName = 'undefineE_%d'%(tmpEInt,)
tmpEInt += 1
if chnName is None:
chnName = 'undefineC_%d'%(tmpCInt,)
tmpCInt += 1
self.names[sn][engName] = chnName
self.sheet = None
self.xbook.close() # required when overwriting the target file
self.xbook = None
return self.names
def writeNewXlsx(self):
"""
Write the py dict out to the xlsx workbook.
"""
def getWorkbook():
dirs, filename = os.path.split(self.dstfile)
if not os.path.isdir(dirs):
os.makedirs(dirs)
return ExcelTool(self.dstfile)
if self.xbook is not None:
self.xbook.close()
self.xbook = None
self.xbook = getWorkbook()
if os.path.isfile(self.dstfile):
self.xbook.getWorkbook(forcedClose = True)
if self.xbook.getXApp().Workbooks.Count == 0:
newWB = self.xbook.getXApp().Workbooks.Add()
newWB.SaveAs(self.dstfile)
newWB.Close()
if self.xbook.getXLSX() is None:
if not self.xbook.getWorkbook(forcedClose =True):
print( "unknow error" )
return
if self.sourcefile != '':
self.writeXlsxWithC()
else:
self.writeXlsxWithoutC() # no Chinese header sheet
def writeXlsxWithoutC(self): # no Chinese header sheet
self.parseWriteSheet('datas')
data = None
if hasattr(self.pyModule, 'datas'):
data = self.pyModule.datas
if data is None:
return
headerKeys = self.getHeaderKeys(data)
self.newSheet = self.getWriteSheet('datas')
self.writeXlsxHeader(headerKeys)
self.writeData2Cells(data, headerKeys)
self.xbook.close(saveChanges = True)
def writeXlsxWithC(self): # workbook with Chinese headers
cnames = self.names.keys()
self.parseWriteSheet(cnames)
for cname, e2cDict in self.names.iteritems():
self.newSheet = self.getWriteSheet(cname)
self.newSheet.UsedRange = None # clear the sheet contents
data = None
if self.getSheetNameFromModule() is not None:
if cname[1:].encode("utf-8") not in self.getSheetNameFromModule():
continue
else:
data = self.getSheetNameFromModule()[cname[1:].encode("utf-8")]
elif hasattr(self.pyModule, 'datas'):
data = self.pyModule.datas
if data is None or not isinstance(data, dict):
continue
headerKeys = self.getHeaderKeys(data)
headerCNames = []
for p, he in enumerate(headerKeys):
cname = e2cDict.get(he, "py_%s"%(str(he),))
headerCNames.append(cname)
self.writeXlsxHeader(headerCNames)
self.writeData2Cells(data, headerKeys)
self.xbook.close(saveChanges = True)
def writeXlsxHeader(self, headerCNames):
"""
Write the header to the first row of the exported xlsx.
"""
for pos, cn in enumerate(headerCNames): # ANSI encoded
self.newSheet.Cells(1, pos+1).Value = cn
def writeData2Cells(self, data, headerKeys):
"""
Write the dict data into the Excel sheet.
"""
if self.newSheet is None:
return
for vp, v in enumerate(data.itervalues()): #value include key
for p, he in enumerate(headerKeys):
text = self.convertType(v.get(he, ''))
self.newSheet.Cells(vp+2, p+1).Value = text
return
def getHeaderKeys(self, data):
headerKeys = []
for v in data.itervalues(): #{1111:{'key':values,,}}
for vk in v.keys():
if vk not in headerKeys:
headerKeys.append(vk)
return headerKeys
def getWriteSheet(self, cname):
"""
Pick the sheet in the workbook that the data should be written to.
"""
if cname in self.repeatUse:
newSheet = self.xbook.getSheetByIndex(self.repeatUse.pop(cname))
elif len(self.useless) > 0:
newSheet = self.xbook.getSheetByIndex(self.useless.pop(-1))
newSheet.Name = cname
else:
newSheet = self.xbook.getXLSX().Sheets.Add()
newSheet.Name = cname
return newSheet
def parseWriteSheet(self, cnames):
"""
Analyse the sheets about to be written so that existing sheets are kept.
"""
self.repeatUse = {} # sheets that need to be overwritten
self.useless = [] # sheets regarded as unused; new sheets are taken from here when needed
for index in range(1, self.xbook.getSheetCount()+1):
name = self.xbook.getSheetNameByIndex(index)
if name in cnames:
self.repeatUse[name] = index
else:
self.useless.append(index)
return
def convertType(self, val):
"""
Type conversion.
"""
if isinstance(val, str):
return val.decode("utf-8")
elif isinstance(val, (dict, list, tuple)):
return xlsxtool.value_to_text(val)
return val
def run(self):
self.importPyModule()
if self.sourcefile != '':
self.openXlsx()
self.getSheetCNames()
self.readXlsxHeader()
self.writeNewXlsx()
if __name__ == '__main__':
if len(sys.argv[1:]) == 2: # no Chinese design workbook
pyfile, dstfile = sys.argv[1:]
a = py2excel(pyfile, '', dstfile)
a.run()
elif len(sys.argv[1:]) == 3: # with a Chinese design workbook
pyfile, sourcefile, dstfile = sys.argv[1:]
if False in map(lambda x:os.path.isfile(x.decode('gb2312')), sys.argv[1:3]):
print( 'input file not found'.decode("utf-8") )
sys.exit(1)
a = py2excel(pyfile, sourcefile, dstfile)
a.run()
else:
print( __doc__.decode("utf-8") )
sys.exit(1)
| lgpl-3.0 |
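# Hedged usage sketch for the py2excel converter above (Python 2). The file
# names here are illustrative assumptions, not part of the original project;
# the tool itself relies on Excel COM access through its ExcelTool helper.
from py2excel import py2excel

# With a source design workbook: Chinese column headers are read back from it.
py2excel('item_datas.py', 'design.xlsx', 'export.xlsx').run()

# Without a source workbook: a single plain 'datas' sheet is written instead.
py2excel('item_datas.py', '', 'export_plain.xlsx').run()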
cristianquaglio/odoo | addons/payment_adyen/controllers/main.py | 48 | 2053 | # -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
import logging
import pprint
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class AdyenController(http.Controller):
_return_url = '/payment/adyen/return/'
@http.route([
'/payment/adyen/return',
], type='http', auth='none')
def adyen_return(self, **post):
_logger.info('Beginning Adyen form_feedback with post data %s', pprint.pformat(post)) # debug
if post.get('authResult') not in ['CANCELLED']:
request.registry['payment.transaction'].form_feedback(request.cr, SUPERUSER_ID, post, 'adyen', context=request.context)
return_url = post.pop('return_url', '')
if not return_url:
custom = json.loads(post.pop('merchantReturnData', '{}'))
return_url = custom.pop('return_url', '/')
return werkzeug.utils.redirect(return_url)
@http.route([
'/payment/adyen/notification',
], type='http', auth='none', methods=['POST'])
def adyen_notification(self, **post):
tx_id = post.get('merchantReference') and request.registry['payment.transaction'].search(request.cr, SUPERUSER_ID, [('reference', 'in', [post.get('merchantReference')])], limit=1, context=request.context)
if post.get('eventCode') in ['AUTHORISATION'] and tx_id:
tx = request.registry['payment.transaction'].browse(request.cr, SUPERUSER_ID, tx_id, context=request.context)
states = (post.get('merchantReference'), post.get('success'), tx.state)
if (post.get('success') == 'true' and tx.state == 'done') or (post.get('success') == 'false' and tx.state in ['cancel', 'error']):
_logger.info('Notification from Adyen for the reference %s: received %s, state is %s', *states)
else:
_logger.warning('Notification from Adyen for the reference %s: received %s but state is %s', *states)
return '[accepted]'
| apache-2.0 |
jat255/hyperspy | hyperspy/_components/pes_core_line_shape.py | 2 | 3096 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import math
from hyperspy.component import Component
sqrt2pi = np.sqrt(2 * np.pi)
class PESCoreLineShape(Component):
"""
"""
def __init__(self, A=1., FWHM=1., origin=0.):
Component.__init__(self, ['A', 'FWHM', 'origin', 'ab', 'shirley'])
self.shirley.free = False
self.ab.value = 0
self.ab.free = False
self.A.value = A
self.FWHM.value = FWHM
self.origin.value = origin
self._position = self.origin
# Boundaries
self.A.bmin = 0.
self.A.bmax = None
self.FWHM.bmin = None
self.FWHM.bmax = None
self.isbackground = False
self.convolved = True
# Gradients
self.A.grad = self.grad_A
self.FWHM.grad = self.grad_FWHM
self.origin.grad = self.grad_origin
self.ab.grad = self.grad_ab
# Options
self.factor = 1.
self.Shirley = False
def function(self, x):
"""
Given a one-dimensional array x containing the energies at which
you want to evaluate the model, returns the value of the model
for the current parameters.
"""
a0 = self.A.value
a1 = self.origin.value
a2 = self.FWHM.value
a3 = self.ab.value
k = self.shirley.value
f = self.factor * a0 * \
np.exp(-1 * math.log(2) * ((x - (a1 - a3)) / a2) ** 2)
if self.Shirley:
cf = np.cumsum(f)
cf = cf[-1] - cf
self.cf = cf
return cf * k + f
else:
return f
def grad_A(self, x):
return self.function(x) / self.A.value
def grad_FWHM(self, x):
a0 = self.A.value
a1 = self.origin.value
a2 = self.FWHM.value
a3 = self.ab.value
return self.factor * (2 * math.log(2) * a0 * (x + a3 - a1) ** 2 *
np.exp(-(math.log(2) * (x + a3 - a1) ** 2) / a2 ** 2)) / a2 ** 3
def grad_origin(self, x):
a0 = self.A.value
a1 = self.origin.value
a2 = self.FWHM.value
a3 = self.ab.value
return self.factor * (2 * math.log(2) * a0 * (x + a3 - a1) *
np.exp(-(math.log(2) * (x + a3 - a1) ** 2) / a2 ** 2)) / a2 ** 2
def grad_ab(self, x):
return -self.grad_origin(x)
| gpl-3.0 |
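# Minimal evaluation sketch for the component above; the energy axis and
# parameter values are illustrative assumptions.
import numpy as np
from hyperspy._components.pes_core_line_shape import PESCoreLineShape

line = PESCoreLineShape(A=10., FWHM=1.5, origin=284.6)
energies = np.linspace(280., 290., 200)
plain = line.function(energies)            # plain Gaussian-like core line
line.shirley.value = 0.01
line.Shirley = True                        # switch on the Shirley-style background term
with_background = line.function(energies)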
Leoniela/nipype | nipype/interfaces/fsl/tests/test_auto_Complex.py | 9 | 3807 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import Complex
def test_Complex_inputs():
input_map = dict(args=dict(argstr='%s',
),
complex_cartesian=dict(argstr='-complex',
position=1,
xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
),
complex_in_file=dict(argstr='%s',
position=2,
),
complex_in_file2=dict(argstr='%s',
position=3,
),
complex_merge=dict(argstr='-complexmerge',
position=1,
xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge', 'start_vol', 'end_vol'],
),
complex_out_file=dict(argstr='%s',
genfile=True,
position=-3,
xor=['complex_out_file', 'magnitude_out_file', 'phase_out_file', 'real_out_file', 'imaginary_out_file', 'real_polar', 'real_cartesian'],
),
complex_polar=dict(argstr='-complexpolar',
position=1,
xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
),
complex_split=dict(argstr='-complexsplit',
position=1,
xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
),
end_vol=dict(argstr='%d',
position=-1,
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
imaginary_in_file=dict(argstr='%s',
position=3,
),
imaginary_out_file=dict(argstr='%s',
genfile=True,
position=-3,
xor=['complex_out_file', 'magnitude_out_file', 'phase_out_file', 'real_polar', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
),
magnitude_in_file=dict(argstr='%s',
position=2,
),
magnitude_out_file=dict(argstr='%s',
genfile=True,
position=-4,
xor=['complex_out_file', 'real_out_file', 'imaginary_out_file', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
),
output_type=dict(),
phase_in_file=dict(argstr='%s',
position=3,
),
phase_out_file=dict(argstr='%s',
genfile=True,
position=-3,
xor=['complex_out_file', 'real_out_file', 'imaginary_out_file', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
),
real_cartesian=dict(argstr='-realcartesian',
position=1,
xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
),
real_in_file=dict(argstr='%s',
position=2,
),
real_out_file=dict(argstr='%s',
genfile=True,
position=-4,
xor=['complex_out_file', 'magnitude_out_file', 'phase_out_file', 'real_polar', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
),
real_polar=dict(argstr='-realpolar',
position=1,
xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
),
start_vol=dict(argstr='%d',
position=-2,
),
terminal_output=dict(nohash=True,
),
)
inputs = Complex.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Complex_outputs():
output_map = dict(complex_out_file=dict(),
imaginary_out_file=dict(),
magnitude_out_file=dict(),
phase_out_file=dict(),
real_out_file=dict(),
)
outputs = Complex.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
hkmshb/bottle | test/test_formsdict.py | 11 | 1176 | # -*- coding: utf-8 -*-
# '瓶' means "Bottle"
import unittest
from bottle import FormsDict, touni, tob
class TestFormsDict(unittest.TestCase):
def test_attr_access(self):
""" FomsDict.attribute returs string values as unicode. """
d = FormsDict(py2=tob('瓶'), py3=tob('瓶').decode('latin1'))
self.assertEqual(touni('瓶'), d.py2)
self.assertEqual(touni('瓶'), d.py3)
def test_attr_missing(self):
""" FomsDict.attribute returs u'' on missing keys. """
d = FormsDict()
self.assertEqual(touni(''), d.missing)
def test_attr_unicode_error(self):
""" FomsDict.attribute returs u'' on UnicodeError. """
d = FormsDict(latin=touni('öäüß').encode('latin1'))
self.assertEqual(touni(''), d.latin)
d.input_encoding = 'latin1'
self.assertEqual(touni('öäüß'), d.latin)
def test_decode_method(self):
d = FormsDict(py2=tob('瓶'), py3=tob('瓶').decode('latin1'))
d = d.decode()
self.assertFalse(d.recode_unicode)
self.assertTrue(hasattr(list(d.keys())[0], 'encode'))
self.assertTrue(hasattr(list(d.values())[0], 'encode'))
| mit |
bjmnbraun/icestick_fastio | thirdparty/magma/magma/verilog.py | 1 | 5252 | import types
import operator
from collections import OrderedDict, Sequence
from magma.port import INPUT, OUTPUT, INOUT, flip
from magma.ref import DefnRef
from magma.t import IntegerTypes
from magma.bit import BitType, VCC, GND
from magma.array import ArrayKind, ArrayType
from magma.circuit import *
from magma.wire import wiredefaultclock
#__all__ = ['hstr', 'bstr']
__all__ = ['hstr']
# return the hex character for int n
def hex(n):
if n < 10: return chr(ord('0')+n)
else: return chr(ord('A')+n-10)
# return a hex string reprenting n
def hstr(n, bits):
format = "%d'h" % bits
nformat = []
n &= (1 << bits)-1
for i in range((bits+3)/4):
nformat.append(n%16)
n /= 16
nformat.reverse()
return format + reduce(operator.add, map(hex, nformat))
def bstr(n, bits):
if bits == 1:
return "1'b1" if init else "1'b0"
format = "%d'b" % bits
nformat = []
n &= (1 << bits)-1
for i in range(bits):
nformat.append(n%2)
n /= 2
nformat.reverse()
return format + reduce(operator.add, map(hex, nformat))
# return the verilog name of a data value
def vname(t):
if t is VCC: return "1'b1"
if t is GND: return "1'b0"
if isinstance(t, ArrayType):
#print str(t), t.iswhole(t.ts)
if not t.iswhole(t.ts):
# the sequence of values is concatenated
t = [vname(i) for i in t.ts]
t.reverse()
return '{' + ','.join(t) + '}'
assert not t.anon()
return t.name.qualifiedname(sep='_')
# return the verilog declaration for the data type
def vdecl(t):
if isinstance(t, ArrayType):
return '[%d:%d]' % (t.N-1, 0)
else:
assert isinstance(t, BitType)
return ""
# return the verilog module args
def vmoduleargs(self):
args = []
for name, port in self.ports.items():
if port.isinput(): d = OUTPUT
elif port.isoutput(): d = INPUT
else: d = INOUT
#d = flip(port.direction)
args.append( "%s %s %s" % (d, vdecl(port), name) )
return args
def compileinstance(self):
#print 'compileinstance', str(self)
def arg(k,v):
if not isinstance(v, str): v = str(v)
return '.%s(%s)' % (k, v)
args = []
for k, v in self.interface.ports.items():
#print('arg', k, v,)
if v.isinput():
# find the output connected to v
w = v.value()
if not w:
print('Warning (verilog): input', str(v), 'not connected to an output')
continue
v = w
if isinstance(k, IntegerTypes):
args.append( vname(v) )
else:
args.append( arg(k,vname(v)) )
params = []
for k, v in self.kwargs.items():
if k != 'loc':
if isinstance(v, tuple):
v = hstr(v[0], v[1])
params.append(arg(k, v))
#s = '(* loc="%d,%d/%d" *)\n' % self.loc if self.loc else ""
s = str(self.__class__.__name__)
if len(params):
if len(params) > 2:
s += ' #(' + ",\n".join(params) + ')'
else:
s += ' #(' + ", ".join(params) + ')'
s += ' ' + str(self.name)
return '%s (%s)' % (s, ', '.join(args))
def compiledefinition(cls):
# for now only allow Bit or Array(n, Bit)
for name, port in cls.interface.ports.items():
if isinstance(port, ArrayKind):
if not isinstance(port.T, BitKind):
print('Error: Argument', port, 'must be an Array(n,Bit)')
assert False
args = ', '.join(vmoduleargs(cls.interface))
s = 'module %s (%s);\n' % (cls.__name__, args)
import re
if cls.verilog:
s += cls.verilog + '\n'
if cls.verilogLib:
for libName in cls.verilogLib:
if re.search("\.v$",libName):
with open(libName,'r') as libFile:
s = libFile.read() + s
else:
s = libName + s
else:
# declare a wire for each instance output
for instance in cls.instances:
for port in instance.interface.ports.values():
if port.isoutput():
s += 'wire %s %s;\n' % (vdecl(port), vname(port))
#print 'compile instances'
# emit the structured verilog for each instance
for instance in cls.instances:
wiredefaultclock(cls, instance)
s += compileinstance(instance) + ';\n'
# assign to module output arguments
for input in cls.interface.inputs():
output = input.value()
if output:
iname = vname(input)
oname = vname(output)
s += 'assign %s = %s;\n' % (iname, oname)
s += "endmodule\n"
return s
def find(circuit, defn):
name = circuit.__name__
if not isdefinition(circuit):
return defn
for i in circuit.instances:
find(type(i), defn)
if name not in defn:
defn[name] = circuit
return defn
def compile(main):
defn = find(main,OrderedDict())
code = ''
for k, v in defn.items():
print('compiling', k)
code += compiledefinition(v) + '\n'
return code
| mit |
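# Quick sketch of the hstr() helper above: it renders an integer as a sized
# Verilog hex literal (Python 2 semantics, matching the module).
from magma.verilog import hstr

print(hstr(10, 8))     # "8'h0A"
print(hstr(255, 16))   # "16'h00FF"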
Matheo13/NsPortal | Back/setup.py | 2 | 1240 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'pyramid',
'pypyodbc',
'pyramid_chameleon',
'pyramid_debugtoolbar',
'pyramid_tm',
'sqlalchemy',
'transaction',
'zope.sqlalchemy',
'waitress',
'webtest'
]
setup(name='ns_portal',
version='0.0',
description='ns_portal',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='ns_portal',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = ns_portal:main
[console_scripts]
initialize_ns_portal_db = ns_portal.scripts.initializedb:main
""",
)
| mit |
mdhaman/superdesk-core | superdesk/attachments.py | 6 | 1537 |
import os
import superdesk
from flask import current_app
from werkzeug.utils import secure_filename
from apps.auth import get_user_id
RESOURCE = 'attachments'
class AttachmentsResource(superdesk.Resource):
schema = {
'media': {'type': 'media'},
'mimetype': {'type': 'string'},
'filename': {'type': 'string'},
'length': {'type': 'integer'},
'title': {'type': 'string'},
'description': {'type': 'string'},
'user': superdesk.Resource.rel('users'),
}
item_methods = ['GET', 'PATCH']
resource_methods = ['GET', 'POST']
privileges = {'POST': 'archive', 'PATCH': 'archive'}
class AttachmentsService(superdesk.Service):
def on_create(self, docs):
for doc in docs:
doc['user'] = get_user_id()
if doc.get('media'):
media = current_app.media.get(doc['media'], RESOURCE)
doc.setdefault('filename', secure_filename(os.path.basename(getattr(media, 'filename'))))
doc.setdefault('mimetype', getattr(media, 'content_type'))
doc.setdefault('length', getattr(media, 'length'))
def on_deleted(self, doc):
current_app.media.delete(doc['media'], RESOURCE)
def init_app(app):
superdesk.register_resource(RESOURCE, AttachmentsResource, AttachmentsService)
app.client_config['attachments_max_files'] = app.config.get('ATTACHMENTS_MAX_FILES', 10)
app.client_config['attachments_max_size'] = app.config.get('ATTACHMENTS_MAX_SIZE', 2 ** 20 * 8) # 8MB
| agpl-3.0 |
ylatuya/gst-plugins-good | tests/examples/rtp/client-PCMA.py | 16 | 4269 | #! /usr/bin/env python
import pygst
pygst.require("0.10")
import gst
import gobject
#
# A simple RTP receiver
#
# receives alaw encoded RTP audio on port 5002, RTCP is received on port 5003.
# the receiver RTCP reports are sent to port 5007
#
# .-------. .----------. .---------. .-------. .--------.
# RTP |udpsrc | | rtpbin | |pcmadepay| |alawdec| |alsasink|
# port=5002 | src->recv_rtp recv_rtp->sink src->sink src->sink |
# '-------' | | '---------' '-------' '--------'
# | |
# | | .-------.
# | | |udpsink| RTCP
# | send_rtcp->sink | port=5007
# .-------. | | '-------' sync=false
# RTCP |udpsrc | | | async=false
# port=5003 | src->recv_rtcp |
# '-------' '----------'
AUDIO_CAPS = 'application/x-rtp,media=(string)audio,clock-rate=(int)8000,encoding-name=(string)PCMA'
AUDIO_DEPAY = 'rtppcmadepay'
AUDIO_DEC = 'alawdec'
AUDIO_SINK = 'autoaudiosink'
DEST = '127.0.0.1'
RTP_RECV_PORT = 5002
RTCP_RECV_PORT = 5003
RTCP_SEND_PORT = 5007
#gst-launch -v gstrtpbin name=rtpbin \
# udpsrc caps=$AUDIO_CAPS port=$RTP_RECV_PORT ! rtpbin.recv_rtp_sink_0 \
# rtpbin. ! rtppcmadepay ! alawdec ! audioconvert ! audioresample ! autoaudiosink \
# udpsrc port=$RTCP_RECV_PORT ! rtpbin.recv_rtcp_sink_0 \
# rtpbin.send_rtcp_src_0 ! udpsink port=$RTCP_SEND_PORT host=$DEST sync=false async=false
def pad_added_cb(rtpbin, new_pad, depay):
sinkpad = gst.Element.get_static_pad(depay, 'sink')
lres = gst.Pad.link(new_pad, sinkpad)
# the pipeline to hold eveything
pipeline = gst.Pipeline('rtp_client')
# the udp src and source we will use for RTP and RTCP
rtpsrc = gst.element_factory_make('udpsrc', 'rtpsrc')
rtpsrc.set_property('port', RTP_RECV_PORT)
# we need to set caps on the udpsrc for the RTP data
caps = gst.caps_from_string(AUDIO_CAPS)
rtpsrc.set_property('caps', caps)
rtcpsrc = gst.element_factory_make('udpsrc', 'rtcpsrc')
rtcpsrc.set_property('port', RTCP_RECV_PORT)
rtcpsink = gst.element_factory_make('udpsink', 'rtcpsink')
rtcpsink.set_property('port', RTCP_SEND_PORT)
rtcpsink.set_property('host', DEST)
# no need for synchronisation or preroll on the RTCP sink
rtcpsink.set_property('async', False)
rtcpsink.set_property('sync', False)
pipeline.add(rtpsrc, rtcpsrc, rtcpsink)
# the depayloading and decoding
audiodepay = gst.element_factory_make(AUDIO_DEPAY, 'audiodepay')
audiodec = gst.element_factory_make(AUDIO_DEC, 'audiodec')
# the audio playback and format conversion
audioconv = gst.element_factory_make('audioconvert', 'audioconv')
audiores = gst.element_factory_make('audioresample', 'audiores')
audiosink = gst.element_factory_make(AUDIO_SINK, 'audiosink')
# add depayloading and playback to the pipeline and link
pipeline.add(audiodepay, audiodec, audioconv, audiores, audiosink)
res = gst.element_link_many(audiodepay, audiodec, audioconv, audiores, audiosink)
# the rtpbin element
rtpbin = gst.element_factory_make('gstrtpbin', 'rtpbin')
pipeline.add(rtpbin)
# now link all to the rtpbin, start by getting an RTP sinkpad for session 0
srcpad = gst.Element.get_static_pad(rtpsrc, 'src')
sinkpad = gst.Element.get_request_pad(rtpbin, 'recv_rtp_sink_0')
lres = gst.Pad.link(srcpad, sinkpad)
# get an RTCP sinkpad in session 0
srcpad = gst.Element.get_static_pad(rtcpsrc, 'src')
sinkpad = gst.Element.get_request_pad(rtpbin, 'recv_rtcp_sink_0')
lres = gst.Pad.link(srcpad, sinkpad)
# get an RTCP srcpad for sending RTCP back to the sender
srcpad = gst.Element.get_request_pad(rtpbin, 'send_rtcp_src_0')
sinkpad = gst.Element.get_static_pad(rtcpsink, 'sink')
lres = gst.Pad.link(srcpad, sinkpad)
rtpbin.connect('pad-added', pad_added_cb, audiodepay)
gst.Element.set_state(pipeline, gst.STATE_PLAYING)
mainloop = gobject.MainLoop()
mainloop.run()
gst.Element.set_state(pipeline, gst.STATE_NULL)
| lgpl-2.1 |
fxstein/pubkey | setup.py | 1 | 4472 | #!/usr/local/bin/python3 -u
#
# The MIT License (MIT)
#
# Copyright (c) 2015 by Oliver Ratzesberger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the relevant file
with open(os.path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
# Make sure correct version of python is being used.
if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and
sys.version_info[1] < 4):
print("pubkey requires Python 3.4 or later.")
sys.exit(1)
sys.path.insert(0, os.path.abspath('pubkey'))
from pubkey import __version__, __author__, __email__, __license__
# For development versions add current timestamp of build. We could use git
# but that would required a commit for every change before being able to test
# the build locally
if __version__.find('dev') != -1:
import time
__version__ = __version__ + time.strftime('%Y%m%d%H%M%S')
setup(
name='pubkey',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__, # noqa
description='Public Key Distribution made simple.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/fxstein/pubkey',
# Author details
author=__author__,
author_email=__email__,
# Choose your license
license=__license__,
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Systems Administration',
'Topic :: Security :: Cryptography',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3 :: Only',
],
# What does your project relate to?
keywords='public private keys pairs security ssh rest asyncio cement',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'asyncio>=3.4.0',
'aiohttp>=0.17.0',
'cement>=2.6.0',
'colorlog>=2.6.0',
],
scripts=[
'pubkey/pubkey'
]
)
| mit |
YangSongzhou/django | django/utils/ipv6.py | 208 | 7967 | # This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. https://github.com/google/ipaddr-py
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import range
from django.utils.translation import ugettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message=_("This is not a valid IPv6 address.")):
"""
Cleans an IPv6 address string.
Validity is checked by calling is_valid_ipv6_address() - if an
invalid address is passed, ValidationError is raised.
Replaces the longest continuous zero-sequence with "::" and
removes leading zeroes and makes sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message used in the ValidationError.
Returns:
A compressed IPv6 address, or the same value
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
if not is_valid_ipv6_address(ip_str):
raise ValidationError(error_message, code='invalid')
# This algorithm can only handle fully exploded
# IP strings
ip_str = _explode_shorthand_ip_string(ip_str)
ip_str = _sanitize_ipv4_mapping(ip_str)
# If needed, unpack the IPv4 and return straight away
# - no need in running the rest of the algorithm
if unpack_ipv4:
ipv4_unpacked = _unpack_ipv4(ip_str)
if ipv4_unpacked:
return ipv4_unpacked
hextets = ip_str.split(":")
for index in range(len(hextets)):
# Remove leading zeroes
hextets[index] = hextets[index].lstrip('0')
if not hextets[index]:
hextets[index] = '0'
# Determine best hextet to compress
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
# Compress the most suitable hextet
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
result = ":".join(hextets)
return result.lower()
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
return ip_str.rsplit(':', 1)[1]
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
from django.core.validators import validate_ipv4_address
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
validate_ipv4_address(hextet)
except ValidationError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for __ in range(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
| bsd-3-clause |
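# Short usage sketch for the helpers above; the addresses are illustrative.
from django.utils.ipv6 import clean_ipv6_address, is_valid_ipv6_address

print(clean_ipv6_address('2001:0::0:01'))                         # '2001::1'
print(clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True))   # '10.10.10.10'
print(is_valid_ipv6_address('1::2:3:4:5:6:7:8'))                  # False, too many groups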
docprofsky/meggyjr-cncserver | cncserverclient.py | 1 | 6717 | import logging
import json
import os
import subprocess
import threading
import requests
class CNCServerClient:
"""
Connects to CNCServer and sends commands to the WaterColorBot for drawing purposes
"""
hasConnection = False
def __init__(self, cncserver_address="http://localhost:4242", cncserver_args=""):
# Create Logging instance
self.logger = logging.getLogger('CNCClient')
self.logger.debug('Client instance created!')
self.cncserver_address = cncserver_address
self.cncserver_args = cncserver_args
# Attempt to connect to an already running CNCServer instance
try:
r = requests.get(self.cncserver_address+'/v1/settings/global', timeout = 1)
self.hasConnection = True
except requests.exceptions.ConnectionError as er:
# If we cannot connect, send an error message
self.logger.critical('Could not create connection to external server!')
self.hasConnection = False
# And start our own internal CNCServer
if self.launchCncServer():
self.hasConnection = True
def setPenPos(self,x,y):
"""
Set the position of the robot's implement
"""
if not self.hasConnection: return
# Assemble packet and compress it into a JSON formatted string
data = {'x':str(x),'y':str(y)}
data_json = json.dumps(data)
try:
# Send the pen data to the server
r = requests.put(self.cncserver_address+'/v1/pen/', data=data, timeout = 0.01)
except requests.exceptions.ReadTimeout: pass
except requests.exceptions.ConnectTimeout:
# Ignore timeouts on the returned status
pass
def setPenHeight(self, height):
"""
Set the up/down height of the robot's implement
"""
if not self.hasConnection: return
# Assemble packet and compress it into a JSON formatted string
data = {'state': str(height)}
data_json = json.dumps(data)
try:
# Send the pen data to the server
r = requests.put(self.cncserver_address+'/v1/pen/', data=data, timeout = 0.01)
except requests.exceptions.ReadTimeout: pass
except requests.exceptions.ConnectTimeout:
# Ignore timeouts on the returned status
pass
def setPenSpeed(self, speed_drawing, speed_moving):
"""
Set the speed of the robot
"""
if not self.hasConnection: return
# Assemble packet and compress it into a JSON formatted string
data = {"speed:drawing": speed_drawing, "speed:moving": speed_moving}
data_json = json.dumps(data)
try:
# Send the pen data to the server
r = requests.put(self.cncserver_address+'/v1/settings/bot', data=data, timeout = 0.01)
except requests.exceptions.ReadTimeout: pass
except requests.exceptions.ConnectTimeout:
# Ignore timeouts on the returned status
pass
def getPenStatus(self):
try:
# Send the pen data to the server
r = requests.get(self.cncserver_address+'/v1/pen/', timeout = 0.01).text
except requests.exceptions.ReadTimeout:
r = "{}"
except requests.exceptions.ConnectTimeout:
# Ignore timeouts on the returned status
r = "{}"
return json.loads(r)
def setPenPosScaled(self, pos, size):
"""
Sets the pen position to 'pos', but scaling it as a percentage of 'size'
"""
x = 100*(pos[0]/float(size[0]))
y = 100*(pos[1]/float(size[1]))
self.setPenPos(x,y)
def setTool(self, toolname):
try:
# Send the pen data to the server
r = requests.put(self.cncserver_address + '/v1/tools/' + toolname, timeout = 0.01)
except requests.exceptions.ReadTimeout: pass
except requests.exceptions.ConnectTimeout:
# Ignore timeouts on the returned status
pass
def parkPen(self):
try:
# Send the request to the server
r = requests.delete(self.cncserver_address + '/v1/pen/', timeout = 0.01)
except requests.exceptions.ReadTimeout: pass
except requests.exceptions.ConnectTimeout:
# Ignore timeouts on the returned status
pass
def unlockMotors(self):
try:
# Send the request to the server
r = requests.delete(self.cncserver_address + '/v1/motors/', timeout = 0.01)
except requests.exceptions.ReadTimeout: pass
except requests.exceptions.ConnectTimeout:
# Ignore timeouts on the returned status
pass
def zeroPenPos(self):
command = {"reset": 1}
try:
# Send the request to the server
r = requests.put(self.cncserver_address + '/v1/motors/', data=command, timeout = 0.01)
except requests.exceptions.ReadTimeout: pass
except requests.exceptions.ConnectTimeout:
# Ignore timeouts on the returned status
pass
def launchCncServer(self):
"""
Looks for a built-in CNCServer instance at ../cncserver/ and launches it.
"""
# Check if CNCserver actually exists first
if os.path.exists("cncserver/cncserver.js"):
self.logger.info('Built-In CNCServer exists!')
# Start CNCServer as it's own process
self.serverProcess = subprocess.Popen(['node', 'cncserver.js', self.cncserver_args],
stdout=subprocess.PIPE,
cwd = 'cncserver')
# Create a new logging instance for CNCServer
serverLog = logging.getLogger('CNCServer')
# Start a thread to log the output from the server process
self.loggingThread = threading.Thread(target=self._outputHandlingThread,
args = (serverLog, self.serverProcess,))
self.loggingThread.start()
else:
self.logger.error('CNCServer not found at cncserver/cncserver.js')
def _outputHandlingThread(self,logger, serverInstance):
# Send output from the CNCServer thread to the log
while True:
# Read a line and strip the extra newline character. Blocking operation
line = serverInstance.stdout.readline().replace('\n','')
logger.info(line)
# If the output from CNCServer indicates it's ready, then we can send commands to it
if 'is ready to receive commands' in line: self.hasConnection = True
| gpl-3.0 |
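# Hedged usage sketch for the client above. The address, coordinates and pen
# heights are illustrative, and a CNCServer instance must already be reachable
# (or the bundled one must have been launched) for the calls to have any effect.
client = CNCServerClient(cncserver_address='http://localhost:4242')
if client.hasConnection:
    client.setPenHeight(0)                            # raise the implement
    client.setPenPos(50, 50)                          # centre of the canvas, in percent
    client.setPenPosScaled((320, 240), (640, 480))    # same point, scaled from pixel space
    print(client.getPenStatus())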
hrjn/scikit-learn | sklearn/ensemble/base.py | 19 | 5168 | """
Base class for ensemble-based estimators.
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
import numpy as np
import numbers
from ..base import clone
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..utils import _get_n_jobs, check_random_state
MAX_RAND_SEED = np.iinfo(np.int32).max
def _set_random_states(estimator, random_state=None):
"""Sets fixed random_state parameters for an estimator
Finds all parameters ending ``random_state`` and sets them to integers
derived from ``random_state``.
Parameters
----------
estimator : estimator supporting get/set_params
Estimator with potential randomness managed by random_state
parameters.
random_state : numpy.RandomState or int, optional
Random state used to generate integer values.
Notes
-----
This does not necessarily set *all* ``random_state`` attributes that
control an estimator's randomness, only those accessible through
``estimator.get_params()``. ``random_state``s not controlled include
those belonging to:
* cross-validation splitters
* ``scipy.stats`` rvs
"""
random_state = check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if key == 'random_state' or key.endswith('__random_state'):
to_set[key] = random_state.randint(MAX_RAND_SEED)
if to_set:
estimator.set_params(**to_set)
class BaseEnsemble(BaseEstimator, MetaEstimatorMixin):
"""Base class for all ensemble classes.
Warning: This class should not be used directly. Use derived classes
instead.
Parameters
----------
base_estimator : object, optional (default=None)
The base estimator from which the ensemble is built.
n_estimators : integer
The number of estimators in the ensemble.
estimator_params : list of strings
The list of attributes to use as parameters when instantiating a
new base estimator. If none are given, default parameters are used.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of estimators
The collection of fitted base estimators.
"""
def __init__(self, base_estimator, n_estimators=10,
estimator_params=tuple()):
# Set parameters
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.estimator_params = estimator_params
# Don't instantiate estimators now! Parameters of base_estimator might
# still change. Eg., when grid-searching with the nested object syntax.
# self.estimators_ needs to be filled by the derived classes in fit.
def _validate_estimator(self, default=None):
"""Check the estimator and the n_estimator attribute, set the
`base_estimator_` attribute."""
if not isinstance(self.n_estimators, (numbers.Integral, np.integer)):
raise ValueError("n_estimators must be an integer, "
"got {0}.".format(type(self.n_estimators)))
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than zero, "
"got {0}.".format(self.n_estimators))
if self.base_estimator is not None:
self.base_estimator_ = self.base_estimator
else:
self.base_estimator_ = default
if self.base_estimator_ is None:
raise ValueError("base_estimator cannot be None")
def _make_estimator(self, append=True, random_state=None):
"""Make and configure a copy of the `base_estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.base_estimator_)
estimator.set_params(**dict((p, getattr(self, p))
for p in self.estimator_params))
if random_state is not None:
_set_random_states(estimator, random_state)
if append:
self.estimators_.append(estimator)
return estimator
def __len__(self):
"""Returns the number of estimators in the ensemble."""
return len(self.estimators_)
def __getitem__(self, index):
"""Returns the index'th estimator in the ensemble."""
return self.estimators_[index]
def __iter__(self):
"""Returns iterator over estimators in the ensemble."""
return iter(self.estimators_)
def _partition_estimators(n_estimators, n_jobs):
"""Private function used to partition estimators between jobs."""
# Compute the number of jobs
n_jobs = min(_get_n_jobs(n_jobs), n_estimators)
# Partition estimators between jobs
n_estimators_per_job = (n_estimators // n_jobs) * np.ones(n_jobs,
dtype=np.int)
n_estimators_per_job[:n_estimators % n_jobs] += 1
starts = np.cumsum(n_estimators_per_job)
return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
| bsd-3-clause |
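# Illustrative sketch of the two module-level helpers above (they are private
# to scikit-learn, so the import path is version-dependent and assumed here).
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble.base import _set_random_states, _partition_estimators

tree = DecisionTreeClassifier()
_set_random_states(tree, random_state=np.random.RandomState(0))
print(tree.get_params()['random_state'])     # now pinned to a concrete integer seed

# Split 25 estimators over 4 jobs: (n_jobs, estimators per job, start offsets).
print(_partition_estimators(25, n_jobs=4))   # (4, [7, 6, 6, 6], [0, 7, 13, 19, 25])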
mhdella/scikit-learn | sklearn/linear_model/randomized_l1.py | 33 | 23358 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article, which corresponds to ``scaling`` here.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
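    A fuller sketch on synthetic data (illustrative only, not run as a
    doctest)::
        import numpy as np
        rng = np.random.RandomState(0)
        X = rng.randn(50, 10)
        y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(50)
        randomized_lasso = RandomizedLasso(random_state=0).fit(X, y)
        mask = randomized_lasso.get_support()       # boolean mask of kept features
        X_selected = randomized_lasso.transform(X)  # keep only the selected columns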
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
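    # Apply the random per-feature down-scaling: a diagonal matrix product
    # for sparse input, an in-place multiply for dense arrays.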
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the training data
    and computing a LogisticRegression on each resampling. In short, the
    features selected more often are good features. It is also known as
    stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
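    A fuller sketch on a small synthetic classification problem
    (illustrative only, not run as a doctest)::
        import numpy as np
        rng = np.random.RandomState(0)
        X = rng.randn(60, 8)
        y = (X[:, 0] + X[:, 1] > 0).astype(int)
        randomized_logistic = RandomizedLogisticRegression(random_state=0)
        randomized_logistic.fit(X, y)
        mask = randomized_logistic.get_support()    # boolean mask of kept features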
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
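    A typical call and one way to read the result (illustrative; the 0.75
    threshold is arbitrary)::
        alphas_grid, scores_path = lasso_stability_path(X, y, random_state=0)
        stable_features = scores_path.max(axis=1) > 0.75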
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
ridfrustum/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/gdal/driver.py | 411 | 2411 | # prerequisites imports
from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Dr_* routines are relevant here.
class Driver(GDALBase):
"Wraps an OGR Data Source Driver."
# Case-insensitive aliases for OGR Drivers.
_alias = {'esri' : 'ESRI Shapefile',
'shp' : 'ESRI Shapefile',
'shape' : 'ESRI Shapefile',
'tiger' : 'TIGER',
'tiger/line' : 'TIGER',
}
def __init__(self, dr_input):
"Initializes an OGR driver on either a string or integer input."
if isinstance(dr_input, basestring):
# If a string name of the driver was passed in
self._register()
# Checking the alias dictionary (case-insensitive) to see if an alias
# exists for the given driver.
if dr_input.lower() in self._alias:
name = self._alias[dr_input.lower()]
else:
name = dr_input
# Attempting to get the OGR driver by the string name.
dr = capi.get_driver_by_name(name)
elif isinstance(dr_input, int):
self._register()
dr = capi.get_driver(dr_input)
elif isinstance(dr_input, c_void_p):
dr = dr_input
else:
raise OGRException('Unrecognized input type for OGR Driver: %s' % str(type(dr_input)))
# Making sure we get a valid pointer to the OGR Driver
if not dr:
raise OGRException('Could not initialize OGR Driver on input: %s' % str(dr_input))
self.ptr = dr
def __str__(self):
"Returns the string name of the OGR Driver."
return capi.get_driver_name(self.ptr)
def _register(self):
"Attempts to register all the data source drivers."
# Only register all if the driver count is 0 (or else all drivers
# will be registered over and over again)
if not self.driver_count: capi.register_all()
# Driver properties
@property
def driver_count(self):
"Returns the number of OGR data source drivers registered."
return capi.get_driver_count()
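# A minimal usage sketch (illustrative only; requires GDAL to be installed):
#
#     from django.contrib.gis.gdal.driver import Driver
#     dr = Driver('shp')         # resolved via the alias to 'ESRI Shapefile'
#     print(str(dr))             # 'ESRI Shapefile'
#     print(dr.driver_count)     # number of registered OGR drivers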
| gpl-3.0 |
shiora/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/site-packages/sqlalchemy/orm/session.py | 76 | 94634 | # orm/session.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Session class and related utilities."""
import weakref
from .. import util, sql, engine, exc as sa_exc
from ..sql import util as sql_util, expression
from . import (
SessionExtension, attributes, exc, query,
loading, identity
)
from ..inspection import inspect
from .base import (
object_mapper, class_mapper,
_class_to_mapper, _state_mapper, object_state,
_none_set, state_str, instance_str
)
from .unitofwork import UOWTransaction
from . import state as statelib
import sys
__all__ = ['Session', 'SessionTransaction', 'SessionExtension', 'sessionmaker']
_sessions = weakref.WeakValueDictionary()
"""Weak-referencing dictionary of :class:`.Session` objects.
"""
def _state_session(state):
"""Given an :class:`.InstanceState`, return the :class:`.Session`
associated, if any.
"""
if state.session_id:
try:
return _sessions[state.session_id]
except KeyError:
pass
return None
class _SessionClassMethods(object):
"""Class-level methods for :class:`.Session`, :class:`.sessionmaker`."""
@classmethod
def close_all(cls):
"""Close *all* sessions in memory."""
for sess in _sessions.values():
sess.close()
@classmethod
@util.dependencies("sqlalchemy.orm.util")
def identity_key(cls, orm_util, *args, **kwargs):
"""Return an identity key.
This is an alias of :func:`.util.identity_key`.
"""
return orm_util.identity_key(*args, **kwargs)
@classmethod
def object_session(cls, instance):
"""Return the :class:`.Session` to which an object belongs.
This is an alias of :func:`.object_session`.
"""
return object_session(instance)
ACTIVE = util.symbol('ACTIVE')
PREPARED = util.symbol('PREPARED')
COMMITTED = util.symbol('COMMITTED')
DEACTIVE = util.symbol('DEACTIVE')
CLOSED = util.symbol('CLOSED')
class SessionTransaction(object):
"""A :class:`.Session`-level transaction.
:class:`.SessionTransaction` is a mostly behind-the-scenes object
not normally referenced directly by application code. It coordinates
among multiple :class:`.Connection` objects, maintaining a database
transaction for each one individually, committing or rolling them
back all at once. It also provides optional two-phase commit behavior
which can augment this coordination operation.
The :attr:`.Session.transaction` attribute of :class:`.Session`
refers to the current :class:`.SessionTransaction` object in use, if any.
A :class:`.SessionTransaction` is associated with a :class:`.Session`
in its default mode of ``autocommit=False`` immediately, associated
with no database connections. As the :class:`.Session` is called upon
to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection`
objects, a corresponding :class:`.Connection` and associated
:class:`.Transaction` is added to a collection within the
:class:`.SessionTransaction` object, becoming one of the
connection/transaction pairs maintained by the
:class:`.SessionTransaction`.
The lifespan of the :class:`.SessionTransaction` ends when the
:meth:`.Session.commit`, :meth:`.Session.rollback` or
:meth:`.Session.close` methods are called. At this point, the
:class:`.SessionTransaction` removes its association with its parent
:class:`.Session`. A :class:`.Session` that is in ``autocommit=False``
mode will create a new :class:`.SessionTransaction` to replace it
immediately, whereas a :class:`.Session` that's in ``autocommit=True``
mode will remain without a :class:`.SessionTransaction` until the
:meth:`.Session.begin` method is called.
Another detail of :class:`.SessionTransaction` behavior is that it is
capable of "nesting". This means that the :meth:`.Session.begin` method
can be called while an existing :class:`.SessionTransaction` is already
present, producing a new :class:`.SessionTransaction` that temporarily
replaces the parent :class:`.SessionTransaction`. When a
:class:`.SessionTransaction` is produced as nested, it assigns itself to
the :attr:`.Session.transaction` attribute. When it is ended via
:meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its
parent :class:`.SessionTransaction` back onto the
:attr:`.Session.transaction` attribute. The behavior is effectively a
stack, where :attr:`.Session.transaction` refers to the current head of
the stack.
The purpose of this stack is to allow nesting of
:meth:`.Session.rollback` or :meth:`.Session.commit` calls in context
with various flavors of :meth:`.Session.begin`. This nesting behavior
applies to when :meth:`.Session.begin_nested` is used to emit a
SAVEPOINT transaction, and is also used to produce a so-called
"subtransaction" which allows a block of code to use a
begin/rollback/commit sequence regardless of whether or not its enclosing
code block has begun a transaction. The :meth:`.flush` method, whether
called explicitly or via autoflush, is the primary consumer of the
"subtransaction" feature, in that it wishes to guarantee that it works
within in a transaction block regardless of whether or not the
:class:`.Session` is in transactional mode when the method is called.
See also:
:meth:`.Session.rollback`
:meth:`.Session.commit`
:meth:`.Session.begin`
:meth:`.Session.begin_nested`
:attr:`.Session.is_active`
:meth:`.SessionEvents.after_commit`
:meth:`.SessionEvents.after_rollback`
:meth:`.SessionEvents.after_soft_rollback`
"""
_rollback_exception = None
def __init__(self, session, parent=None, nested=False):
self.session = session
self._connections = {}
self._parent = parent
self.nested = nested
self._state = ACTIVE
if not parent and nested:
raise sa_exc.InvalidRequestError(
"Can't start a SAVEPOINT transaction when no existing "
"transaction is in progress")
if self.session._enable_transaction_accounting:
self._take_snapshot()
if self.session.dispatch.after_transaction_create:
self.session.dispatch.after_transaction_create(self.session, self)
@property
def is_active(self):
return self.session is not None and self._state is ACTIVE
def _assert_active(self, prepared_ok=False,
rollback_ok=False,
deactive_ok=False,
closed_msg="This transaction is closed"):
if self._state is COMMITTED:
raise sa_exc.InvalidRequestError(
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is PREPARED:
if not prepared_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is DEACTIVE:
if not deactive_ok and not rollback_ok:
if self._rollback_exception:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"due to a previous exception during flush."
" To begin a new transaction with this Session, "
"first issue Session.rollback()."
" Original exception was: %s"
% self._rollback_exception
)
elif not deactive_ok:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"by a nested rollback() call. To begin a new "
"transaction, issue Session.rollback() first."
)
elif self._state is CLOSED:
raise sa_exc.ResourceClosedError(closed_msg)
@property
def _is_transaction_boundary(self):
return self.nested or not self._parent
def connection(self, bindkey, **kwargs):
self._assert_active()
bind = self.session.get_bind(bindkey, **kwargs)
return self._connection_for_bind(bind)
def _begin(self, nested=False):
self._assert_active()
return SessionTransaction(
self.session, self, nested=nested)
def _iterate_parents(self, upto=None):
if self._parent is upto:
return (self,)
else:
if self._parent is None:
raise sa_exc.InvalidRequestError(
"Transaction %s is not on the active transaction list" % (
upto))
return (self,) + self._parent._iterate_parents(upto)
def _take_snapshot(self):
if not self._is_transaction_boundary:
self._new = self._parent._new
self._deleted = self._parent._deleted
self._dirty = self._parent._dirty
self._key_switches = self._parent._key_switches
return
if not self.session._flushing:
self.session.flush()
self._new = weakref.WeakKeyDictionary()
self._deleted = weakref.WeakKeyDictionary()
self._dirty = weakref.WeakKeyDictionary()
self._key_switches = weakref.WeakKeyDictionary()
def _restore_snapshot(self, dirty_only=False):
assert self._is_transaction_boundary
for s in set(self._new).union(self.session._new):
self.session._expunge_state(s)
if s.key:
del s.key
for s, (oldkey, newkey) in self._key_switches.items():
self.session.identity_map.discard(s)
s.key = oldkey
self.session.identity_map.replace(s)
for s in set(self._deleted).union(self.session._deleted):
if s.deleted:
#assert s in self._deleted
del s.deleted
self.session._update_impl(s, discard_existing=True)
assert not self.session._deleted
for s in self.session.identity_map.all_states():
if not dirty_only or s.modified or s in self._dirty:
s._expire(s.dict, self.session.identity_map._modified)
def _remove_snapshot(self):
assert self._is_transaction_boundary
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
s._expire(s.dict, self.session.identity_map._modified)
for s in self._deleted:
s.session_id = None
self._deleted.clear()
def _connection_for_bind(self, bind):
self._assert_active()
if bind in self._connections:
return self._connections[bind][0]
if self._parent:
conn = self._parent._connection_for_bind(bind)
if not self.nested:
return conn
else:
if isinstance(bind, engine.Connection):
conn = bind
if conn.engine in self._connections:
raise sa_exc.InvalidRequestError(
"Session already has a Connection associated for the "
"given Connection's Engine")
else:
conn = bind.contextual_connect()
if self.session.twophase and self._parent is None:
transaction = conn.begin_twophase()
elif self.nested:
transaction = conn.begin_nested()
else:
transaction = conn.begin()
self._connections[conn] = self._connections[conn.engine] = \
(conn, transaction, conn is not bind)
self.session.dispatch.after_begin(self.session, self, conn)
return conn
def prepare(self):
if self._parent is not None or not self.session.twophase:
raise sa_exc.InvalidRequestError(
"'twophase' mode not enabled, or not root transaction; "
"can't prepare.")
self._prepare_impl()
def _prepare_impl(self):
self._assert_active()
if self._parent is None or self.nested:
self.session.dispatch.before_commit(self.session)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.commit()
if not self.session._flushing:
for _flush_guard in range(100):
if self.session._is_clean():
break
self.session.flush()
else:
raise exc.FlushError(
"Over 100 subsequent flushes have occurred within "
"session.commit() - is an after_flush() hook "
"creating new objects?")
if self._parent is None and self.session.twophase:
try:
for t in set(self._connections.values()):
t[1].prepare()
except:
with util.safe_reraise():
self.rollback()
self._state = PREPARED
def commit(self):
self._assert_active(prepared_ok=True)
if self._state is not PREPARED:
self._prepare_impl()
if self._parent is None or self.nested:
for t in set(self._connections.values()):
t[1].commit()
self._state = COMMITTED
self.session.dispatch.after_commit(self.session)
if self.session._enable_transaction_accounting:
self._remove_snapshot()
self.close()
return self._parent
def rollback(self, _capture_exception=False):
self._assert_active(prepared_ok=True, rollback_ok=True)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.close()
if self._state in (ACTIVE, PREPARED):
for transaction in self._iterate_parents():
if transaction._parent is None or transaction.nested:
transaction._rollback_impl()
transaction._state = DEACTIVE
break
else:
transaction._state = DEACTIVE
sess = self.session
if self.session._enable_transaction_accounting and \
not sess._is_clean():
# if items were added, deleted, or mutated
# here, we need to re-restore the snapshot
util.warn(
"Session's state has been changed on "
"a non-active transaction - this state "
"will be discarded.")
self._restore_snapshot(dirty_only=self.nested)
self.close()
if self._parent and _capture_exception:
self._parent._rollback_exception = sys.exc_info()[1]
sess.dispatch.after_soft_rollback(sess, self)
return self._parent
def _rollback_impl(self):
for t in set(self._connections.values()):
t[1].rollback()
if self.session._enable_transaction_accounting:
self._restore_snapshot(dirty_only=self.nested)
self.session.dispatch.after_rollback(self.session)
def close(self):
self.session.transaction = self._parent
if self._parent is None:
for connection, transaction, autoclose in \
set(self._connections.values()):
if autoclose:
connection.close()
else:
transaction.close()
self._state = CLOSED
if self.session.dispatch.after_transaction_end:
self.session.dispatch.after_transaction_end(self.session, self)
if self._parent is None:
if not self.session.autocommit:
self.session.begin()
self.session = None
self._connections = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._assert_active(deactive_ok=True, prepared_ok=True)
if self.session.transaction is None:
return
if type is None:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
class Session(_SessionClassMethods):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :doc:`/orm/session`.
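    A minimal construction sketch (the engine URL and the mapped object are
    illustrative)::
        from sqlalchemy import create_engine
        from sqlalchemy.orm import sessionmaker
        engine = create_engine('sqlite://')
        Session = sessionmaker(bind=engine)
        session = Session()
        session.add(some_mapped_object)
        session.commit()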
"""
public_methods = (
'__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
'close', 'commit', 'connection', 'delete', 'execute', 'expire',
'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
'is_modified',
'merge', 'query', 'refresh', 'rollback',
'scalar')
def __init__(self, bind=None, autoflush=True, expire_on_commit=True,
_enable_transaction_accounting=True,
autocommit=False, twophase=False,
weak_identity_map=True, binds=None, extension=None,
info=None,
query_cls=query.Query):
"""Construct a new Session.
See also the :class:`.sessionmaker` function which is used to
generate a :class:`.Session`-producing callable with a given
set of arguments.
:param autocommit:
.. warning::
The autocommit flag is **not for general use**, and if it is used,
queries should only be invoked within the span of a
:meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing
queries outside of a demarcated transaction is a legacy mode
of usage, and can in some cases lead to concurrent connection
checkouts.
Defaults to ``False``. When ``True``, the
:class:`.Session` does not keep a persistent transaction running, and
will acquire connections from the engine on an as-needed basis,
returning them immediately after their use. Flushes will begin and
commit (or possibly rollback) their own transaction if no
transaction is present. When using this mode, the
:meth:`.Session.begin` method is used to explicitly start
transactions.
.. seealso::
:ref:`session_autocommit`
:param autoflush: When ``True``, all query operations will issue a
``flush()`` call to this ``Session`` before proceeding. This is a
convenience feature so that ``flush()`` need not be called
repeatedly in order for database queries to retrieve results. It's
typical that ``autoflush`` is used in conjunction with
``autocommit=False``. In this scenario, explicit calls to
``flush()`` are rarely needed; you usually only need to call
``commit()`` (which flushes) to finalize changes.
:param bind: An optional ``Engine`` or ``Connection`` to which this
``Session`` should be bound. When specified, all SQL operations
performed by this session will execute via this connectable.
:param binds: An optional dictionary which contains more granular
"bind" information than the ``bind`` parameter provides. This
dictionary can map individual ``Table`` instances as well as
``Mapper`` instances to individual ``Engine`` or ``Connection``
objects. Operations which proceed relative to a particular
``Mapper`` will consult this dictionary for the direct ``Mapper``
instance as well as the mapper's ``mapped_table`` attribute in
           order to locate a connectable to use. The full resolution is
described in the ``get_bind()`` method of ``Session``.
Usage looks like::
Session = sessionmaker(binds={
SomeMappedClass: create_engine('postgresql://engine1'),
somemapper: create_engine('postgresql://engine2'),
some_table: create_engine('postgresql://engine3'),
})
Also see the :meth:`.Session.bind_mapper`
and :meth:`.Session.bind_table` methods.
:param \class_: Specify an alternate class other than
``sqlalchemy.orm.session.Session`` which should be used by the
returned class. This is the only argument that is local to the
``sessionmaker()`` function, and is not sent directly to the
constructor for ``Session``.
:param _enable_transaction_accounting: Defaults to ``True``. A
legacy-only flag which when ``False`` disables *all* 0.5-style
object accounting on transaction boundaries, including auto-expiry
of instances on rollback and commit, maintenance of the "new" and
"deleted" lists upon rollback, and autoflush of pending changes upon
begin(), all of which are interdependent.
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each ``commit()``, so that
all attribute/object access subsequent to a completed transaction
will load from the most recent database state.
:param extension: An optional
:class:`~.SessionExtension` instance, or a list
of such instances, which will receive pre- and post- commit and
flush events, as well as a post-rollback event. **Deprecated.**
Please see :class:`.SessionEvents`.
:param info: optional dictionary of arbitrary data to be associated
with this :class:`.Session`. Is available via the :attr:`.Session.info`
attribute. Note the dictionary is copied at construction time so
that modifications to the per-:class:`.Session` dictionary will be local
to that :class:`.Session`.
.. versionadded:: 0.9.0
:param query_cls: Class which should be used to create new Query
objects, as returned by the ``query()`` method. Defaults to
:class:`~sqlalchemy.orm.query.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
of the database in use along with an XID. During a ``commit()``,
after ``flush()`` has been issued for all attached databases, the
``prepare()`` method on each database's ``TwoPhaseTransaction``
will be called. This allows each database to roll back the entire
transaction, before each transaction is committed.
:param weak_identity_map: Defaults to ``True`` - when set to
``False``, objects placed in the :class:`.Session` will be
strongly referenced until explicitly removed or the
:class:`.Session` is closed. **Deprecated** - this option
is obsolete.
"""
if weak_identity_map:
self._identity_cls = identity.WeakInstanceDict
else:
util.warn_deprecated("weak_identity_map=False is deprecated. "
"This feature is not needed.")
self._identity_cls = identity.StrongInstanceDict
self.identity_map = self._identity_cls()
self._new = {} # InstanceState->object, strong refs object
self._deleted = {} # same
self.bind = bind
self.__binds = {}
self._flushing = False
self._warn_on_events = False
self.transaction = None
self.hash_key = _new_sessionid()
self.autoflush = autoflush
self.autocommit = autocommit
self.expire_on_commit = expire_on_commit
self._enable_transaction_accounting = _enable_transaction_accounting
self.twophase = twophase
self._query_cls = query_cls
if info:
self.info.update(info)
if extension:
for ext in util.to_list(extension):
SessionExtension._adapt_listener(self, ext)
if binds is not None:
for mapperortable, bind in binds.items():
insp = inspect(mapperortable)
if insp.is_selectable:
self.bind_table(mapperortable, bind)
elif insp.is_mapper:
self.bind_mapper(mapperortable, bind)
else:
assert False
if not self.autocommit:
self.begin()
_sessions[self.hash_key] = self
connection_callable = None
transaction = None
"""The current active or inactive :class:`.SessionTransaction`."""
@util.memoized_property
def info(self):
"""A user-modifiable dictionary.
        The initial value of this dictionary can be populated using the
``info`` argument to the :class:`.Session` constructor or
:class:`.sessionmaker` constructor or factory methods. The dictionary
here is always local to this :class:`.Session` and can be modified
independently of all other :class:`.Session` objects.
.. versionadded:: 0.9.0
"""
return {}
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this Session.
If this Session is already within a transaction, either a plain
transaction or nested transaction, an error is raised, unless
``subtransactions=True`` or ``nested=True`` is specified.
The ``subtransactions=True`` flag indicates that this
:meth:`~.Session.begin` can create a subtransaction if a transaction
is already in progress. For documentation on subtransactions, please
see :ref:`session_subtransactions`.
The ``nested`` flag begins a SAVEPOINT transaction and is equivalent
to calling :meth:`~.Session.begin_nested`. For documentation on
SAVEPOINT transactions, please see :ref:`session_begin_nested`.
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(
nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use "
"subtransactions=True to allow subtransactions.")
else:
self.transaction = SessionTransaction(
self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
def begin_nested(self):
"""Begin a `nested` transaction on this Session.
The target database(s) must support SQL SAVEPOINTs or a
SQLAlchemy-supported vendor implementation of the idea.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
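        A short sketch of the SAVEPOINT pattern (``u1`` and ``u2`` are
        illustrative mapped objects)::
            session.add(u1)
            session.begin_nested()    # establish a SAVEPOINT
            session.add(u2)
            session.rollback()        # rolls back u2 only; u1 is retained
            session.commit()          # commits u1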
"""
return self.begin(nested=True)
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
begin() is called multiple times.
.. seealso::
:ref:`session_rollback`
"""
if self.transaction is None:
pass
else:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
        This is so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :class:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
When using the :class:`.Session` in its default mode of
``autocommit=False``, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
.. seealso::
:ref:`session_committing`
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(self, mapper=None, clause=None,
bind=None,
close_with_result=False,
**kw):
"""Return a :class:`.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`.Connection` corresponding to the current
transaction is returned, or if no transaction is in progress, a new
one is begun and the :class:`.Connection` returned (note that no
transactional state is established with the DBAPI until the first
SQL statement is emitted).
Alternatively, if this :class:`.Session` is configured with
``autocommit=True``, an ad-hoc :class:`.Connection` is returned
using :meth:`.Engine.contextual_connect` on the underlying
:class:`.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be
        resolved through any of the optional keyword arguments. Resolution
        ultimately uses the :meth:`.get_bind` method.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`.Engine.connect`, indicating
the :class:`.Connection` should be considered "single use",
automatically closing when the first result set is closed. This
flag only has an effect if this :class:`.Session` is configured with
``autocommit=True`` and does not already have a transaction
in progress.
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind,
close_with_result=close_with_result)
def _connection_for_bind(self, engine, **kwargs):
if self.transaction is not None:
return self.transaction._connection_for_bind(engine)
else:
return engine.contextual_connect(**kwargs)
def execute(self, clause, params=None, mapper=None, bind=None, **kw):
"""Execute a SQL expression construct or string statement within
the current transaction.
Returns a :class:`.ResultProxy` representing
results of the statement execution, in the same manner as that of an
:class:`.Engine` or
:class:`.Connection`.
E.g.::
result = session.execute(
user_table.select().where(user_table.c.id == 5)
)
:meth:`~.Session.execute` accepts any executable clause construct, such
as :func:`~.sql.expression.select`,
:func:`~.sql.expression.insert`,
:func:`~.sql.expression.update`,
:func:`~.sql.expression.delete`, and
:func:`~.sql.expression.text`. Plain SQL strings can be passed
as well, which in the case of :meth:`.Session.execute` only
will be interpreted the same as if it were passed via a
:func:`~.expression.text` construct. That is, the following usage::
result = session.execute(
"SELECT * FROM user WHERE id=:param",
{"param":5}
)
is equivalent to::
from sqlalchemy import text
result = session.execute(
text("SELECT * FROM user WHERE id=:param"),
{"param":5}
)
The second positional argument to :meth:`.Session.execute` is an
optional parameter set. Similar to that of
:meth:`.Connection.execute`, whether this is passed as a single
dictionary, or a list of dictionaries, determines whether the DBAPI
cursor's ``execute()`` or ``executemany()`` is used to execute the
statement. An INSERT construct may be invoked for a single row::
result = session.execute(users.insert(), {"id": 7, "name": "somename"})
or for multiple rows::
result = session.execute(users.insert(), [
{"id": 7, "name": "somename7"},
{"id": 8, "name": "somename8"},
{"id": 9, "name": "somename9"}
])
The statement is executed within the current transactional context of
this :class:`.Session`. The :class:`.Connection` which is used
to execute the statement can also be acquired directly by
calling the :meth:`.Session.connection` method. Both methods use
a rule-based resolution scheme in order to determine the
:class:`.Connection`, which in the average case is derived directly
from the "bind" of the :class:`.Session` itself, and in other cases
can be based on the :func:`.mapper`
and :class:`.Table` objects passed to the method; see the documentation
for :meth:`.Session.get_bind` for a full description of this scheme.
The :meth:`.Session.execute` method does *not* invoke autoflush.
The :class:`.ResultProxy` returned by the :meth:`.Session.execute`
method is returned with the "close_with_result" flag set to true;
the significance of this flag is that if this :class:`.Session` is
autocommitting and does not have a transaction-dedicated
:class:`.Connection` available, a temporary :class:`.Connection` is
established for the statement execution, which is closed (meaning,
returned to the connection pool) when the :class:`.ResultProxy` has
consumed all available data. This applies *only* when the
:class:`.Session` is configured with autocommit=True and no
transaction has been started.
:param clause:
An executable statement (i.e. an :class:`.Executable` expression
such as :func:`.expression.select`) or string SQL statement
to be executed.
:param params:
Optional dictionary, or list of dictionaries, containing
bound parameter values. If a single dictionary, single-row
execution occurs; if a list of dictionaries, an
"executemany" will be invoked. The keys in each dictionary
must correspond to parameter names present in the statement.
:param mapper:
Optional :func:`.mapper` or mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause`` when locating a bind. See :meth:`.Session.get_bind`
for more details.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes
precedence over ``mapper`` and ``clause`` when locating
a bind.
:param \**kw:
Additional keyword arguments are sent to :meth:`.Session.get_bind()`
to allow extensibility of "bind" schemes.
.. seealso::
:ref:`sqlexpression_toplevel` - Tutorial on using Core SQL
constructs.
:ref:`connections_toplevel` - Further information on direct
statement execution.
:meth:`.Connection.execute` - core level statement execution
method, which is :meth:`.Session.execute` ultimately uses
in order to execute the statement.
"""
clause = expression._literal_as_text(clause)
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind, close_with_result=True).execute(
clause, params or {})
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(
clause, params=params, mapper=mapper, bind=bind, **kw).scalar()
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_parents():
transaction.close()
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
for state in self.identity_map.all_states() + list(self._new):
state._detach()
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
# TODO: need much more test coverage for bind_mapper() and similar !
# TODO: + crystalize + document resolution order
# vis. bind_mapper/bind_table
def bind_mapper(self, mapper, bind):
"""Bind operations for a mapper to a Connectable.
mapper
A mapper instance or mapped class
bind
Any Connectable: a ``Engine`` or ``Connection``.
All subsequent operations involving this mapper will use the given
`bind`.
"""
if isinstance(mapper, type):
mapper = class_mapper(mapper)
self.__binds[mapper.base_mapper] = bind
for t in mapper._all_tables:
self.__binds[t] = bind
def bind_table(self, table, bind):
"""Bind operations on a Table to a Connectable.
table
A ``Table`` instance
bind
Any Connectable: a ``Engine`` or ``Connection``.
All subsequent operations involving this ``Table`` will use the
given `bind`.
"""
self.__binds[table] = bind
def get_bind(self, mapper=None, clause=None):
"""Return a "bind" to which this :class:`.Session` is bound.
The "bind" is usually an instance of :class:`.Engine`,
except in the case where the :class:`.Session` has been
explicitly bound directly to a :class:`.Connection`.
For a multiply-bound or unbound :class:`.Session`, the
``mapper`` or ``clause`` arguments are used to determine the
appropriate bind to return.
Note that the "mapper" argument is usually present
when :meth:`.Session.get_bind` is called via an ORM
operation such as a :meth:`.Session.query`, each
individual INSERT/UPDATE/DELETE operation within a
        :meth:`.Session.flush` call, etc.
The order of resolution is:
1. if mapper given and session.binds is present,
locate a bind based on mapper.
2. if clause given and session.binds is present,
locate a bind based on :class:`.Table` objects
found in the given clause present in session.binds.
3. if session.bind is present, return that.
4. if clause given, attempt to return a bind
linked to the :class:`.MetaData` ultimately
associated with the clause.
5. if mapper given, attempt to return a bind
linked to the :class:`.MetaData` ultimately
associated with the :class:`.Table` or other
selectable to which the mapper is mapped.
6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
is raised.
:param mapper:
Optional :func:`.mapper` mapped class or instance of
:class:`.Mapper`. The bind can be derived from a :class:`.Mapper`
first by consulting the "binds" map associated with this
:class:`.Session`, and secondly by consulting the :class:`.MetaData`
associated with the :class:`.Table` to which the :class:`.Mapper`
is mapped for a bind.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.). If the ``mapper`` argument is not present or could not
produce a bind, the given expression construct will be searched
for a bound element, typically a :class:`.Table` associated with
bound :class:`.MetaData`.
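        For example (a sketch; the engines, mapped class and table are
        illustrative)::
            session = Session(binds={User: user_engine, log_table: log_engine})
            session.get_bind(mapper=User)                 # resolved by step 1
            session.get_bind(clause=log_table.select())   # resolved by step 2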
"""
if mapper is clause is None:
if self.bind:
return self.bind
else:
raise sa_exc.UnboundExecutionError(
"This session is not bound to a single Engine or "
"Connection, and no context was provided to locate "
"a binding.")
c_mapper = mapper is not None and _class_to_mapper(mapper) or None
# manually bound?
if self.__binds:
if c_mapper:
if c_mapper.base_mapper in self.__binds:
return self.__binds[c_mapper.base_mapper]
elif c_mapper.mapped_table in self.__binds:
return self.__binds[c_mapper.mapped_table]
if clause is not None:
for t in sql_util.find_tables(clause, include_crud=True):
if t in self.__binds:
return self.__binds[t]
if self.bind:
return self.bind
if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
return clause.bind
if c_mapper and c_mapper.mapped_table.bind:
return c_mapper.mapped_table.bind
context = []
if mapper is not None:
context.append('mapper %s' % c_mapper)
if clause is not None:
context.append('SQL expression')
raise sa_exc.UnboundExecutionError(
"Could not locate a bind configured on %s or this Session" % (
', '.join(context)))
def query(self, *entities, **kwargs):
"""Return a new ``Query`` object corresponding to this ``Session``."""
return self._query_cls(entities, self, **kwargs)
@property
@util.contextmanager
def no_autoflush(self):
"""Return a context manager that disables autoflush.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
.. versionadded:: 0.7.6
"""
autoflush = self.autoflush
self.autoflush = False
yield self
self.autoflush = autoflush
def _autoflush(self):
if self.autoflush and not self._flushing:
try:
self.flush()
except sa_exc.StatementError as e:
# note we are reraising StatementError as opposed to
# raising FlushError with "chaining" to remain compatible
# with code that catches StatementError, IntegrityError,
# etc.
e.add_detail(
"raised as a result of Query-invoked autoflush; "
"consider using a session.no_autoflush block if this "
"flush is occuring prematurely")
util.raise_from_cause(e)
def refresh(self, instance, attribute_names=None, lockmode=None):
"""Expire and refresh the attributes on the given instance.
A query will be issued to the database and all attributes will be
refreshed with their current database value.
Lazy-loaded relational attributes will remain lazily loaded, so that
the instance-wide refresh operation will be followed immediately by
the lazy load of that attribute.
Eagerly-loaded relational attributes will eagerly load within the
single refresh operation.
Note that a highly isolated transaction will return the same values as
were previously read in that same transaction, regardless of changes
in database state outside of that transaction - usage of
:meth:`~Session.refresh` usually only makes sense if non-ORM SQL
        statements were emitted in the ongoing transaction, or if autocommit
mode is turned on.
:param attribute_names: optional. An iterable collection of
string attribute names indicating a subset of attributes to
be refreshed.
:param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.expire_all`
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._expire_state(state, attribute_names)
if loading.load_on_ident(
self.query(object_mapper(instance)),
state.key, refresh_state=state,
lockmode=lockmode,
only_load_props=attribute_names) is None:
raise sa_exc.InvalidRequestError(
"Could not refresh instance '%s'" %
instance_str(instance))
def expire_all(self):
"""Expires all persistent instances within this Session.
When any attributes on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` should not be needed when
autocommit is ``False``, assuming the transaction is isolated.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
for state in self.identity_map.all_states():
state._expire(state.dict, self.identity_map._modified)
def expire(self, instance, attribute_names=None):
"""Expire the attributes on an instance.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._expire_state(state, attribute_names)
def _expire_state(self, state, attribute_names):
self._validate_persistent(state)
if attribute_names:
state._expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(state.manager.mapper.cascade_iterator(
'refresh-expire', state))
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(self, state):
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state._expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state._detach()
@util.deprecated("0.7", "The non-weak-referencing identity map "
"feature is no longer needed.")
def prune(self):
"""Remove unreferenced instances cached in the identity map.
Note that this method is only meaningful if "weak_identity_map" is set
to False. The default weak identity map is self-pruning.
Removes any object in this Session's identity map that is not
referenced in user code, modified, new or scheduled for deletion.
Returns the number of objects pruned.
"""
return self.identity_map.prune()
def expunge(self, instance):
"""Remove the `instance` from this ``Session``.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
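E.g., a minimal usage sketch (``user`` is an illustrative instance
already present in the session)::
    session.expunge(user)   # user is now detached from this Session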
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
if state.session_id is not self.hash_key:
raise sa_exc.InvalidRequestError(
"Instance %s is not present in this Session" %
state_str(state))
cascaded = list(state.manager.mapper.cascade_iterator(
'expunge', state))
self._expunge_state(state)
for o, m, st_, dct_ in cascaded:
self._expunge_state(st_)
def _expunge_state(self, state):
if state in self._new:
self._new.pop(state)
state._detach()
elif self.identity_map.contains_state(state):
self.identity_map.discard(state)
self._deleted.pop(state, None)
state._detach()
elif self.transaction:
self.transaction._deleted.pop(state, None)
def _register_newly_persistent(self, states):
for state in states:
mapper = _state_mapper(state)
# protect against last-minute dereferences of the object
obj = state.obj()
if obj is not None:
instance_key = mapper._identity_key_from_state(state)
if _none_set.issubset(instance_key[1]) and \
not mapper.allow_partial_pks or \
_none_set.issuperset(instance_key[1]):
raise exc.FlushError(
"Instance %s has a NULL identity key. If this is an "
"auto-generated value, check that the database table "
"allows generation of new primary key values, and "
"that the mapped Column object is configured to "
"expect these generated values. Ensure also that "
"this flush() is not occurring at an inappropriate "
"time, such as within a load() event."
% state_str(state)
)
if state.key is None:
state.key = instance_key
elif state.key != instance_key:
# primary key switch. use discard() in case another
# state has already replaced this one in the identity
# map (see test/orm/test_naturalpks.py ReversePKsTest)
self.identity_map.discard(state)
if state in self.transaction._key_switches:
orig_key = self.transaction._key_switches[state][0]
else:
orig_key = state.key
self.transaction._key_switches[state] = (
orig_key, instance_key)
state.key = instance_key
self.identity_map.replace(state)
statelib.InstanceState._commit_all_states(
((state, state.dict) for state in states),
self.identity_map
)
self._register_altered(states)
# remove from new last, might be the last strong ref
for state in set(states).intersection(self._new):
self._new.pop(state)
def _register_altered(self, states):
if self._enable_transaction_accounting and self.transaction:
for state in states:
if state in self._new:
self.transaction._new[state] = True
else:
self.transaction._dirty[state] = True
def _remove_newly_deleted(self, states):
for state in states:
if self._enable_transaction_accounting and self.transaction:
self.transaction._deleted[state] = True
self.identity_map.discard(state)
self._deleted.pop(state, None)
state.deleted = True
def add(self, instance, _warn=True):
"""Place an object in the ``Session``.
Its state will be persisted to the database on the next flush
operation.
Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
is ``expunge()``.
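E.g., a minimal usage sketch (``User`` is an illustrative mapped
class, not part of this API)::
    user = User(name='ed')
    session.add(user)
    session.flush()   # user is INSERTed and becomes persistent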
"""
if _warn and self._warn_on_events:
self._flush_warning("Session.add()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._save_or_update_state(state)
def add_all(self, instances):
"""Add the given collection of instances to this ``Session``."""
if self._warn_on_events:
self._flush_warning("Session.add_all()")
for instance in instances:
self.add(instance, _warn=False)
def _save_or_update_state(self, state):
self._save_or_update_impl(state)
mapper = _state_mapper(state)
for o, m, st_, dct_ in mapper.cascade_iterator(
'save-update',
state,
halt_on=self._contains_state):
self._save_or_update_impl(st_)
def delete(self, instance):
"""Mark an instance as deleted.
The database delete operation occurs upon ``flush()``.
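E.g., a minimal usage sketch (``user`` is an illustrative persistent
instance)::
    session.delete(user)
    session.flush()   # the DELETE statement is emitted here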
"""
if self._warn_on_events:
self._flush_warning("Session.delete()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
state_str(state))
if state in self._deleted:
return
# ensure object is attached to allow the
# cascade operation to load deferred attributes
# and collections
self._attach(state, include_before=True)
# grab the cascades before adding the item to the deleted list
# so that autoflush does not delete the item
# the strong reference to the instance itself is significant here
cascade_states = list(state.manager.mapper.cascade_iterator(
'delete', state))
self._deleted[state] = state.obj()
self.identity_map.add(state)
for o, m, st_, dct_ in cascade_states:
self._delete_impl(st_)
def merge(self, instance, load=True):
"""Copy the state of a given instance into a corresponding instance
within this :class:`.Session`.
:meth:`.Session.merge` examines the primary key attributes of the
source instance, and attempts to reconcile it with an instance of the
same primary key in the session. If not found locally, it attempts
to load the object from the database based on primary key, and if
none can be located, creates a new instance. The state of each
attribute on the source instance is then copied to the target instance.
The resulting target instance is then returned by the method; the
original source instance is left unmodified, and un-associated with the
:class:`.Session` if not already.
This operation cascades to associated instances if the association is
mapped with ``cascade="merge"``.
See :ref:`unitofwork_merging` for a detailed discussion of merging.
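E.g., a minimal usage sketch (``detached_user`` is an illustrative
detached instance, not part of this API)::
    merged_user = session.merge(detached_user)
    # merged_user is attached to this Session; detached_user is
    # left unmodified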
:param instance: Instance to be merged.
:param load: Boolean, when False, :meth:`.merge` switches into
a "high performance" mode which causes it to forego emitting history
events as well as all database access. This flag is used for
cases such as transferring graphs of objects into a :class:`.Session`
from a second level cache, or to transfer just-loaded objects
into the :class:`.Session` owned by a worker thread or process
without re-querying the database.
The ``load=False`` use case adds the caveat that the given
object has to be in a "clean" state, that is, has no pending changes
to be flushed - even if the incoming object is detached from any
:class:`.Session`. This is so that when
the merge operation populates local attributes and
cascades to related objects and
collections, the values can be "stamped" onto the
target object as is, without generating any history or attribute
events, and without the need to reconcile the incoming data with
any existing related objects or collections that might not
be loaded. The resulting objects from ``load=False`` are always
produced as "clean", so it is only appropriate that the given objects
should be "clean" as well, else this suggests a mis-use of the method.
"""
if self._warn_on_events:
self._flush_warning("Session.merge()")
_recursive = {}
if load:
# flush current contents if we expect to load data
self._autoflush()
object_mapper(instance) # verify mapped
autoflush = self.autoflush
try:
self.autoflush = False
return self._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load, _recursive=_recursive)
finally:
self.autoflush = autoflush
def _merge(self, state, state_dict, load=True, _recursive=None):
mapper = _state_mapper(state)
if state in _recursive:
return _recursive[state]
new_instance = False
key = state.key
if key is None:
if not load:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"transient (i.e. unpersisted) objects. flush() "
"all changes on mapped instances before merging with "
"load=False.")
key = mapper._identity_key_from_state(state)
if key in self.identity_map:
merged = self.identity_map[key]
elif not load:
if state.modified:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects marked as 'dirty'. flush() all changes on "
"mapped instances before merging with load=False.")
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_state.key = key
self._update_impl(merged_state)
new_instance = True
elif not _none_set.issubset(key[1]) or \
(mapper.allow_partial_pks and
not _none_set.issuperset(key[1])):
merged = self.query(mapper.class_).get(key[1])
else:
merged = None
if merged is None:
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
new_instance = True
self._save_or_update_state(merged_state)
else:
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
_recursive[state] = merged
# check that we didn't just pull the exact same
# state out.
if state is not merged_state:
# version check if applicable
if mapper.version_id_col is not None:
existing_version = mapper._get_state_attr_by_column(
state,
state_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE)
merged_version = mapper._get_state_attr_by_column(
merged_state,
merged_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE)
if existing_version is not attributes.PASSIVE_NO_RESULT and \
merged_version is not attributes.PASSIVE_NO_RESULT and \
existing_version != merged_version:
raise exc.StaleDataError(
"Version id '%s' on merged state %s "
"does not match existing version '%s'. "
"Leave the version attribute unset when "
"merging to update the most recent version."
% (
existing_version,
state_str(merged_state),
merged_version
))
merged_state.load_path = state.load_path
merged_state.load_options = state.load_options
for prop in mapper.iterate_properties:
prop.merge(self, state, state_dict,
merged_state, merged_dict,
load, _recursive)
if not load:
# remove any history
merged_state._commit_all(merged_dict, self.identity_map)
if new_instance:
merged_state.manager.dispatch.load(merged_state, None)
return merged
def _validate_persistent(self, state):
if not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persistent within this Session" %
state_str(state))
def _save_impl(self, state):
if state.key is not None:
raise sa_exc.InvalidRequestError(
"Object '%s' already has an identity - it can't be registered "
"as pending" % state_str(state))
self._before_attach(state)
if state not in self._new:
self._new[state] = state.obj()
state.insert_order = len(self._new)
self._attach(state)
def _update_impl(self, state, discard_existing=False):
if (self.identity_map.contains_state(state) and
state not in self._deleted):
return
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
state_str(state))
if state.deleted:
raise sa_exc.InvalidRequestError(
"Instance '%s' has been deleted. Use the make_transient() "
"function to send this object back to the transient state." %
state_str(state)
)
self._before_attach(state)
self._deleted.pop(state, None)
if discard_existing:
self.identity_map.replace(state)
else:
self.identity_map.add(state)
self._attach(state)
def _save_or_update_impl(self, state):
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
def _delete_impl(self, state):
if state in self._deleted:
return
if state.key is None:
return
self._attach(state, include_before=True)
self._deleted[state] = state.obj()
self.identity_map.add(state)
def enable_relationship_loading(self, obj):
"""Associate an object with this :class:`.Session` for related
object loading.
.. warning::
:meth:`.enable_relationship_loading` exists to serve special
use cases and is not recommended for general use.
Accesses of attributes mapped with :func:`.relationship`
will attempt to load a value from the database using this
:class:`.Session` as the source of connectivity. The values
will be loaded based on foreign key values present on this
object - it follows that this functionality
generally only works for many-to-one relationships.
The object will be attached to this session, but will
**not** participate in any persistence operations; its state
for almost all purposes will remain either "transient" or
"detached", except for the case of relationship loading.
Also note that backrefs will often not work as expected.
Altering a relationship-bound attribute on the target object
may not fire off a backref event, if the effective value
is what was already loaded from a foreign-key-holding value.
The :meth:`.Session.enable_relationship_loading` method is
similar to the ``load_on_pending`` flag on :func:`.relationship`. Unlike
that flag, :meth:`.Session.enable_relationship_loading` allows
an object to remain transient while still being able to load
related items.
To make a transient object associated with a :class:`.Session`
via :meth:`.Session.enable_relationship_loading` pending, add
it to the :class:`.Session` using :meth:`.Session.add` normally.
:meth:`.Session.enable_relationship_loading` does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before flush()
proceeds. This method is not intended for general use.
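E.g., a minimal usage sketch (``Order`` and its many-to-one
``customer`` relationship are illustrative, not part of this API)::
    order = Order(customer_id=5)   # transient; not added to the session
    session.enable_relationship_loading(order)
    customer = order.customer      # loaded via this Session's connection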
.. versionadded:: 0.8
.. seealso::
``load_on_pending`` at :func:`.relationship` - this flag
allows per-relationship loading of many-to-ones on items that
are pending.
"""
state = attributes.instance_state(obj)
self._attach(state, include_before=True)
state._load_pending = True
def _before_attach(self, state):
if state.session_id != self.hash_key and \
self.dispatch.before_attach:
self.dispatch.before_attach(self, state.obj())
def _attach(self, state, include_before=False):
if state.key and \
state.key in self.identity_map and \
not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError("Can't attach instance "
"%s; another instance with key %s is already "
"present in this session."
% (state_str(state), state.key))
if state.session_id and \
state.session_id is not self.hash_key and \
state.session_id in _sessions:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')" % (state_str(state),
state.session_id, self.hash_key))
if state.session_id != self.hash_key:
if include_before and \
self.dispatch.before_attach:
self.dispatch.before_attach(self, state.obj())
state.session_id = self.hash_key
if state.modified and state._strong_obj is None:
state._strong_obj = state.obj()
if self.dispatch.after_attach:
self.dispatch.after_attach(self, state.obj())
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
The instance may be pending or persistent within the Session for
the result to be True.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
return self._contains_state(state)
def __iter__(self):
"""Iterate over all pending or persistent instances within this
Session.
"""
return iter(list(self._new.values()) + list(self.identity_map.values()))
def _contains_state(self, state):
return state in self._new or self.identity_map.contains_state(state)
def flush(self, objects=None):
"""Flush all the object changes to the database.
Writes out all pending object creations, deletions and modifications
to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
automatically ordered by the Session's unit of work dependency
solver.
Database operations will be issued in the current transactional
context and do not affect the state of the transaction, unless an
error occurs, in which case the entire transaction is rolled back.
You may flush() as often as you like within a transaction to move
changes from Python to the database's transaction buffer.
For ``autocommit`` Sessions with no active manual transaction, flush()
will create a transaction on the fly that surrounds the entire set of
operations in the flush.
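E.g., a minimal sketch using an illustrative ``someobject``::
    session.add(someobject)
    session.flush()   # pending changes are written in the current transaction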
:param objects: Optional; restricts the flush operation to operate
only on elements that are in the given collection.
This feature is for an extremely narrow set of use cases where
particular objects may need to be operated upon before the
full flush() occurs. It is not intended for general use.
"""
if self._flushing:
raise sa_exc.InvalidRequestError("Session is already flushing")
if self._is_clean():
return
try:
self._flushing = True
self._flush(objects)
finally:
self._flushing = False
def _flush_warning(self, method):
util.warn(
"Usage of the '%s' operation is not currently supported "
"within the execution stage of the flush process. "
"Results may not be consistent. Consider using alternative "
"event listeners or connection-level operations instead."
% method)
def _is_clean(self):
return not self.identity_map.check_modified() and \
not self._deleted and \
not self._new
def _flush(self, objects=None):
dirty = self._dirty_states
if not dirty and not self._deleted and not self._new:
self.identity_map._modified.clear()
return
flush_context = UOWTransaction(self)
if self.dispatch.before_flush:
self.dispatch.before_flush(self, flush_context, objects)
# re-establish "dirty states" in case the listeners
# added
dirty = self._dirty_states
deleted = set(self._deleted)
new = set(self._new)
dirty = set(dirty).difference(deleted)
# create the set of all objects we want to operate upon
if objects:
# specific list passed in
objset = set()
for o in objects:
try:
state = attributes.instance_state(o)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(o)
objset.add(state)
else:
objset = None
# store objects whose fate has been decided
processed = set()
# put all saves/updates into the flush context. detect top-level
# orphans and throw them into deleted.
if objset:
proc = new.union(dirty).intersection(objset).difference(deleted)
else:
proc = new.union(dirty).difference(deleted)
for state in proc:
is_orphan = (
_state_mapper(state)._is_orphan(state) and state.has_identity)
flush_context.register_object(state, isdelete=is_orphan)
processed.add(state)
# put all remaining deletes into the flush context.
if objset:
proc = deleted.intersection(objset).difference(processed)
else:
proc = deleted.difference(processed)
for state in proc:
flush_context.register_object(state, isdelete=True)
if not flush_context.has_work:
return
flush_context.transaction = transaction = self.begin(
subtransactions=True)
try:
self._warn_on_events = True
try:
flush_context.execute()
finally:
self._warn_on_events = False
self.dispatch.after_flush(self, flush_context)
flush_context.finalize_flush_changes()
if not objects and self.identity_map._modified:
len_ = len(self.identity_map._modified)
statelib.InstanceState._commit_all_states(
[(state, state.dict) for state in
self.identity_map._modified],
instance_dict=self.identity_map)
util.warn("Attribute history events accumulated on %d "
"previously clean instances "
"within inner-flush event handlers have been reset, "
"and will not result in database updates. "
"Consider using set_committed_value() within "
"inner-flush event handlers to avoid this warning."
% len_)
# useful assertions:
#if not objects:
# assert not self.identity_map._modified
#else:
# assert self.identity_map._modified == \
# self.identity_map._modified.difference(objects)
self.dispatch.after_flush_postexec(self, flush_context)
transaction.commit()
except:
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
def is_modified(self, instance, include_collections=True,
passive=True):
"""Return ``True`` if the given instance has locally
modified attributes.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
.. versionchanged:: 0.8
When using SQLAlchemy 0.7 and earlier, the ``passive``
flag should **always** be explicitly set to ``True``,
else SQL loads/autoflushes may proceed which can affect
the modified state itself:
``session.is_modified(someobject, passive=True)``\ .
In 0.8 and above, the behavior is corrected and
this flag is ignored.
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may report
``False`` when tested with this method. This is because
the object may have received change events via attribute
mutation, thus placing it in :attr:`.Session.dirty`,
but ultimately the state is the same as that loaded from
the database, resulting in no net change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
:param passive:
.. versionchanged:: 0.8
Ignored for backwards compatibility.
When using SQLAlchemy 0.7 and earlier, this flag should always
be set to ``True``.
"""
state = object_state(instance)
if not state.modified:
return False
dict_ = state.dict
for attr in state.manager.attributes:
if \
(
not include_collections and
hasattr(attr.impl, 'get_collection')
) or not hasattr(attr.impl, 'get_history'):
continue
(added, unchanged, deleted) = \
attr.impl.get_history(state, dict_,
passive=attributes.NO_CHANGE)
if added or deleted:
return True
else:
return False
@property
def is_active(self):
"""True if this :class:`.Session` is in "transaction mode" and
is not in "partial rollback" state.
The :class:`.Session` in its default mode of ``autocommit=False``
is essentially always in "transaction mode", in that a
:class:`.SessionTransaction` is associated with it as soon as
it is instantiated. This :class:`.SessionTransaction` is immediately
replaced with a new one as soon as it is ended, due to a rollback,
commit, or close operation.
"Transaction mode" does *not* indicate whether
or not actual database connection resources are in use; the
:class:`.SessionTransaction` object coordinates among zero or more
actual database transactions, and starts out with none, accumulating
individual DBAPI connections as different data sources are used
within its scope. The best way to track when a particular
:class:`.Session` has actually begun to use DBAPI resources is to
implement a listener using the :meth:`.SessionEvents.after_begin`
method, which will deliver both the :class:`.Session` as well as the
target :class:`.Connection` to a user-defined event listener.
The "partial rollback" state refers to when an "inner" transaction,
typically used during a flush, encounters an error and emits a
rollback of the DBAPI connection. At this point, the
:class:`.Session` is in "partial rollback" and waits for the user to
call :meth:`.Session.rollback`, in order to close out the
transaction stack. It is in this "partial rollback" period that the
:attr:`.is_active` flag returns False. After the call to
:meth:`.Session.rollback`, the :class:`.SessionTransaction` is replaced
with a new one and :attr:`.is_active` returns ``True`` again.
When a :class:`.Session` is used in ``autocommit=True`` mode, the
:class:`.SessionTransaction` is only instantiated within the scope
of a flush call, or when :meth:`.Session.begin` is called. So
:attr:`.is_active` will always be ``False`` outside of a flush or
:meth:`.Session.begin` block in this mode, and will be ``True``
within the :meth:`.Session.begin` block as long as it doesn't enter
"partial rollback" state.
From all the above, it follows that the only purpose of this flag is
for application frameworks that wish to detect if a "rollback" is
necessary within a generic error handling routine, for
:class:`.Session` objects that would otherwise be in
"partial rollback" mode. In a typical integration case, this is also
not necessary as it is standard practice to emit
:meth:`.Session.rollback` unconditionally within the outermost
exception catch.
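E.g., a sketch of the generic error-handling case described above
(``do_some_work`` is an illustrative application function)::
    try:
        do_some_work(session)
    except Exception:
        if not session.is_active:
            session.rollback()
        raise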
To track the transactional state of a :class:`.Session` fully,
use event listeners, primarily the :meth:`.SessionEvents.after_begin`,
:meth:`.SessionEvents.after_commit`,
:meth:`.SessionEvents.after_rollback` and related events.
"""
return self.transaction and self.transaction.is_active
identity_map = None
"""A mapping of object identities to objects themselves.
Iterating through ``Session.identity_map.values()`` provides
access to the full set of persistent objects (i.e., those
that have row identity) currently in the session.
.. seealso::
:func:`.identity_key` - helper function to produce the keys used
in this dictionary.
"""
@property
def _dirty_states(self):
"""The set of all persistent states considered dirty.
This method returns all states that were modified including
those that were possibly deleted.
"""
return self.identity_map._dirty_states()
@property
def dirty(self):
"""The set of all persistent instances considered dirty.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
"""
return util.IdentitySet(
[state.obj()
for state in self._dirty_states
if state not in self._deleted])
@property
def deleted(self):
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(list(self._deleted.values()))
@property
def new(self):
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(list(self._new.values()))
class sessionmaker(_SessionClassMethods):
"""A configurable :class:`.Session` factory.
The :class:`.sessionmaker` factory generates new
:class:`.Session` objects when called, creating them given
the configurational arguments established here.
e.g.::
# global scope
Session = sessionmaker(autoflush=False)
# later, in a local scope, create and use a session:
sess = Session()
Any keyword arguments sent to the constructor itself will override the
"configured" keywords::
Session = sessionmaker()
# bind an individual session to a connection
sess = Session(bind=connection)
The class also includes a method :meth:`.configure`, which can
be used to specify additional keyword arguments to the factory, which
will take effect for subsequent :class:`.Session` objects generated.
This is usually used to associate one or more :class:`.Engine` objects
with an existing :class:`.sessionmaker` factory before it is first
used::
# application starts
Session = sessionmaker()
# ... later
engine = create_engine('sqlite:///foo.db')
Session.configure(bind=engine)
sess = Session()
.. seealso::
:ref:`session_getting` - introductory text on creating
sessions using :class:`.sessionmaker`.
"""
def __init__(self, bind=None, class_=Session, autoflush=True,
autocommit=False,
expire_on_commit=True,
info=None, **kw):
"""Construct a new :class:`.sessionmaker`.
All arguments here except for ``class_`` correspond to arguments
accepted by :class:`.Session` directly. See the
:meth:`.Session.__init__` docstring for more details on parameters.
:param bind: a :class:`.Engine` or other :class:`.Connectable` with
which newly created :class:`.Session` objects will be associated.
:param class_: class to use in order to create new :class:`.Session`
objects. Defaults to :class:`.Session`.
:param autoflush: The autoflush setting to use with newly created
:class:`.Session` objects.
:param autocommit: The autocommit setting to use with newly created
:class:`.Session` objects.
:param expire_on_commit=True: the expire_on_commit setting to use
with newly created :class:`.Session` objects.
:param info: optional dictionary of information that will be available
via :attr:`.Session.info`. Note this dictionary is *updated*, not
replaced, when the ``info`` parameter is specified to the specific
:class:`.Session` construction operation.
.. versionadded:: 0.9.0
:param \**kw: all other keyword arguments are passed to the constructor
of newly created :class:`.Session` objects.
"""
kw['bind'] = bind
kw['autoflush'] = autoflush
kw['autocommit'] = autocommit
kw['expire_on_commit'] = expire_on_commit
if info is not None:
kw['info'] = info
self.kw = kw
# make our own subclass of the given class, so that
# events can be associated with it specifically.
self.class_ = type(class_.__name__, (class_,), {})
def __call__(self, **local_kw):
"""Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
In Python, the ``__call__`` method is invoked on an object when
it is "called" in the same way as a function::
Session = sessionmaker()
session = Session() # invokes sessionmaker.__call__()
"""
for k, v in self.kw.items():
if k == 'info' and 'info' in local_kw:
d = v.copy()
d.update(local_kw['info'])
local_kw['info'] = d
else:
local_kw.setdefault(k, v)
return self.class_(**local_kw)
def configure(self, **new_kw):
"""(Re)configure the arguments for this sessionmaker.
e.g.::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite://'))
"""
self.kw.update(new_kw)
def __repr__(self):
return "%s(class_=%r,%s)" % (
self.__class__.__name__,
self.class_.__name__,
", ".join("%s=%r" % (k, v) for k, v in self.kw.items())
)
def make_transient(instance):
"""Make the given instance 'transient'.
This will remove its association with any
session and additionally will remove its "identity key",
such that it's as though the object were newly constructed,
except retaining its values. It also resets the
"deleted" flag on the state if this object
had been explicitly deleted by its session.
Attributes which were "expired" or deferred at the
instance level are reverted to undefined, and
will not trigger any loads.
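E.g., a minimal usage sketch (``user`` is an illustrative persistent
instance)::
    make_transient(user)
    # user now has no identity key and belongs to no Session; adding it
    # to a Session again would INSERT it as a brand new row on flush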
"""
state = attributes.instance_state(instance)
s = _state_session(state)
if s:
s._expunge_state(state)
# remove expired state and
# deferred callables
state.callables.clear()
if state.key:
del state.key
if state.deleted:
del state.deleted
def object_session(instance):
"""Return the ``Session`` to which the instance belongs.
If the instance is not a mapped instance, an error is raised.
"""
try:
return _state_session(attributes.instance_state(instance))
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
_new_sessionid = util.counter()
| gpl-2.0 |
gdgellatly/OCB1 | openerp/addons/base/__init__.py | 64 | 1117 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir
import module
import res
import report
import test
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kohnle-lernmodule/KITexe201based | exe/engine/reflectionidevice.py | 6 | 8153 | # ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
# Copyright 2004-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
A Reflection Idevice presents question/s for the student to think about
before they look at the answer/s
"""
import logging
from exe.engine.idevice import Idevice
from exe.engine.translate import lateTranslate
from exe.engine.field import TextAreaField
import re
log = logging.getLogger(__name__)
# ===========================================================================
class ReflectionIdevice(Idevice):
"""
A Reflection Idevice presents question/s for the student to think about
before they look at the answer/s
"""
persistenceVersion = 8
def __init__(self, activity = "", answer = ""):
"""
Initialize
"""
Idevice.__init__(self,
x_(u"Reflection"),
x_(u"University of Auckland"),
x_(u"""Reflection is a teaching method often used to
connect theory to practice. Reflection tasks often provide learners with an
opportunity to observe and reflect on their observations before presenting
these as a piece of academic work. Journals, diaries, profiles and portfolios
are useful tools for collecting observation data. Rubrics and guides can be
effective feedback tools."""), u"", u"reflection")
self.emphasis = Idevice.SomeEmphasis
self._activityInstruc = x_(u"""Enter a question for learners
to reflect upon.""")
self._answerInstruc = x_(u"""Describe how learners will assess how
they have done in the exercise (rubrics are useful devices for providing
reflective feedback).""")
self.systemResources += ["common.js"]
self.activityTextArea = TextAreaField(x_(u'Reflective question:'),
self._activityInstruc, activity)
self.activityTextArea.idevice = self
self.answerTextArea = TextAreaField(x_(u'Feedback:'),
self._answerInstruc, answer)
self.answerTextArea.idevice = self
# Properties
activityInstruc = lateTranslate('activityInstruc')
answerInstruc = lateTranslate('answerInstruc')
def getResourcesField(self, this_resource):
"""
implement the specific resource finding mechanism for this iDevice:
"""
# be warned that before upgrading, this iDevice field could not exist:
if hasattr(self, 'activityTextArea')\
and hasattr(self.activityTextArea, 'images'):
for this_image in self.activityTextArea.images:
if hasattr(this_image, '_imageResource') \
and this_resource == this_image._imageResource:
return self.activityTextArea
# be warned that before upgrading, this iDevice field could not exist:
if hasattr(self, 'answerTextArea')\
and hasattr(self.answerTextArea, 'images'):
for this_image in self.answerTextArea.images:
if hasattr(this_image, '_imageResource') \
and this_resource == this_image._imageResource:
return self.answerTextArea
return None
def getRichTextFields(self):
"""
Like getResourcesField(), a general helper to allow nodes to search
through all of their fields without having to know the specifics of each
iDevice type.
"""
fields_list = []
if hasattr(self, 'activityTextArea'):
fields_list.append(self.activityTextArea)
if hasattr(self, 'answerTextArea'):
fields_list.append(self.answerTextArea)
return fields_list
def burstHTML(self, i):
"""
takes a BeautifulSoup fragment (i) and bursts its contents to
import this idevice from a CommonCartridge export
"""
# Reflection Idevice:
title = i.find(name='h2', attrs={'class' : 'iDeviceTitle' })
self.title = title.renderContents().decode('utf-8')
reflections = i.findAll(name='div', attrs={'id' : re.compile('^ta') })
# should be exactly two of these:
# 1st = field[0] == Activity
if len(reflections) >= 1:
self.activityTextArea.content_wo_resourcePaths = \
reflections[0].renderContents().decode('utf-8')
# and add the LOCAL resource paths back in:
self.activityTextArea.content_w_resourcePaths = \
self.activityTextArea.MassageResourceDirsIntoContent( \
self.activityTextArea.content_wo_resourcePaths)
self.activityTextArea.content = \
self.activityTextArea.content_w_resourcePaths
# 2nd = field[1] == Answer
if len(reflections) >= 2:
self.answerTextArea.content_wo_resourcePaths = \
reflections[1].renderContents().decode('utf-8')
# and add the LOCAL resource paths back in:
self.answerTextArea.content_w_resourcePaths = \
self.answerTextArea.MassageResourceDirsIntoContent( \
self.answerTextArea.content_wo_resourcePaths)
self.answerTextArea.content = \
self.answerTextArea.content_w_resourcePaths
def upgradeToVersion1(self):
"""
Upgrades the node from version 0 to 1.
"""
log.debug(u"Upgrading iDevice")
self.icon = u"reflection"
def upgradeToVersion2(self):
"""
Upgrades the node from 1 (v0.5) to 2 (v0.6).
Old packages will lose their icons, but they will load.
"""
log.debug(u"Upgrading iDevice")
self.emphasis = Idevice.SomeEmphasis
def upgradeToVersion3(self):
"""
Upgrades v0.6 to v0.7.
"""
self.lastIdevice = False
def upgradeToVersion4(self):
"""
Upgrades to exe v0.10
"""
self._upgradeIdeviceToVersion1()
self._activityInstruc = self.__dict__['activityInstruc']
self._answerInstruc = self.__dict__['answerInstruc']
def upgradeToVersion5(self):
"""
Upgrades to exe v0.10
"""
self._upgradeIdeviceToVersion1()
def upgradeToVersion6(self):
"""
Upgrades to v0.12
"""
self._upgradeIdeviceToVersion2()
self.systemResources += ["common.js"]
def upgradeToVersion7(self):
"""
Upgrades to somewhere before version 0.25 (post-v0.24)
Taking the old unicode string fields, and converting them
into image-enabled TextAreaFields:
"""
self.activityTextArea = TextAreaField(x_(u'Reflective question:'),
self._activityInstruc, self.activity)
self.activityTextArea.idevice = self
self.answerTextArea = TextAreaField(x_(u'Feedback:'),
self._answerInstruc, self.answer)
self.answerTextArea.idevice = self
def upgradeToVersion8(self):
"""
Delete icon from system resources
"""
self._upgradeIdeviceToVersion3()
# ===========================================================================
| gpl-2.0 |
mitchellrj/neo4j-rest-client | neo4jrestclient/tests/test_indices.py | 3 | 9478 | # -*- coding: utf-8 -*-
from datetime import datetime
import unittest
import os
from neo4jrestclient import client
from neo4jrestclient.exceptions import NotFoundError, StatusException
from neo4jrestclient.utils import PY2
NEO4J_URL = os.environ.get('NEO4J_URL', "http://localhost:7474/db/data/")
NEO4J_VERSION = os.environ.get('NEO4J_VERSION', None)
class GraphDatabaseTesCase(unittest.TestCase):
def setUp(self):
self.url = NEO4J_URL
self.gdb = client.GraphDatabase(self.url)
def tearDown(self):
if self.gdb:
self.gdb.flush()
class IndicesTestCase(GraphDatabaseTesCase):
def test_create_index_for_nodes(self):
n1 = self.gdb.nodes.create(name="John Doe", place="Texas")
index = self.gdb.nodes.indexes.create(name="doe")
index["surnames"]["d"] = n1
self.assertTrue(n1 in index["surnames"]["d"])
def test_create_index_for_nodes_and_dots(self):
# From https://github.com/versae/neo4j-rest-client/issues/43
n1 = self.gdb.nodes.create(name="John.Doe", place="Texas.s")
index = self.gdb.nodes.indexes.create(name="dots")
index["surnames.s"]["d.d"] = n1
self.assertTrue(n1 in index["surnames.s"]["d.d"])
def test_create_index_for_nodes_unicode(self):
n1 = self.gdb.nodes.create(name="Lemmy", band="Motörhead")
index = self.gdb.nodes.indexes.create(name="doe")
index["bands"]["Motörhead"] = n1
self.assertTrue(n1 in index["bands"]["Motörhead"])
def test_create_index_for_nodes_and_boolean(self):
n1 = self.gdb.nodes.create(name="John", is_real=True, is_fake=False)
index = self.gdb.nodes.indexes.create(name="boolean")
index["is_real"][True] = n1
index["is_fake"][False] = n1
self.assertTrue(n1 in index["is_real"][True])
self.assertTrue(n1 in index["is_fake"][False])
def test_create_index_for_nodes_and_number(self):
n1 = self.gdb.nodes.create(name="John", age=30, mean=2.7)
index = self.gdb.nodes.indexes.create(name="number")
index["age"][30] = n1
index["mean"][2.7] = n1
self.assertTrue(n1 in index["age"][30])
self.assertTrue(n1 in index["mean"][2.7])
def test_create_index_for_nodes_and_unicode(self):
index = self.gdb.nodes.indexes.create(name="unicode")
n1 = self.gdb.nodes.create(name="First")
key = u"Profesión"
value = u"Înformáticö"
n1.set(key, value)
index[key][value] = n1
self.assertTrue(n1 in index[key][value])
n2 = self.gdb.nodes.create(name="Second")
key = u"Título/Nombre"
value = u"Necronomicón"
n2.set(key, value)
index[key][value] = n2
self.assertTrue(n2 in index[key][value])
def test_create_index_for_nodes_url_safe(self):
n1 = self.gdb.nodes.create(name="Brian", place="AC/DC")
index = self.gdb.nodes.indexes.create(name="doe")
index["bands"]["AC/DC"] = n1
self.assertTrue(n1 in index["bands"]["AC/DC"])
def test_delete_index_for_nodes(self):
n1 = self.gdb.nodes.create(name="John Doe", place="Texas")
index = self.gdb.nodes.indexes.create(name="doe")
index["surnames"]["d"] = n1
index.delete()
self.assertRaises(NotFoundError,
index["surnames"].__getitem__, "d")
def test_create_index_for_relationships(self):
n1 = self.gdb.nodes.create(name="John Doe", place="Texas")
n2 = self.gdb.nodes.create(name="Michael Doe", place="Tijuana")
r1 = self.gdb.relationships.create(n1, "Hates", n2)
index = self.gdb.relationships.indexes.create(name="brothers")
index["feeling"]["hate"] = r1
self.assertTrue(r1 in index["feeling"]["hate"])
def test_delete_node_from_index(self):
n1 = self.gdb.nodes.create(name="John Doe", place="Texas")
index = self.gdb.nodes.indexes.create(name="doe")
index["surnames"]["d"] = n1
index.delete("surnames", "d", n1)
self.assertTrue(n1 not in index["surnames"]["d"])
def test_delete_node_from_index_with_no_value(self):
n1 = self.gdb.nodes.create(name="John Doe", place="Texas")
index = self.gdb.nodes.indexes.create(name="doe")
index["surnames"]["d"] = n1
index.delete("surnames", None, n1)
self.assertTrue(n1 not in index["surnames"]["d"])
def test_delete_node_from_index_with_no_value_nor_key(self):
n1 = self.gdb.nodes.create(name="John Doe", place="Texas")
index = self.gdb.nodes.indexes.create(name="doe")
index["surnames"]["d"] = n1
index.delete(None, None, n1)
self.assertTrue(n1 not in index["surnames"]["d"])
def test_delete_relationship_from_index(self):
n1 = self.gdb.nodes.create(name="John Doe", place="Texas")
n2 = self.gdb.nodes.create(name="Michael Doe", place="Tijuana")
r1 = self.gdb.relationships.create(n1, "Hates", n2)
index = self.gdb.relationships.indexes.create(name="brothers")
index["feeling"]["hate"] = r1
index.delete("feeling", "hate", r1)
self.assertTrue(r1 not in index["feeling"]["hate"])
def test_delete_index_for_relationships(self):
n1 = self.gdb.nodes.create(name="John Doe", place="Texas")
n2 = self.gdb.nodes.create(name="Michael Doe", place="Tijuana")
r1 = self.gdb.relationships.create(n1, "Hates", n2)
index = self.gdb.relationships.indexes.create(name="brothers")
index["feeling"]["hate"] = r1
index.delete()
self.assertRaises(NotFoundError,
index["feeling"].__getitem__, "hate")
@unittest.skipIf(not PY2,
"Lucene Query Builder is not Python3 compliant yet")
def test_query_index(self):
Q = client.Q
n1 = self.gdb.nodes.create(name="John Doe", place="Texas")
n2 = self.gdb.nodes.create(name="Michael Donald", place="Tijuana")
index = self.gdb.nodes.indexes.create(name="do", type="fulltext")
index["surnames"]["doe"] = n1
index["surnames"]["donald"] = n2
index['place']['Texas'] = n1
index['place']['Tijuana'] = n2
results = index.query("surnames", "do*")
self.assertTrue(n1 in results and n2 in results)
results = index.query("surnames:do*")
self.assertTrue(n1 in results and n2 in results)
results = index.query('surnames', Q('do*', wildcard=True))
self.assertTrue(n1 in results and n2 in results)
results = index.query(Q('surnames', 'do*', wildcard=True))
self.assertTrue(n1 in results and n2 in results)
results = index.query(Q('surnames', 'do*', wildcard=True)
& Q('place', 'Tijuana'))
self.assertTrue(n1 not in results and n2 in results)
results = index.query(-Q('surnames', 'donald') | +Q('place', 'Texas'))
self.assertTrue(n2 not in results and n1 in results)
@unittest.skipIf(NEO4J_VERSION in ["1.6.3", "1.7.2", "1.8.3"],
"Not supported by Neo4j {}".format(NEO4J_VERSION))
def test_index_get_or_create_created(self):
index = self.gdb.nodes.indexes.create(name="doe")
properties = {
"name": "Lemmy",
"band": "Motörhead",
}
n1 = index.get_or_create(key="bands", value="Motörhead",
properties=properties)
self.assertTrue(n1 in index["bands"]["Motörhead"])
@unittest.skipIf(NEO4J_VERSION in ["1.6.3", "1.7.2", "1.8.3"],
"Not supported by Neo4j {}".format(NEO4J_VERSION))
def test_index_get_or_create_existing(self):
index = self.gdb.nodes.indexes.create(name="doe")
now = datetime.now().strftime('%s%f')
properties = {
"name": "Lemmy",
"band": "Motörhead",
"now": now,
}
n1 = self.gdb.nodes.create(**properties)
index["now"][now] = n1
n2 = index.get_or_create(key="now", value=now,
properties=properties)
self.assertEqual(n1, n2)
self.assertTrue(n1 in index["now"][now])
self.assertTrue(n2 in index["now"][now])
@unittest.skipIf(NEO4J_VERSION in ["1.6.3", "1.7.2", "1.8.3"],
"Not supported by Neo4j {}".format(NEO4J_VERSION))
def test_index_create_or_fail_created(self):
index = self.gdb.nodes.indexes.create(name="doe")
properties = {
"name": "Lemmy",
"band": "Motörhead",
}
n1 = index.create_or_fail(key="bands", value="Motörhead",
properties=properties)
self.assertTrue(n1 in index["bands"]["Motörhead"])
@unittest.skipIf(NEO4J_VERSION in ["1.6.3", "1.7.2", "1.8.3"],
"Not supported by Neo4j {}".format(NEO4J_VERSION))
def test_index_create_or_fail_existing(self):
index = self.gdb.nodes.indexes.create(name="doe")
now = datetime.now().strftime('%s%f')
properties = {
"name": "Lemmy",
"band": "Motörhead",
"now": now,
}
n1 = self.gdb.nodes.create(**properties)
index["now"][now] = n1
self.assertRaises((Exception, ValueError, StatusException),
index.create_or_fail,
key="now", value=now, properties=properties)
| gpl-3.0 |
msabramo/requests | requests/async.py | 33 | 2717 | # -*- coding: utf-8 -*-
"""
requests.async
~~~~~~~~~~~~~~
This module contains an asynchronous replica of ``requests.api``, powered
by gevent. All API methods return a ``Request`` instance (as opposed to
``Response``). A list of requests can be sent with ``map()``.
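A typical usage sketch (URLs are illustrative)::
    from requests import async
    urls = ['http://example.org/', 'http://example.net/']
    rs = [async.get(u) for u in urls]
    responses = async.map(rs)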
"""
try:
import gevent
from gevent import monkey as curious_george
from gevent.pool import Pool
except ImportError:
raise RuntimeError('Gevent is required for requests.async.')
# Monkey-patch.
curious_george.patch_all(thread=False, select=False)
from . import api
__all__ = (
'map', 'imap',
'get', 'options', 'head', 'post', 'put', 'patch', 'delete', 'request'
)
def patched(f):
"""Patches a given API function to not send."""
def wrapped(*args, **kwargs):
kwargs['return_response'] = False
kwargs['prefetch'] = True
config = kwargs.get('config', {})
config.update(safe_mode=True)
kwargs['config'] = config
return f(*args, **kwargs)
return wrapped
def send(r, pool=None, prefetch=False):
"""Sends the request object using the specified pool. If a pool isn't
specified this method blocks. Pools are useful because you can specify size
and can hence limit concurrency."""
if pool is not None:
return pool.spawn(r.send, prefetch=prefetch)
return gevent.spawn(r.send, prefetch=prefetch)
# Patched requests.api functions.
get = patched(api.get)
options = patched(api.options)
head = patched(api.head)
post = patched(api.post)
put = patched(api.put)
patch = patched(api.patch)
delete = patched(api.delete)
request = patched(api.request)
def map(requests, prefetch=True, size=None):
"""Concurrently converts a list of Requests to Responses.
:param requests: a collection of Request objects.
:param prefetch: If False, the content will not be downloaded immediately.
:param size: Specifies the number of requests to make at a time. If None, no throttling occurs.
"""
requests = list(requests)
pool = Pool(size) if size else None
jobs = [send(r, pool, prefetch=prefetch) for r in requests]
gevent.joinall(jobs)
return [r.response for r in requests]
def imap(requests, prefetch=True, size=2):
"""Concurrently converts a generator object of Requests to
a generator of Responses.
:param requests: a generator of Request objects.
:param prefetch: If False, the content will not be downloaded immediately.
:param size: Specifies the number of requests to make at a time. default is 2
"""
pool = Pool(size)
def send(r):
r.send(prefetch)
return r.response
for r in pool.imap_unordered(send, requests):
yield r
pool.join() | isc |
fialakarel/smallClown | lib/pubsubclient-2.6/tests/testcases/mqtt_basic.py | 42 | 1151 | import unittest
import settings
import time
import mosquitto
import serial
def on_message(mosq, obj, msg):
obj.message_queue.append(msg)
class mqtt_basic(unittest.TestCase):
message_queue = []
@classmethod
def setUpClass(self):
self.client = mosquitto.Mosquitto("pubsubclient_ut", clean_session=True,obj=self)
self.client.connect(settings.server_ip)
self.client.on_message = on_message
self.client.subscribe("outTopic",0)
@classmethod
def tearDownClass(self):
self.client.disconnect()
def test_one(self):
i=30
while len(self.message_queue) == 0 and i > 0:
self.client.loop()
time.sleep(0.5)
i -= 1
self.assertTrue(i>0, "message receive timed-out")
self.assertEqual(len(self.message_queue), 1, "unexpected number of messages received")
msg = self.message_queue[0]
self.assertEqual(msg.mid,0,"message id not 0")
self.assertEqual(msg.topic,"outTopic","message topic incorrect")
self.assertEqual(msg.payload,"hello world")
self.assertEqual(msg.qos,0,"message qos not 0")
self.assertEqual(msg.retain,False,"message retain flag incorrect")
| mit |
kennedyshead/home-assistant | tests/components/google_translate/test_tts.py | 8 | 4158 | """The tests for the Google speech platform."""
import os
import shutil
from unittest.mock import patch
from gtts import gTTSError
import pytest
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
DOMAIN as DOMAIN_MP,
SERVICE_PLAY_MEDIA,
)
import homeassistant.components.tts as tts
from homeassistant.config import async_process_ha_core_config
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service
from tests.components.tts.test_init import mutagen_mock # noqa: F401
@pytest.fixture(autouse=True)
def cleanup_cache(hass):
"""Clean up TTS cache."""
yield
default_tts = hass.config.path(tts.DEFAULT_CACHE_DIR)
if os.path.isdir(default_tts):
shutil.rmtree(default_tts)
@pytest.fixture
async def calls(hass):
"""Mock media player calls."""
return async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
@pytest.fixture(autouse=True)
async def setup_internal_url(hass):
"""Set up internal url."""
await async_process_ha_core_config(
hass, {"internal_url": "http://example.local:8123"}
)
@pytest.fixture
def mock_gtts():
"""Mock gtts."""
with patch("homeassistant.components.google_translate.tts.gTTS") as mock_gtts:
yield mock_gtts
async def test_service_say(hass, mock_gtts, calls):
"""Test service call say."""
await async_setup_component(
hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "google_translate"}}
)
await hass.services.async_call(
tts.DOMAIN,
"google_translate_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is a person at the front door.",
},
blocking=True,
)
assert len(calls) == 1
assert len(mock_gtts.mock_calls) == 2
assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find(".mp3") != -1
assert mock_gtts.mock_calls[0][2] == {
"text": "There is a person at the front door.",
"lang": "en",
}
async def test_service_say_german_config(hass, mock_gtts, calls):
"""Test service call say with german code in the config."""
await async_setup_component(
hass,
tts.DOMAIN,
{tts.DOMAIN: {"platform": "google_translate", "language": "de"}},
)
await hass.services.async_call(
tts.DOMAIN,
"google_translate_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is a person at the front door.",
},
blocking=True,
)
assert len(calls) == 1
assert len(mock_gtts.mock_calls) == 2
assert mock_gtts.mock_calls[0][2] == {
"text": "There is a person at the front door.",
"lang": "de",
}
async def test_service_say_german_service(hass, mock_gtts, calls):
"""Test service call say with german code in the service."""
config = {
tts.DOMAIN: {"platform": "google_translate", "service_name": "google_say"}
}
await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"google_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is a person at the front door.",
tts.ATTR_LANGUAGE: "de",
},
blocking=True,
)
assert len(calls) == 1
assert len(mock_gtts.mock_calls) == 2
assert mock_gtts.mock_calls[0][2] == {
"text": "There is a person at the front door.",
"lang": "de",
}
async def test_service_say_error(hass, mock_gtts, calls):
"""Test service call say with http response 400."""
mock_gtts.return_value.write_to_fp.side_effect = gTTSError
await async_setup_component(
hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "google_translate"}}
)
await hass.services.async_call(
tts.DOMAIN,
"google_translate_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is a person at the front door.",
},
blocking=True,
)
assert len(calls) == 0
assert len(mock_gtts.mock_calls) == 2
| apache-2.0 |
google/contentbox | main/templatetags/profile_thumbnail.py | 4 | 1039 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import template
from social.apps.django_app.default.models import UserSocialAuth
register = template.Library()
@register.filter(name='profile_thumbnail')
# Converts youtube URL into embed HTML
def youtube_embed_url(user):
try:
return UserSocialAuth.get_social_auth('google-oauth2',user.email).extra_data['image']['url']
except:
return '/static/global/images/placeholder/user.png'
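# Illustrative template usage (editor's example, not part of the original file):
#     {% load profile_thumbnail %}
#     <img src="{{ user|profile_thumbnail }}" alt="profile picture">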
| apache-2.0 |
Tennyson53/SUR | magnum/tests/unit/db/sqlalchemy/test_types.py | 15 | 2573 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for custom SQLAlchemy types via Magnum DB."""
from oslo_db import exception as db_exc
from magnum.common import utils as magnum_utils
import magnum.db.sqlalchemy.api as sa_api
from magnum.db.sqlalchemy import models
from magnum.tests.unit.db import base
class SqlAlchemyCustomTypesTestCase(base.DbTestCase):
def test_JSONEncodedDict_default_value(self):
# Create pod w/o labels
pod1_id = magnum_utils.generate_uuid()
self.dbapi.create_pod({'uuid': pod1_id})
pod1 = sa_api.model_query(models.Pod).filter_by(uuid=pod1_id).one()
self.assertEqual({}, pod1.labels)
# Create pod with labels
pod2_id = magnum_utils.generate_uuid()
self.dbapi.create_pod({'uuid': pod2_id, 'labels': {'bar': 'foo'}})
pod2 = sa_api.model_query(models.Pod).filter_by(uuid=pod2_id).one()
self.assertEqual('foo', pod2.labels['bar'])
def test_JSONEncodedDict_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.create_pod,
{'labels':
['this is not a dict']})
def test_JSONEncodedList_default_value(self):
# Create pod w/o images
pod1_id = magnum_utils.generate_uuid()
self.dbapi.create_pod({'uuid': pod1_id})
pod1 = sa_api.model_query(models.Pod).filter_by(uuid=pod1_id).one()
self.assertEqual([], pod1.images)
# Create pod with images
pod2_id = magnum_utils.generate_uuid()
self.dbapi.create_pod({'uuid': pod2_id,
'images': ['myimage1', 'myimage2']})
pod2 = sa_api.model_query(models.Pod).filter_by(uuid=pod2_id).one()
self.assertEqual(['myimage1', 'myimage2'], pod2.images)
def test_JSONEncodedList_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.create_pod,
{'images':
{'this is not a list': 'test'}})
| apache-2.0 |
KurtDeGreeff/infernal-twin | build/pip/pip/_vendor/packaging/_compat.py | 901 | 1253 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# flake8: noqa
if PY3:
string_types = str,
else:
string_types = basestring,
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
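# Editor's illustrative usage (not part of the original module): build a class
# whose metaclass is applied identically on Python 2 and Python 3.
#
#     class Meta(type):
#         pass
#
#     class Base(with_metaclass(Meta, object)):
#         pass
#
#     assert type(Base) is Meta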
| gpl-3.0 |
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/libs/hadoop/src/hadoop/core_site.py | 32 | 2327 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Deprecated: not used anymore and will be empty
"""
import errno
import logging
import os.path
import conf
import confparse
__all__ = ['get_conf', 'get_trash_interval']
LOG = logging.getLogger(__name__)
_CORE_SITE_PATH = None # Path to core-site.xml
_CORE_SITE_DICT = None # A dictionary of name/value config options
_CNF_TRASH_INTERVAL = 'fs.trash.interval'
def reset():
"""Reset the cached conf"""
global _CORE_SITE_DICT
_CORE_SITE_DICT = None
def get_conf():
"""get_conf() -> ConfParse object for core-site.xml"""
if _CORE_SITE_DICT is None:
_parse_core_site()
return _CORE_SITE_DICT
def _parse_core_site():
"""
Parse core-site.xml and store in _CORE_SITE_DICT
"""
global _CORE_SITE_DICT
global _CORE_SITE_PATH
  for identifier in conf.HDFS_CLUSTERS.get():
    try:
      _CORE_SITE_PATH = os.path.join(conf.HDFS_CLUSTERS[identifier].HADOOP_CONF_DIR.get(), 'core-site.xml') # Will KeyError and be empty as HADOOP_CONF_DIR does not exist anymore
data = file(_CORE_SITE_PATH, 'r').read()
break
except KeyError:
data = ""
except IOError, err:
if err.errno != errno.ENOENT:
LOG.error('Cannot read from "%s": %s' % (_CORE_SITE_PATH, err))
return
# Keep going and make an empty ConfParse
data = ""
_CORE_SITE_DICT = confparse.ConfParse(data)
def get_trash_interval():
"""
Get trash interval
Also indicates whether trash is enabled or not.
"""
return get_conf().get(_CNF_TRASH_INTERVAL)
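# Editor's illustrative usage (assumed import path, not part of the original
# module):
#     from hadoop import core_site
#     interval = core_site.get_trash_interval()  # a falsy value means trash is disabled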
| gpl-2.0 |
sitsbeyou/Django-facebook | docs/docs_env/Lib/encodings/iso2022_jp_3.py | 816 | 1061 | #
# iso2022_jp_3.py: Python Unicode Codec for ISO2022_JP_3
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_3')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp_3',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| bsd-3-clause |
ccomb/OpenUpgrade | openerp/addons/base/module/wizard/base_module_upgrade.py | 40 | 4914 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from openerp.tools.translate import _
class base_module_upgrade(osv.osv_memory):
""" Module Upgrade """
_name = "base.module.upgrade"
_description = "Module Upgrade"
_columns = {
'module_info': fields.text('Modules to Update',readonly=True),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(base_module_upgrade, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
if view_type != 'form':
return res
context = {} if context is None else context
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if (not record_id) or (not active_model):
return res
ids = self.get_module_list(cr, uid, context=context)
if not ids:
res['arch'] = '''<form string="Upgrade Completed" version="7.0">
<separator string="Upgrade Completed" colspan="4"/>
<footer>
<button name="config" string="Start Configuration" type="object" class="oe_highlight"/> or
<button special="cancel" string="Close" class="oe_link"/>
</footer>
</form>'''
return res
def get_module_list(self, cr, uid, context=None):
mod_obj = self.pool.get('ir.module.module')
ids = mod_obj.search(cr, uid, [
('state', 'in', ['to upgrade', 'to remove', 'to install'])])
return ids
def default_get(self, cr, uid, fields, context=None):
mod_obj = self.pool.get('ir.module.module')
ids = self.get_module_list(cr, uid, context=context)
res = mod_obj.read(cr, uid, ids, ['name','state'], context)
return {'module_info': '\n'.join(map(lambda x: x['name']+' : '+x['state'], res))}
def upgrade_module(self, cr, uid, ids, context=None):
ir_module = self.pool.get('ir.module.module')
# install/upgrade: double-check preconditions
ids = ir_module.search(cr, uid, [('state', 'in', ['to upgrade', 'to install'])])
if ids:
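            # Collect the names of dependencies whose providing module is
            # unknown (no matching row, state IS NULL) or still uninstalled.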
cr.execute("""SELECT d.name FROM ir_module_module m
JOIN ir_module_module_dependency d ON (m.id = d.module_id)
LEFT JOIN ir_module_module m2 ON (d.name = m2.name)
WHERE m.id in %s and (m2.state IS NULL or m2.state IN %s)""",
(tuple(ids), ('uninstalled',)))
unmet_packages = [x[0] for x in cr.fetchall()]
if unmet_packages:
raise osv.except_osv(_('Unmet Dependency!'),
_('Following modules are not installed or unknown: %s') % ('\n\n' + '\n'.join(unmet_packages)))
ir_module.download(cr, uid, ids, context=context)
cr.commit() # save before re-creating cursor below
openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)
ir_model_data = self.pool.get('ir.model.data')
__, res_id = ir_model_data.get_object_reference(cr, uid, 'base', 'view_base_module_upgrade_install')
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.module.upgrade',
'views': [(res_id, 'form')],
'view_id': False,
'type': 'ir.actions.act_window',
'target': 'new',
}
def config(self, cr, uid, ids, context=None):
return self.pool.get('res.config').next(cr, uid, [], context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sinkuri256/python-for-android | python3-alpha/extra_modules/gdata/exif/__init__.py | 45 | 6980 | # -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.exif, implementing the exif namespace in gdata
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module maps elements from the {EXIF} namespace[1] to GData objects.
These elements describe image data, using exif attributes[2].
Picasa Web Albums uses the exif namespace to represent Exif data encoded
in a photo [3].
Picasa Web Albums uses the following exif elements:
exif:distance
exif:exposure
exif:flash
exif:focallength
exif:fstop
exif:imageUniqueID
exif:iso
exif:make
exif:model
exif:tags
exif:time
[1]: http://schemas.google.com/photos/exif/2007.
[2]: http://en.wikipedia.org/wiki/Exif
[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference
"""
__author__ = '[email protected]'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
class ExifBaseElement(atom.AtomBase):
"""Base class for elements in the EXIF_NAMESPACE (%s). To add new elements, you only need to add the element tag name to self._tag
""" % EXIF_NAMESPACE
_tag = ''
_namespace = EXIF_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, name=None, extension_elements=None,
extension_attributes=None, text=None):
self.name = name
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Distance(ExifBaseElement):
"(float) The distance to the subject, e.g. 0.0"
_tag = 'distance'
def DistanceFromString(xml_string):
return atom.CreateClassFromXMLString(Distance, xml_string)
class Exposure(ExifBaseElement):
"(float) The exposure time used, e.g. 0.025 or 8.0E4"
_tag = 'exposure'
def ExposureFromString(xml_string):
return atom.CreateClassFromXMLString(Exposure, xml_string)
class Flash(ExifBaseElement):
"""(string) Boolean value indicating whether the flash was used.
The .text attribute will either be `true' or `false'
As a convenience, this object's .bool method will return what you want,
so you can say:
flash_used = bool(Flash)
"""
_tag = 'flash'
def __bool__(self):
    if self.text.lower() in ('true','false'):
      return self.text.lower() == 'true'
    # Fall back to False for unexpected values so bool() never receives None.
    return False
def FlashFromString(xml_string):
return atom.CreateClassFromXMLString(Flash, xml_string)
class Focallength(ExifBaseElement):
"(float) The focal length used, e.g. 23.7"
_tag = 'focallength'
def FocallengthFromString(xml_string):
return atom.CreateClassFromXMLString(Focallength, xml_string)
class Fstop(ExifBaseElement):
"(float) The fstop value used, e.g. 5.0"
_tag = 'fstop'
def FstopFromString(xml_string):
return atom.CreateClassFromXMLString(Fstop, xml_string)
class ImageUniqueID(ExifBaseElement):
"(string) The unique image ID for the photo. Generated by Google Photo servers"
_tag = 'imageUniqueID'
def ImageUniqueIDFromString(xml_string):
return atom.CreateClassFromXMLString(ImageUniqueID, xml_string)
class Iso(ExifBaseElement):
"(int) The iso equivalent value used, e.g. 200"
_tag = 'iso'
def IsoFromString(xml_string):
return atom.CreateClassFromXMLString(Iso, xml_string)
class Make(ExifBaseElement):
"(string) The make of the camera used, e.g. Fictitious Camera Company"
_tag = 'make'
def MakeFromString(xml_string):
return atom.CreateClassFromXMLString(Make, xml_string)
class Model(ExifBaseElement):
"(string) The model of the camera used,e.g AMAZING-100D"
_tag = 'model'
def ModelFromString(xml_string):
return atom.CreateClassFromXMLString(Model, xml_string)
class Time(ExifBaseElement):
"""(int) The date/time the photo was taken, e.g. 1180294337000.
Represented as the number of milliseconds since January 1st, 1970.
The value of this element will always be identical to the value
of the <gphoto:timestamp>.
Look at this object's .isoformat() for a human friendly datetime string:
photo_epoch = Time.text # 1180294337000
photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'
Alternatively:
photo_datetime = Time.datetime() # (requires python >= 2.3)
"""
_tag = 'time'
def isoformat(self):
"""(string) Return the timestamp as a ISO 8601 formatted string,
e.g. '2007-05-27T19:32:17.000Z'
"""
import time
epoch = float(self.text)/1000
return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch))
def datetime(self):
"""(datetime.datetime) Return the timestamp as a datetime.datetime object
Requires python 2.3
"""
import datetime
epoch = float(self.text)/1000
return datetime.datetime.fromtimestamp(epoch)
def TimeFromString(xml_string):
return atom.CreateClassFromXMLString(Time, xml_string)
class Tags(ExifBaseElement):
"""The container for all exif elements.
The <exif:tags> element can appear as a child of a photo entry.
"""
_tag = 'tags'
_children = atom.AtomBase._children.copy()
_children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop)
_children['{%s}make' % EXIF_NAMESPACE] = ('make', Make)
_children['{%s}model' % EXIF_NAMESPACE] = ('model', Model)
_children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance)
_children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure)
_children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash)
_children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength)
_children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso)
_children['{%s}time' % EXIF_NAMESPACE] = ('time', Time)
_children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID)
def __init__(self, extension_elements=None, extension_attributes=None, text=None):
ExifBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.fstop=None
self.make=None
self.model=None
self.distance=None
self.exposure=None
self.flash=None
self.focallength=None
self.iso=None
self.time=None
self.imageUniqueID=None
def TagsFromString(xml_string):
return atom.CreateClassFromXMLString(Tags, xml_string)
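# Editor's illustrative example (not part of the original module): parse an
# <exif:tags> block and read the camera model.
#
#     xml = ('<exif:tags xmlns:exif="http://schemas.google.com/photos/exif/2007">'
#            '<exif:model>AMAZING-100D</exif:model></exif:tags>')
#     tags = TagsFromString(xml)
#     print(tags.model.text)  # -> AMAZING-100D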
| apache-2.0 |
batxes/4Cin | Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/Six_zebra_models32405.py | 4 | 13931 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
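# Prefer importing Marker_Set directly; fall back to creating marker sets
# through the volume path dialog when that import is unavailable.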
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((4509.93, -2984.29, 4692.57), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((4475.02, -3423.4, 3748.71), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((4758.46, -1535.37, 3950.4), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((5079.17, 740.939, 4567.49), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((5166.8, 1433.88, 4740.61), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((6481.54, -89.4396, 4063.93), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((7434.36, 1523.33, 3583.75), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((8622.87, 1423.36, 2335.11), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((9150.7, 2533.68, 1366.88), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((10268.5, 3848.5, 771.47), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((9256.35, 4844.19, -156.717), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((9713.09, 4497.06, -2190.03), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((10289, 4058.94, -4124.05), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((9765.45, 2805.38, -3222.31), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((11029.5, 3823.09, -2723.89), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((11464.2, 4565.94, -1380.1), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((11199.6, 4356.69, 23.8284), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((10841.5, 4345.34, 1541.41), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((11142.5, 2668.39, 2033.87), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((11598.3, 2848.4, 3453.71), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((12597.5, 3357.53, 4849.27), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((14131.2, 3506.84, 5536.34), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((12826, 4058.38, 5289.36), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((11428.4, 5476.83, 4550.62), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((10712, 7229.46, 3538.12), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((10441.2, 8120.09, 3025.73), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((8423.09, 8434.45, 4829.63), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((7376.32, 9561.94, 5876.18), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((7569.47, 9159.09, 7056.17), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((8470.15, 8914.61, 9183.46), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((7771.42, 8572.16, 8661.58), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((6878.08, 9617.19, 9006.81), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((5457.36, 10122.3, 10703.1), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((4152.69, 9502.98, 10506), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((3368.75, 8211.27, 10452.3), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((2412.32, 7051.91, 11387.9), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((2587.99, 5730.69, 12431.7), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((4170.08, 6041.88, 12375.4), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((4170.78, 7559.03, 12637.9), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((4239.77, 8506.05, 10875.7), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((3173.93, 9168.92, 10899.8), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((4105.56, 8629.66, 10094.8), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((3649.01, 7712.28, 9967.45), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((3894.81, 8476.83, 10189.3), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((5253.01, 9587.06, 9670.7), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((5855.52, 9746.95, 6858.18), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((4998.59, 10174.7, 5241.96), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((4090.92, 9978.19, 4567.28), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((2223.63, 10676.8, 4579.41), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((193.016, 12396, 4805.57), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((322.816, 12328.4, 6503.11), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((3202.23, 12698.5, 6653.02), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((3017.9, 11966.6, 6860.09), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((2056.03, 11200.9, 8321.49), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((1383.93, 9979.35, 8997.46), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((2037.44, 8384.06, 8367.5), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 |
Yong-Lee/django | django/utils/ipv6.py | 225 | 7971 | # This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import range
from django.utils.translation import ugettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message=_("This is not a valid IPv6 address.")):
"""
Cleans an IPv6 address string.
Validity is checked by calling is_valid_ipv6_address() - if an
invalid address is passed, ValidationError is raised.
Replaces the longest continuous zero-sequence with "::" and
removes leading zeroes and makes sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message used in the ValidationError.
Returns:
A compressed IPv6 address, or the same value
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
if not is_valid_ipv6_address(ip_str):
raise ValidationError(error_message, code='invalid')
# This algorithm can only handle fully exploded
# IP strings
ip_str = _explode_shorthand_ip_string(ip_str)
ip_str = _sanitize_ipv4_mapping(ip_str)
# If needed, unpack the IPv4 and return straight away
# - no need in running the rest of the algorithm
if unpack_ipv4:
ipv4_unpacked = _unpack_ipv4(ip_str)
if ipv4_unpacked:
return ipv4_unpacked
hextets = ip_str.split(":")
for index in range(len(hextets)):
# Remove leading zeroes
hextets[index] = hextets[index].lstrip('0')
if not hextets[index]:
hextets[index] = '0'
# Determine best hextet to compress
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
# Compress the most suitable hextet
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
result = ":".join(hextets)
return result.lower()
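# Editor's worked examples (not part of the original module):
#     clean_ipv6_address('2001:0db8:0000:0000:0000:0000:0000:0001')
#         -> '2001:db8::1'
#     clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True)
#         -> '10.10.10.10'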
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
return ip_str.rsplit(':', 1)[1]
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
from django.core.validators import validate_ipv4_address
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
validate_ipv4_address(hextet)
except ValidationError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for __ in range(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
| bsd-3-clause |
mobo95/pyload | module/lib/jinja2/filters.py | 64 | 21750 | # -*- coding: utf-8 -*-
"""
jinja2.filters
~~~~~~~~~~~~~~
Bundled jinja filters.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import math
from random import choice
from operator import itemgetter
from itertools import imap, groupby
from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode
from jinja2.runtime import Undefined
from jinja2.exceptions import FilterArgumentError, SecurityError
_word_re = re.compile(r'\w+(?u)')
def contextfilter(f):
"""Decorator for marking context dependent filters. The current
:class:`Context` will be passed as first argument.
"""
f.contextfilter = True
return f
def evalcontextfilter(f):
"""Decorator for marking eval-context dependent filters. An eval
context object is passed as first argument. For more information
about the eval context, see :ref:`eval-context`.
.. versionadded:: 2.4
"""
f.evalcontextfilter = True
return f
def environmentfilter(f):
"""Decorator for marking evironment dependent filters. The current
:class:`Environment` is passed to the filter as first argument.
"""
f.environmentfilter = True
return f
def do_forceescape(value):
"""Enforce HTML escaping. This will probably double escape variables."""
if hasattr(value, '__html__'):
value = value.__html__()
return escape(unicode(value))
@evalcontextfilter
def do_replace(eval_ctx, s, old, new, count=None):
"""Return a copy of the value with all occurrences of a substring
replaced with a new one. The first argument is the substring
that should be replaced, the second is the replacement string.
If the optional third argument ``count`` is given, only the first
``count`` occurrences are replaced:
.. sourcecode:: jinja
{{ "Hello World"|replace("Hello", "Goodbye") }}
-> Goodbye World
{{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
-> d'oh, d'oh, aaargh
"""
if count is None:
count = -1
if not eval_ctx.autoescape:
return unicode(s).replace(unicode(old), unicode(new), count)
if hasattr(old, '__html__') or hasattr(new, '__html__') and \
not hasattr(s, '__html__'):
s = escape(s)
else:
s = soft_unicode(s)
return s.replace(soft_unicode(old), soft_unicode(new), count)
def do_upper(s):
"""Convert a value to uppercase."""
return soft_unicode(s).upper()
def do_lower(s):
"""Convert a value to lowercase."""
return soft_unicode(s).lower()
@evalcontextfilter
def do_xmlattr(_eval_ctx, d, autospace=True):
"""Create an SGML/XML attribute string based on the items in a dict.
All values that are neither `none` nor `undefined` are automatically
escaped:
.. sourcecode:: html+jinja
<ul{{ {'class': 'my_list', 'missing': none,
'id': 'list-%d'|format(variable)}|xmlattr }}>
...
</ul>
Results in something like this:
.. sourcecode:: html
<ul class="my_list" id="list-42">
...
</ul>
As you can see it automatically prepends a space in front of the item
if the filter returned something unless the second parameter is false.
"""
rv = u' '.join(
u'%s="%s"' % (escape(key), escape(value))
for key, value in d.iteritems()
if value is not None and not isinstance(value, Undefined)
)
if autospace and rv:
rv = u' ' + rv
if _eval_ctx.autoescape:
rv = Markup(rv)
return rv
def do_capitalize(s):
"""Capitalize a value. The first character will be uppercase, all others
lowercase.
"""
return soft_unicode(s).capitalize()
def do_title(s):
"""Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase.
"""
return soft_unicode(s).title()
def do_dictsort(value, case_sensitive=False, by='key'):
"""Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
{% for item in mydict|dictsort %}
sort the dict by key, case insensitive
        {% for item in mydict|dictsort(true) %}
sort the dict by key, case sensitive
{% for item in mydict|dictsort(false, 'value') %}
sort the dict by key, case insensitive, sorted
normally and ordered by value.
"""
if by == 'key':
pos = 0
elif by == 'value':
pos = 1
else:
raise FilterArgumentError('You can only sort by either '
'"key" or "value"')
def sort_func(item):
value = item[pos]
if isinstance(value, basestring) and not case_sensitive:
value = value.lower()
return value
return sorted(value.items(), key=sort_func)
def do_sort(value, reverse=False, case_sensitive=False):
"""Sort an iterable. Per default it sorts ascending, if you pass it
true as first argument it will reverse the sorting.
If the iterable is made of strings the third parameter can be used to
control the case sensitiveness of the comparison which is disabled by
default.
.. sourcecode:: jinja
{% for item in iterable|sort %}
...
{% endfor %}
"""
if not case_sensitive:
def sort_func(item):
if isinstance(item, basestring):
item = item.lower()
return item
else:
sort_func = None
return sorted(value, key=sort_func, reverse=reverse)
def do_default(value, default_value=u'', boolean=False):
"""If the value is undefined it will return the passed default value,
otherwise the value of the variable:
.. sourcecode:: jinja
{{ my_variable|default('my_variable is not defined') }}
This will output the value of ``my_variable`` if the variable was
defined, otherwise ``'my_variable is not defined'``. If you want
to use default with variables that evaluate to false you have to
set the second parameter to `true`:
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }}
"""
if (boolean and not value) or isinstance(value, Undefined):
return default_value
return value
@evalcontextfilter
def do_join(eval_ctx, value, d=u''):
"""Return a string which is the concatenation of the strings in the
sequence. The separator between elements is an empty string per
default, you can define it with the optional parameter:
.. sourcecode:: jinja
{{ [1, 2, 3]|join('|') }}
-> 1|2|3
{{ [1, 2, 3]|join }}
-> 123
"""
    # no automatic escaping? joining is a lot easier then
if not eval_ctx.autoescape:
return unicode(d).join(imap(unicode, value))
# if the delimiter doesn't have an html representation we check
# if any of the items has. If yes we do a coercion to Markup
if not hasattr(d, '__html__'):
value = list(value)
do_escape = False
for idx, item in enumerate(value):
if hasattr(item, '__html__'):
do_escape = True
else:
value[idx] = unicode(item)
if do_escape:
d = escape(d)
else:
d = unicode(d)
return d.join(value)
    # no html involved, so normal joining
return soft_unicode(d).join(imap(soft_unicode, value))
def do_center(value, width=80):
"""Centers the value in a field of a given width."""
return unicode(value).center(width)
@environmentfilter
def do_first(environment, seq):
"""Return the first item of a sequence."""
try:
return iter(seq).next()
except StopIteration:
return environment.undefined('No first item, sequence was empty.')
@environmentfilter
def do_last(environment, seq):
"""Return the last item of a sequence."""
try:
return iter(reversed(seq)).next()
except StopIteration:
return environment.undefined('No last item, sequence was empty.')
@environmentfilter
def do_random(environment, seq):
"""Return a random item from the sequence."""
try:
return choice(seq)
except IndexError:
return environment.undefined('No random item, sequence was empty.')
def do_filesizeformat(value, binary=False):
"""Format the value like a 'human-readable' file size (i.e. 13 KB,
4.1 MB, 102 bytes, etc). Per default decimal prefixes are used (mega,
giga, etc.), if the second parameter is set to `True` the binary
prefixes are used (mebi, gibi).
"""
bytes = float(value)
base = binary and 1024 or 1000
middle = binary and 'i' or ''
if bytes < base:
return "%d Byte%s" % (bytes, bytes != 1 and 's' or '')
elif bytes < base * base:
return "%.1f K%sB" % (bytes / base, middle)
elif bytes < base * base * base:
return "%.1f M%sB" % (bytes / (base * base), middle)
return "%.1f G%sB" % (bytes / (base * base * base), middle)
def do_pprint(value, verbose=False):
"""Pretty print a variable. Useful for debugging.
With Jinja 1.2 onwards you can pass it a parameter. If this parameter
is truthy the output will be more verbose (this requires `pretty`)
"""
return pformat(value, verbose=verbose)
@evalcontextfilter
def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False):
"""Converts URLs in plain text into clickable links.
If you pass the filter an additional integer it will shorten the urls
to that number. Also a third argument exists that makes the urls
"nofollow":
.. sourcecode:: jinja
{{ mytext|urlize(40, true) }}
links are shortened to 40 chars and defined with rel="nofollow"
"""
rv = urlize(value, trim_url_limit, nofollow)
if eval_ctx.autoescape:
rv = Markup(rv)
return rv
def do_indent(s, width=4, indentfirst=False):
"""Return a copy of the passed string, each line indented by
4 spaces. The first line is not indented. If you want to
change the number of spaces or indent the first line too
you can pass additional parameters to the filter:
.. sourcecode:: jinja
{{ mytext|indent(2, true) }}
indent by two spaces and indent the first line too.
"""
indention = u' ' * width
rv = (u'\n' + indention).join(s.splitlines())
if indentfirst:
rv = indention + rv
return rv
def do_truncate(s, length=255, killwords=False, end='...'):
"""Return a truncated copy of the string. The length is specified
with the first parameter which defaults to ``255``. If the second
parameter is ``true`` the filter will cut the text at length. Otherwise
it will try to save the last word. If the text was in fact
truncated it will append an ellipsis sign (``"..."``). If you want a
different ellipsis sign than ``"..."`` you can specify it using the
third parameter.
.. sourcecode jinja::
{{ mytext|truncate(300, false, '»') }}
truncate mytext to 300 chars, don't split up words, use a
right pointing double arrow as ellipsis sign.
"""
if len(s) <= length:
return s
elif killwords:
return s[:length] + end
words = s.split(' ')
result = []
m = 0
for word in words:
m += len(word) + 1
if m > length:
break
result.append(word)
result.append(end)
return u' '.join(result)
def do_wordwrap(s, width=79, break_long_words=True):
"""
Return a copy of the string passed to the filter wrapped after
``79`` characters. You can override this default using the first
parameter. If you set the second parameter to `false` Jinja will not
split words apart if they are longer than `width`.
"""
import textwrap
return u'\n'.join(textwrap.wrap(s, width=width, expand_tabs=False,
replace_whitespace=False,
break_long_words=break_long_words))
def do_wordcount(s):
"""Count the words in that string."""
return len(_word_re.findall(s))
def do_int(value, default=0):
"""Convert the value into an integer. If the
conversion doesn't work it will return ``0``. You can
override this default using the first parameter.
"""
try:
return int(value)
except (TypeError, ValueError):
# this quirk is necessary so that "42.23"|int gives 42.
try:
return int(float(value))
except (TypeError, ValueError):
return default
def do_float(value, default=0.0):
"""Convert the value into a floating point number. If the
conversion doesn't work it will return ``0.0``. You can
override this default using the first parameter.
"""
try:
return float(value)
except (TypeError, ValueError):
return default
def do_format(value, *args, **kwargs):
"""
Apply python string formatting on an object:
.. sourcecode:: jinja
{{ "%s - %s"|format("Hello?", "Foo!") }}
-> Hello? - Foo!
"""
if args and kwargs:
raise FilterArgumentError('can\'t handle positional and keyword '
'arguments at the same time')
return soft_unicode(value) % (kwargs or args)
def do_trim(value):
"""Strip leading and trailing whitespace."""
return soft_unicode(value).strip()
def do_striptags(value):
"""Strip SGML/XML tags and replace adjacent whitespace by one space.
"""
if hasattr(value, '__html__'):
value = value.__html__()
return Markup(unicode(value)).striptags()
def do_slice(value, slices, fill_with=None):
"""Slice an iterator and return a list of lists containing
those items. Useful if you want to create a div containing
three ul tags that represent columns:
.. sourcecode:: html+jinja
<div class="columwrapper">
{%- for column in items|slice(3) %}
<ul class="column-{{ loop.index }}">
{%- for item in column %}
<li>{{ item }}</li>
{%- endfor %}
</ul>
{%- endfor %}
</div>
If you pass it a second argument it's used to fill missing
values on the last iteration.
"""
seq = list(value)
length = len(seq)
items_per_slice = length // slices
slices_with_extra = length % slices
offset = 0
for slice_number in xrange(slices):
start = offset + slice_number * items_per_slice
if slice_number < slices_with_extra:
offset += 1
end = offset + (slice_number + 1) * items_per_slice
tmp = seq[start:end]
if fill_with is not None and slice_number >= slices_with_extra:
tmp.append(fill_with)
yield tmp
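# Editor's worked example (not part of the original module):
#     list(do_slice(range(7), 3))  ->  [[0, 1, 2], [3, 4], [5, 6]]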
def do_batch(value, linecount, fill_with=None):
"""
A filter that batches items. It works pretty much like `slice`
just the other way round. It returns a list of lists with the
given number of items. If you provide a second parameter this
is used to fill missing items. See this example:
.. sourcecode:: html+jinja
<table>
{%- for row in items|batch(3, ' ') %}
<tr>
{%- for column in row %}
<td>{{ column }}</td>
{%- endfor %}
</tr>
{%- endfor %}
</table>
"""
result = []
tmp = []
for item in value:
if len(tmp) == linecount:
yield tmp
tmp = []
tmp.append(item)
if tmp:
if fill_with is not None and len(tmp) < linecount:
tmp += [fill_with] * (linecount - len(tmp))
yield tmp
def do_round(value, precision=0, method='common'):
"""Round the number to a given precision. The first
parameter specifies the precision (default is ``0``), the
second the rounding method:
- ``'common'`` rounds either up or down
- ``'ceil'`` always rounds up
- ``'floor'`` always rounds down
If you don't specify a method ``'common'`` is used.
.. sourcecode:: jinja
{{ 42.55|round }}
-> 43.0
{{ 42.55|round(1, 'floor') }}
-> 42.5
Note that even if rounded to 0 precision, a float is returned. If
you need a real integer, pipe it through `int`:
.. sourcecode:: jinja
{{ 42.55|round|int }}
-> 43
"""
if not method in ('common', 'ceil', 'floor'):
raise FilterArgumentError('method must be common, ceil or floor')
if method == 'common':
return round(value, precision)
func = getattr(math, method)
return func(value * (10 ** precision)) / (10 ** precision)
@environmentfilter
def do_groupby(environment, value, attribute):
"""Group a sequence of objects by a common attribute.
If you for example have a list of dicts or objects that represent persons
with `gender`, `first_name` and `last_name` attributes and you want to
group all users by genders you can do something like the following
snippet:
.. sourcecode:: html+jinja
<ul>
{% for group in persons|groupby('gender') %}
<li>{{ group.grouper }}<ul>
{% for person in group.list %}
<li>{{ person.first_name }} {{ person.last_name }}</li>
{% endfor %}</ul></li>
{% endfor %}
</ul>
Additionally it's possible to use tuple unpacking for the grouper and
list:
.. sourcecode:: html+jinja
<ul>
{% for grouper, list in persons|groupby('gender') %}
...
{% endfor %}
</ul>
As you can see the item we're grouping by is stored in the `grouper`
attribute and the `list` contains all the objects that have this grouper
in common.
"""
expr = lambda x: environment.getitem(x, attribute)
return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
class _GroupTuple(tuple):
__slots__ = ()
grouper = property(itemgetter(0))
list = property(itemgetter(1))
def __new__(cls, (key, value)):
return tuple.__new__(cls, (key, list(value)))
def do_list(value):
"""Convert the value into a list. If it was a string the returned list
will be a list of characters.
"""
return list(value)
def do_mark_safe(value):
"""Mark the value as safe which means that in an environment with automatic
escaping enabled this variable will not be escaped.
"""
return Markup(value)
def do_mark_unsafe(value):
"""Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
return unicode(value)
def do_reverse(value):
"""Reverse the object or return an iterator the iterates over it the other
way round.
"""
if isinstance(value, basestring):
return value[::-1]
try:
return reversed(value)
except TypeError:
try:
rv = list(value)
rv.reverse()
return rv
except TypeError:
raise FilterArgumentError('argument must be iterable')
@environmentfilter
def do_attr(environment, obj, name):
"""Get an attribute of an object. ``foo|attr("bar")`` works like
``foo["bar"]`` just that always an attribute is returned and items are not
looked up.
See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
"""
try:
name = str(name)
except UnicodeError:
pass
else:
try:
value = getattr(obj, name)
except AttributeError:
pass
else:
if environment.sandboxed and not \
environment.is_safe_attribute(obj, name, value):
return environment.unsafe_undefined(obj, name)
return value
return environment.undefined(obj=obj, name=name)
FILTERS = {
'attr': do_attr,
'replace': do_replace,
'upper': do_upper,
'lower': do_lower,
'escape': escape,
'e': escape,
'forceescape': do_forceescape,
'capitalize': do_capitalize,
'title': do_title,
'default': do_default,
'd': do_default,
'join': do_join,
'count': len,
'dictsort': do_dictsort,
'sort': do_sort,
'length': len,
'reverse': do_reverse,
'center': do_center,
'indent': do_indent,
'title': do_title,
'capitalize': do_capitalize,
'first': do_first,
'last': do_last,
'random': do_random,
'filesizeformat': do_filesizeformat,
'pprint': do_pprint,
'truncate': do_truncate,
'wordwrap': do_wordwrap,
'wordcount': do_wordcount,
'int': do_int,
'float': do_float,
'string': soft_unicode,
'list': do_list,
'urlize': do_urlize,
'format': do_format,
'trim': do_trim,
'striptags': do_striptags,
'slice': do_slice,
'batch': do_batch,
'sum': sum,
'abs': abs,
'round': do_round,
'groupby': do_groupby,
'safe': do_mark_safe,
'xmlattr': do_xmlattr
}
| gpl-3.0 |
ckundo/nvda | source/NVDAHelper.py | 2 | 20623 | import os
import sys
import _winreg
import msvcrt
import winKernel
import config
from ctypes import *
from ctypes.wintypes import *
from comtypes import BSTR
import winUser
import eventHandler
import queueHandler
import api
import globalVars
from logHandler import log
import time
import globalVars
_remoteLib=None
_remoteLoader64=None
localLib=None
generateBeep=None
VBuf_getTextInRange=None
lastInputLanguageName=None
lastInputMethodName=None
#utility function to point an exported function pointer in a dll to a ctypes wrapped python function
def _setDllFuncPointer(dll,name,cfunc):
cast(getattr(dll,name),POINTER(c_void_p)).contents.value=cast(cfunc,c_void_p).value
#Implementation of nvdaController methods
@WINFUNCTYPE(c_long,c_wchar_p)
def nvdaController_speakText(text):
focus=api.getFocusObject()
if focus.sleepMode==focus.SLEEP_FULL:
return -1
import queueHandler
import speech
queueHandler.queueFunction(queueHandler.eventQueue,speech.speakText,text)
return 0
@WINFUNCTYPE(c_long)
def nvdaController_cancelSpeech():
focus=api.getFocusObject()
if focus.sleepMode==focus.SLEEP_FULL:
return -1
import queueHandler
import speech
queueHandler.queueFunction(queueHandler.eventQueue,speech.cancelSpeech)
return 0
@WINFUNCTYPE(c_long,c_wchar_p)
def nvdaController_brailleMessage(text):
focus=api.getFocusObject()
if focus.sleepMode==focus.SLEEP_FULL:
return -1
import queueHandler
import braille
queueHandler.queueFunction(queueHandler.eventQueue,braille.handler.message,text)
return 0
def _lookupKeyboardLayoutNameWithHexString(layoutString):
buf=create_unicode_buffer(1024)
bufSize=c_int(2048)
key=HKEY()
if windll.advapi32.RegOpenKeyExW(_winreg.HKEY_LOCAL_MACHINE,u"SYSTEM\\CurrentControlSet\\Control\\Keyboard Layouts\\"+ layoutString,0,_winreg.KEY_QUERY_VALUE,byref(key))==0:
try:
if windll.advapi32.RegQueryValueExW(key,u"Layout Display Name",0,None,buf,byref(bufSize))==0:
windll.shlwapi.SHLoadIndirectString(buf.value,buf,1023,None)
return buf.value
if windll.advapi32.RegQueryValueExW(key,u"Layout Text",0,None,buf,byref(bufSize))==0:
return buf.value
finally:
windll.advapi32.RegCloseKey(key)
@WINFUNCTYPE(c_long,c_wchar_p)
def nvdaControllerInternal_requestRegistration(uuidString):
pid=c_long()
windll.rpcrt4.I_RpcBindingInqLocalClientPID(None,byref(pid))
pid=pid.value
if not pid:
log.error("Could not get process ID for RPC call")
return -1
bindingHandle=c_long()
bindingHandle.value=localLib.createRemoteBindingHandle(uuidString)
if not bindingHandle:
log.error("Could not bind to inproc rpc server for pid %d"%pid)
return -1
registrationHandle=c_long()
res=localLib.nvdaInProcUtils_registerNVDAProcess(bindingHandle,byref(registrationHandle))
if res!=0 or not registrationHandle:
log.error("Could not register NVDA with inproc rpc server for pid %d, res %d, registrationHandle %s"%(pid,res,registrationHandle))
windll.rpcrt4.RpcBindingFree(byref(bindingHandle))
return -1
import appModuleHandler
queueHandler.queueFunction(queueHandler.eventQueue,appModuleHandler.update,pid,helperLocalBindingHandle=bindingHandle,inprocRegistrationHandle=registrationHandle)
return 0
@WINFUNCTYPE(c_long,c_long,c_long,c_long,c_long,c_long)
def nvdaControllerInternal_displayModelTextChangeNotify(hwnd, left, top, right, bottom):
import displayModel
displayModel.textChangeNotify(hwnd, left, top, right, bottom)
return 0
@WINFUNCTYPE(c_long,c_long,c_long,c_wchar_p)
def nvdaControllerInternal_logMessage(level,pid,message):
if not log.isEnabledFor(level):
return 0
if pid:
from appModuleHandler import getAppNameFromProcessID
codepath="RPC process %s (%s)"%(pid,getAppNameFromProcessID(pid,includeExt=True))
else:
codepath="NVDAHelperLocal"
log._log(level,message,[],codepath=codepath)
return 0
def handleInputCompositionEnd(result):
import speech
import characterProcessing
from NVDAObjects.inputComposition import InputComposition
from NVDAObjects.behaviors import CandidateItem
focus=api.getFocusObject()
result=result.lstrip(u'\u3000 ')
curInputComposition=None
if isinstance(focus,InputComposition):
curInputComposition=focus
oldSpeechMode=speech.speechMode
speech.speechMode=speech.speechMode_off
eventHandler.executeEvent("gainFocus",focus.parent)
speech.speechMode=oldSpeechMode
elif isinstance(focus.parent,InputComposition):
#Candidate list is still up
curInputComposition=focus.parent
focus.parent=focus.parent.parent
if curInputComposition and not result:
result=curInputComposition.compositionString.lstrip(u'\u3000 ')
if result:
speech.speakText(result,symbolLevel=characterProcessing.SYMLVL_ALL)
def handleInputCompositionStart(compositionString,selectionStart,selectionEnd,isReading):
import speech
from NVDAObjects.inputComposition import InputComposition
from NVDAObjects.behaviors import CandidateItem
focus=api.getFocusObject()
if focus.parent and isinstance(focus.parent,InputComposition):
#Candidates in front of existing composition string
announce=not config.conf["inputComposition"]["announceSelectedCandidate"]
focus.parent.compositionUpdate(compositionString,selectionStart,selectionEnd,isReading,announce=announce)
return 0
#IME keeps updating input composition while the candidate list is open
#Therefore ignore new composition updates if candidate selections are configured for speaking.
if config.conf["inputComposition"]["announceSelectedCandidate"] and isinstance(focus,CandidateItem):
return 0
if not isinstance(focus,InputComposition):
parent=api.getDesktopObject().objectWithFocus()
curInputComposition=InputComposition(parent=parent)
oldSpeechMode=speech.speechMode
speech.speechMode=speech.speechMode_off
eventHandler.executeEvent("gainFocus",curInputComposition)
focus=curInputComposition
speech.speechMode=oldSpeechMode
focus.compositionUpdate(compositionString,selectionStart,selectionEnd,isReading)
@WINFUNCTYPE(c_long,c_wchar_p,c_int,c_int,c_int)
def nvdaControllerInternal_inputCompositionUpdate(compositionString,selectionStart,selectionEnd,isReading):
from NVDAObjects.inputComposition import InputComposition
if selectionStart==-1:
queueHandler.queueFunction(queueHandler.eventQueue,handleInputCompositionEnd,compositionString)
return 0
focus=api.getFocusObject()
if isinstance(focus,InputComposition):
focus.compositionUpdate(compositionString,selectionStart,selectionEnd,isReading)
else:
queueHandler.queueFunction(queueHandler.eventQueue,handleInputCompositionStart,compositionString,selectionStart,selectionEnd,isReading)
return 0
def handleInputCandidateListUpdate(candidatesString,selectionIndex,inputMethod):
candidateStrings=candidatesString.split('\n')
import speech
from NVDAObjects.inputComposition import InputComposition, CandidateList, CandidateItem
focus=api.getFocusObject()
if not (0<=selectionIndex<len(candidateStrings)):
if isinstance(focus,CandidateItem):
oldSpeechMode=speech.speechMode
speech.speechMode=speech.speechMode_off
eventHandler.executeEvent("gainFocus",focus.parent)
speech.speechMode=oldSpeechMode
return
oldCandidateItemsText=None
if isinstance(focus,CandidateItem):
oldCandidateItemsText=focus.visibleCandidateItemsText
parent=focus.parent
wasCandidate=True
else:
parent=focus
wasCandidate=False
item=CandidateItem(parent=parent,candidateStrings=candidateStrings,candidateIndex=selectionIndex,inputMethod=inputMethod)
if wasCandidate and focus.windowHandle==item.windowHandle and focus.candidateIndex==item.candidateIndex and focus.name==item.name:
return
if config.conf["inputComposition"]["autoReportAllCandidates"] and item.visibleCandidateItemsText!=oldCandidateItemsText:
import ui
ui.message(item.visibleCandidateItemsText)
eventHandler.executeEvent("gainFocus",item)
@WINFUNCTYPE(c_long,c_wchar_p,c_long,c_wchar_p)
def nvdaControllerInternal_inputCandidateListUpdate(candidatesString,selectionIndex,inputMethod):
queueHandler.queueFunction(queueHandler.eventQueue,handleInputCandidateListUpdate,candidatesString,selectionIndex,inputMethod)
return 0
inputConversionModeMessages={
1:(
# Translators: A mode that allows typing in the actual 'native' characters for an east-Asian input method language currently selected, rather than alpha numeric (Roman/English) characters.
_("Native input"),
# Translators: a mode that lets you type in alpha numeric (roman/english) characters, rather than 'native' characters for the east-Asian input method language currently selected.
_("Alpha numeric input")
),
8:(
# Translators: for East-Asian input methods, a mode that allows typing in full-shaped (full double-byte) characters, rather than the smaller half-shaped ones.
_("Full shaped mode"),
# Translators: for East-Asian input methods, a mode that allows typing in half-shaped (single-byte) characters, rather than the larger full-shaped (double-byte) ones.
_("Half shaped mode")
),
}
JapaneseInputConversionModeMessages= {
# Translators: For Japanese character input: half-shaped (single-byte) alpha numeric (roman/english) mode.
0: _("half alphanumeric"),
# Translators: For Japanese character input: half-shaped (single-byte) Katakana input mode.
3: _("half katakana"),
# Translators: For Japanese character input: alpha numeric (roman/english) mode.
8: _("alphanumeric"),
# Translators: For Japanese character input: Hiragana input mode.
9: _("hiragana"),
# Translators: For Japanese character input: Katakana input mode.
11: _("katakana"),
# Translators: For Japanese character input: half-shaped (single-byte) alpha numeric (roman/english) mode.
16: _("half alphanumeric"),
# Translators: For Japanese character input: half katakana roman input mode.
19: _("half katakana roman"),
# Translators: For Japanese character input: alpha numeric (roman/english) mode.
24: _("alphanumeric"),
# Translators: For Japanese character input: Hiragana Roman input mode.
25: _("hiragana roman"),
# Translators: For Japanese character input: Katakana Roman input mode.
27: _("katakana roman"),
}
def handleInputConversionModeUpdate(oldFlags,newFlags,lcid):
import speech
textList=[]
if newFlags!=oldFlags and lcid&0xff==0x11: #Japanese
msg=JapaneseInputConversionModeMessages.get(newFlags)
if msg:
textList.append(msg)
else:
for x in xrange(32):
x=2**x
msgs=inputConversionModeMessages.get(x)
if not msgs: continue
newOn=bool(newFlags&x)
oldOn=bool(oldFlags&x)
if newOn!=oldOn:
textList.append(msgs[0] if newOn else msgs[1])
if len(textList)>0:
queueHandler.queueFunction(queueHandler.eventQueue,speech.speakMessage," ".join(textList))
@WINFUNCTYPE(c_long,c_long,c_long,c_ulong)
def nvdaControllerInternal_inputConversionModeUpdate(oldFlags,newFlags,lcid):
queueHandler.queueFunction(queueHandler.eventQueue,handleInputConversionModeUpdate,oldFlags,newFlags,lcid)
return 0
@WINFUNCTYPE(c_long,c_long)
def nvdaControllerInternal_IMEOpenStatusUpdate(opened):
if opened:
# Translators: a message when the IME open status changes to opened
message=_("IME opened")
else:
# Translators: a message when the IME open status changes to closed
message=_("IME closed")
import ui
queueHandler.queueFunction(queueHandler.eventQueue,ui.message,message)
return 0
@WINFUNCTYPE(c_long,c_long,c_ulong,c_wchar_p)
def nvdaControllerInternal_inputLangChangeNotify(threadID,hkl,layoutString):
global lastInputMethodName, lastInputLanguageName
focus=api.getFocusObject()
#This callback can be called before NVDA is fully initialized
#So also handle focus object being None as well as checking for sleepMode
if not focus or focus.sleepMode:
return 0
import NVDAObjects.window
#Generally we should not allow input lang changes from threads that are not focused.
#But threadIDs for console windows are always wrong so don't ignore for those.
if not isinstance(focus,NVDAObjects.window.Window) or (threadID!=focus.windowThreadID and focus.windowClassName!="ConsoleWindowClass"):
return 0
import sayAllHandler
#Never announce changes while in sayAll (#1676)
if sayAllHandler.isRunning():
return 0
import queueHandler
import ui
languageID=hkl&0xffff
buf=create_unicode_buffer(1024)
res=windll.kernel32.GetLocaleInfoW(languageID,2,buf,1024)
# Translators: the label for an unknown language when switching input methods.
inputLanguageName=buf.value if res else _("unknown language")
layoutStringCodes=[]
inputMethodName=None
#layoutString can either be a real input method name, a hex string for an input method name in the registry, or an empty string.
#If it's a real input method name it's used as is.
#If it's a hex string or it's empty, then the method name is looked up by trying:
#The full hex string, the hkl as a hex string, the low word of the hex string or hkl, the high word of the hex string or hkl.
if layoutString:
try:
int(layoutString,16)
layoutStringCodes.append(layoutString)
except ValueError:
inputMethodName=layoutString
if not inputMethodName:
layoutStringCodes.insert(0,hex(hkl)[2:].rstrip('L').upper().rjust(8,'0'))
for stringCode in list(layoutStringCodes):
layoutStringCodes.append(stringCode[4:].rjust(8,'0'))
if stringCode[0]<'D':
layoutStringCodes.append(stringCode[0:4].rjust(8,'0'))
for stringCode in layoutStringCodes:
inputMethodName=_lookupKeyboardLayoutNameWithHexString(stringCode)
if inputMethodName: break
if not inputMethodName:
log.debugWarning("Could not find layout name for keyboard layout, reporting as unknown")
# Translators: The label for an unknown input method when switching input methods.
inputMethodName=_("unknown input method")
if ' - ' in inputMethodName:
inputMethodName="".join(inputMethodName.split(' - ')[1:])
if inputLanguageName!=lastInputLanguageName:
lastInputLanguageName=inputLanguageName
# Translators: the message announcing the language and keyboard layout when it changes
inputMethodName=_("{language} - {layout}").format(language=inputLanguageName,layout=inputMethodName)
if inputMethodName!=lastInputMethodName:
lastInputMethodName=inputMethodName
queueHandler.queueFunction(queueHandler.eventQueue,ui.message,inputMethodName)
return 0
@WINFUNCTYPE(c_long,c_long,c_wchar)
def nvdaControllerInternal_typedCharacterNotify(threadID,ch):
focus=api.getFocusObject()
if focus.windowClassName!="ConsoleWindowClass":
eventHandler.queueEvent("typedCharacter",focus,ch=ch)
return 0
@WINFUNCTYPE(c_long, c_int, c_int)
def nvdaControllerInternal_vbufChangeNotify(rootDocHandle, rootID):
import virtualBuffers
virtualBuffers.VirtualBuffer.changeNotify(rootDocHandle, rootID)
return 0
@WINFUNCTYPE(c_long, c_wchar_p)
def nvdaControllerInternal_installAddonPackageFromPath(addonPath):
import wx
from gui import addonGui
log.debug("Requesting installation of add-on from %s", addonPath)
wx.CallAfter(addonGui.AddonsDialog.handleRemoteAddonInstall, addonPath)
return 0
class RemoteLoader64(object):
def __init__(self):
# Create a pipe so we can write to stdin of the loader process.
pipeReadOrig, self._pipeWrite = winKernel.CreatePipe(None, 0)
# Make the read end of the pipe inheritable.
pipeRead = self._duplicateAsInheritable(pipeReadOrig)
winKernel.closeHandle(pipeReadOrig)
# stdout/stderr of the loader process should go to nul.
with file("nul", "w") as nul:
nulHandle = self._duplicateAsInheritable(msvcrt.get_osfhandle(nul.fileno()))
# Set the process to start with the appropriate std* handles.
si = winKernel.STARTUPINFO(dwFlags=winKernel.STARTF_USESTDHANDLES, hSTDInput=pipeRead, hSTDOutput=nulHandle, hSTDError=nulHandle)
pi = winKernel.PROCESS_INFORMATION()
# Even if we have uiAccess privileges, they will not be inherited by default.
# Therefore, explicitly specify our own process token, which causes them to be inherited.
token = winKernel.OpenProcessToken(winKernel.GetCurrentProcess(), winKernel.MAXIMUM_ALLOWED)
try:
winKernel.CreateProcessAsUser(token, None, u"lib64/nvdaHelperRemoteLoader.exe", None, None, True, None, None, None, si, pi)
# We don't need the thread handle.
winKernel.closeHandle(pi.hThread)
self._process = pi.hProcess
except:
winKernel.closeHandle(self._pipeWrite)
raise
finally:
winKernel.closeHandle(pipeRead)
winKernel.closeHandle(token)
def _duplicateAsInheritable(self, handle):
curProc = winKernel.GetCurrentProcess()
return winKernel.DuplicateHandle(curProc, handle, curProc, 0, True, winKernel.DUPLICATE_SAME_ACCESS)
def terminate(self):
# Closing the write end of the pipe will cause EOF for the waiting loader process, which will then exit gracefully.
winKernel.closeHandle(self._pipeWrite)
# Wait until it's dead.
winKernel.waitForSingleObject(self._process, winKernel.INFINITE)
winKernel.closeHandle(self._process)
def initialize():
global _remoteLib, _remoteLoader64, localLib, generateBeep,VBuf_getTextInRange
localLib=cdll.LoadLibrary('lib/nvdaHelperLocal.dll')
for name,func in [
("nvdaController_speakText",nvdaController_speakText),
("nvdaController_cancelSpeech",nvdaController_cancelSpeech),
("nvdaController_brailleMessage",nvdaController_brailleMessage),
("nvdaControllerInternal_requestRegistration",nvdaControllerInternal_requestRegistration),
("nvdaControllerInternal_inputLangChangeNotify",nvdaControllerInternal_inputLangChangeNotify),
("nvdaControllerInternal_typedCharacterNotify",nvdaControllerInternal_typedCharacterNotify),
("nvdaControllerInternal_displayModelTextChangeNotify",nvdaControllerInternal_displayModelTextChangeNotify),
("nvdaControllerInternal_logMessage",nvdaControllerInternal_logMessage),
("nvdaControllerInternal_inputCompositionUpdate",nvdaControllerInternal_inputCompositionUpdate),
("nvdaControllerInternal_inputCandidateListUpdate",nvdaControllerInternal_inputCandidateListUpdate),
("nvdaControllerInternal_IMEOpenStatusUpdate",nvdaControllerInternal_IMEOpenStatusUpdate),
("nvdaControllerInternal_inputConversionModeUpdate",nvdaControllerInternal_inputConversionModeUpdate),
("nvdaControllerInternal_vbufChangeNotify",nvdaControllerInternal_vbufChangeNotify),
("nvdaControllerInternal_installAddonPackageFromPath",nvdaControllerInternal_installAddonPackageFromPath),
]:
try:
_setDllFuncPointer(localLib,"_%s"%name,func)
except AttributeError as e:
log.error("nvdaHelperLocal function pointer for %s could not be found, possibly old nvdaHelperLocal dll"%name,exc_info=True)
raise e
localLib.nvdaHelperLocal_initialize()
generateBeep=localLib.generateBeep
generateBeep.argtypes=[c_char_p,c_float,c_uint,c_ubyte,c_ubyte]
generateBeep.restype=c_uint
# Handle VBuf_getTextInRange's BSTR out parameter so that the BSTR will be freed automatically.
VBuf_getTextInRange = CFUNCTYPE(c_int, c_int, c_int, c_int, POINTER(BSTR), c_int)(
("VBuf_getTextInRange", localLib),
((1,), (1,), (1,), (2,), (1,)))
#Load nvdaHelperRemote.dll but with an altered search path so it can pick up other dlls in lib
h=windll.kernel32.LoadLibraryExW(os.path.abspath(ur"lib\nvdaHelperRemote.dll"),0,0x8)
if not h:
log.critical("Error loading nvdaHelperRemote.dll: %s" % WinError())
return
_remoteLib=CDLL("nvdaHelperRemote",handle=h)
if _remoteLib.injection_initialize(globalVars.appArgs.secure) == 0:
raise RuntimeError("Error initializing NVDAHelperRemote")
if not _remoteLib.installIA2Support():
log.error("Error installing IA2 support")
#Manually start the in-process manager thread for this NVDA main thread now, as a slow system can cause this action to confuse WX
_remoteLib.initInprocManagerThreadIfNeeded()
if os.environ.get('PROCESSOR_ARCHITEW6432')=='AMD64':
_remoteLoader64=RemoteLoader64()
def terminate():
global _remoteLib, _remoteLoader64, localLib, generateBeep, VBuf_getTextInRange
if not _remoteLib.uninstallIA2Support():
log.debugWarning("Error uninstalling IA2 support")
if _remoteLib.injection_terminate() == 0:
raise RuntimeError("Error terminating NVDAHelperRemote")
_remoteLib=None
if _remoteLoader64:
_remoteLoader64.terminate()
_remoteLoader64=None
generateBeep=None
VBuf_getTextInRange=None
localLib.nvdaHelperLocal_terminate()
localLib=None
| gpl-2.0 |
ibmsoe/tensorflow | tensorflow/contrib/keras/python/keras/preprocessing/image_test.py | 36 | 7925 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
try:
import PIL # pylint:disable=g-import-not-at-top
except ImportError:
PIL = None
def _generate_test_images():
img_w = img_h = 20
rgb_images = []
gray_images = []
for _ in range(8):
bias = np.random.rand(img_w, img_h, 1) * 64
variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
imarray = np.random.rand(img_w, img_h, 3) * variance + bias
im = keras.preprocessing.image.array_to_img(imarray, scale=False)
rgb_images.append(im)
imarray = np.random.rand(img_w, img_h, 1) * variance + bias
im = keras.preprocessing.image.array_to_img(imarray, scale=False)
gray_images.append(im)
return [rgb_images, gray_images]
class TestImage(test.TestCase):
def test_image_data_generator(self):
if PIL is None:
return # Skip test if PIL is not available.
for test_images in _generate_test_images():
img_list = []
for im in test_images:
img_list.append(keras.preprocessing.image.img_to_array(im)[None, ...])
images = np.vstack(img_list)
generator = keras.preprocessing.image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.5,
horizontal_flip=True,
vertical_flip=True)
generator.fit(images, augment=True)
for x, _ in generator.flow(
images,
np.arange(images.shape[0]),
shuffle=True):
self.assertEqual(x.shape[1:], images.shape[1:])
break
def test_image_data_generator_invalid_data(self):
generator = keras.preprocessing.image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
data_format='channels_last')
# Test fit with invalid data
with self.assertRaises(ValueError):
x = np.random.random((3, 10, 10))
generator.fit(x)
with self.assertRaises(ValueError):
x = np.random.random((32, 3, 10, 10))
generator.fit(x)
with self.assertRaises(ValueError):
x = np.random.random((32, 10, 10, 5))
generator.fit(x)
# Test flow with invalid data
with self.assertRaises(ValueError):
x = np.random.random((32, 10, 10, 5))
generator.flow(np.arange(x.shape[0]))
with self.assertRaises(ValueError):
x = np.random.random((32, 10, 10))
generator.flow(np.arange(x.shape[0]))
with self.assertRaises(ValueError):
x = np.random.random((32, 3, 10, 10))
generator.flow(np.arange(x.shape[0]))
def test_image_data_generator_fit(self):
generator = keras.preprocessing.image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
data_format='channels_last')
# Test grayscale
x = np.random.random((32, 10, 10, 1))
generator.fit(x)
# Test RGB
x = np.random.random((32, 10, 10, 3))
generator.fit(x)
generator = keras.preprocessing.image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
data_format='channels_first')
# Test grayscale
x = np.random.random((32, 1, 10, 10))
generator.fit(x)
# Test RGB
x = np.random.random((32, 3, 10, 10))
generator.fit(x)
def test_directory_iterator(self):
if PIL is None:
return # Skip test if PIL is not available.
num_classes = 2
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = 'class-{}'.format(cl)
classpaths = [
class_directory, os.path.join(class_directory, 'subfolder-1'),
os.path.join(class_directory, 'subfolder-2'), os.path.join(
class_directory, 'subfolder-1', 'sub-subfolder')
]
for path in classpaths:
os.mkdir(os.path.join(temp_dir, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in _generate_test_images():
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(classpaths[count % len(classpaths)],
'image-{}.jpg'.format(count))
filenames.append(filename)
im.save(os.path.join(temp_dir, filename))
count += 1
# create iterator
generator = keras.preprocessing.image.ImageDataGenerator()
dir_iterator = generator.flow_from_directory(temp_dir)
# check number of classes and images
self.assertEqual(len(dir_iterator.class_indices), num_classes)
self.assertEqual(len(dir_iterator.classes), count)
self.assertEqual(sorted(dir_iterator.filenames), sorted(filenames))
def test_img_utils(self):
if PIL is None:
return # Skip test if PIL is not available.
height, width = 10, 8
# Test channels_first data format
x = np.random.random((3, height, width))
img = keras.preprocessing.image.array_to_img(
x, data_format='channels_first')
self.assertEqual(img.size, (width, height))
x = keras.preprocessing.image.img_to_array(
img, data_format='channels_first')
self.assertEqual(x.shape, (3, height, width))
# Test 2D
x = np.random.random((1, height, width))
img = keras.preprocessing.image.array_to_img(
x, data_format='channels_first')
self.assertEqual(img.size, (width, height))
x = keras.preprocessing.image.img_to_array(
img, data_format='channels_first')
self.assertEqual(x.shape, (1, height, width))
# Test channels_last data format
x = np.random.random((height, width, 3))
img = keras.preprocessing.image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = keras.preprocessing.image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 3))
# Test 2D
x = np.random.random((height, width, 1))
img = keras.preprocessing.image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = keras.preprocessing.image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 1))
if __name__ == '__main__':
test.main()
| apache-2.0 |
nitin-cherian/LifeLongLearning | Web_Development_Python/RealPython/flask-hello-world/env/lib/python3.5/site-packages/jinja2/asyncsupport.py | 117 | 7765 | # -*- coding: utf-8 -*-
"""
jinja2.asyncsupport
~~~~~~~~~~~~~~~~~~~
Has all the code for async support which is implemented as a patch
for supported Python versions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import asyncio
import inspect
from functools import update_wrapper
from jinja2.utils import concat, internalcode, Markup
from jinja2.environment import TemplateModule
from jinja2.runtime import LoopContextBase, _last_iteration
async def concat_async(async_gen):
rv = []
async def collect():
async for event in async_gen:
rv.append(event)
await collect()
return concat(rv)
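# For example, render_async below awaits this helper over the template's root
# render function:
#   rv = await concat_async(self.root_render_func(ctx))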
async def generate_async(self, *args, **kwargs):
vars = dict(*args, **kwargs)
try:
async for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
def wrap_generate_func(original_generate):
def _convert_generator(self, loop, args, kwargs):
async_gen = self.generate_async(*args, **kwargs)
try:
while 1:
yield loop.run_until_complete(async_gen.__anext__())
except StopAsyncIteration:
pass
def generate(self, *args, **kwargs):
if not self.environment.is_async:
return original_generate(self, *args, **kwargs)
return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
return update_wrapper(generate, original_generate)
async def render_async(self, *args, **kwargs):
if not self.environment.is_async:
raise RuntimeError('The environment was not created with async mode '
'enabled.')
vars = dict(*args, **kwargs)
ctx = self.new_context(vars)
try:
return await concat_async(self.root_render_func(ctx))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def wrap_render_func(original_render):
def render(self, *args, **kwargs):
if not self.environment.is_async:
return original_render(self, *args, **kwargs)
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.render_async(*args, **kwargs))
return update_wrapper(render, original_render)
def wrap_block_reference_call(original_call):
@internalcode
async def async_call(self):
rv = await concat_async(self._stack[self._depth](self._context))
if self._context.eval_ctx.autoescape:
rv = Markup(rv)
return rv
@internalcode
def __call__(self):
if not self._context.environment.is_async:
return original_call(self)
return async_call(self)
return update_wrapper(__call__, original_call)
def wrap_macro_invoke(original_invoke):
@internalcode
async def async_invoke(self, arguments, autoescape):
rv = await self._func(*arguments)
if autoescape:
rv = Markup(rv)
return rv
@internalcode
def _invoke(self, arguments, autoescape):
if not self._environment.is_async:
return original_invoke(self, arguments, autoescape)
return async_invoke(self, arguments, autoescape)
return update_wrapper(_invoke, original_invoke)
@internalcode
async def get_default_module_async(self):
if self._module is not None:
return self._module
self._module = rv = await self.make_module_async()
return rv
def wrap_default_module(original_default_module):
@internalcode
def _get_default_module(self):
if self.environment.is_async:
raise RuntimeError('Template module attribute is unavailable '
'in async mode')
return original_default_module(self)
return _get_default_module
async def make_module_async(self, vars=None, shared=False, locals=None):
context = self.new_context(vars, shared, locals)
body_stream = []
async for item in self.root_render_func(context):
body_stream.append(item)
return TemplateModule(self, context, body_stream)
def patch_template():
from jinja2 import Template
Template.generate = wrap_generate_func(Template.generate)
Template.generate_async = update_wrapper(
generate_async, Template.generate_async)
Template.render_async = update_wrapper(
render_async, Template.render_async)
Template.render = wrap_render_func(Template.render)
Template._get_default_module = wrap_default_module(
Template._get_default_module)
Template._get_default_module_async = get_default_module_async
Template.make_module_async = update_wrapper(
make_module_async, Template.make_module_async)
def patch_runtime():
from jinja2.runtime import BlockReference, Macro
BlockReference.__call__ = wrap_block_reference_call(
BlockReference.__call__)
Macro._invoke = wrap_macro_invoke(Macro._invoke)
def patch_filters():
from jinja2.filters import FILTERS
from jinja2.asyncfilters import ASYNC_FILTERS
FILTERS.update(ASYNC_FILTERS)
def patch_all():
patch_template()
patch_runtime()
patch_filters()
async def auto_await(value):
if inspect.isawaitable(value):
return await value
return value
async def auto_aiter(iterable):
if hasattr(iterable, '__aiter__'):
async for item in iterable:
yield item
return
for item in iterable:
yield item
class AsyncLoopContext(LoopContextBase):
def __init__(self, async_iterator, after, length, recurse=None,
depth0=0):
LoopContextBase.__init__(self, recurse, depth0)
self._async_iterator = async_iterator
self._after = after
self._length = length
@property
def length(self):
if self._length is None:
raise TypeError('Loop length for some iterators cannot be '
'lazily calculated in async mode')
return self._length
def __aiter__(self):
return AsyncLoopContextIterator(self)
class AsyncLoopContextIterator(object):
__slots__ = ('context',)
def __init__(self, context):
self.context = context
def __aiter__(self):
return self
async def __anext__(self):
ctx = self.context
ctx.index0 += 1
if ctx._after is _last_iteration:
raise StopAsyncIteration()
next_elem = ctx._after
try:
ctx._after = await ctx._async_iterator.__anext__()
except StopAsyncIteration:
ctx._after = _last_iteration
return next_elem, ctx
async def make_async_loop_context(iterable, recurse=None, depth0=0):
# Length is more complicated and less efficient in async mode. The
# reason for this is that we cannot know if length will be used
# upfront but because length is a property we cannot lazily execute it
# later. This means that we need to buffer it up and measure :(
#
# We however only do this for actual iterators, not for async
# iterators as blocking here does not seem like the best idea in the
# world.
try:
length = len(iterable)
except (TypeError, AttributeError):
if not hasattr(iterable, '__aiter__'):
iterable = tuple(iterable)
length = len(iterable)
else:
length = None
async_iterator = auto_aiter(iterable)
try:
after = await async_iterator.__anext__()
except StopAsyncIteration:
after = _last_iteration
return AsyncLoopContext(async_iterator, after, length, recurse, depth0)
| mit |
manumathewthomas/Chat-with-Joey | chatbot/model.py | 1 | 9688 | # Copyright 2015 Conchylicultor. All Rights Reserved.
# Modifications copyright (C) 2016 Carlos Segura
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Model to predict the next sentence given an input sequence
"""
import tensorflow as tf
from chatbot.textdata import Batch
class ProjectionOp:
""" Single layer perceptron
Project input tensor on the output dimension
"""
def __init__(self, shape, scope=None, dtype=None):
"""
Args:
shape: a tuple (input dim, output dim)
scope (str): encapsulate variables
dtype: the weights type
"""
assert len(shape) == 2
self.scope = scope
# Projection onto the output vocabulary
with tf.variable_scope('weights_' + self.scope):
self.W = tf.get_variable(
'weights',
shape,
# initializer=tf.truncated_normal_initializer() # TODO: Tune value (fct of input size: 1/sqrt(input_dim))
dtype=dtype
)
self.b = tf.get_variable(
'bias',
shape[1],
initializer=tf.constant_initializer(),
dtype=dtype
)
def getWeights(self):
""" Convenience method for some tf arguments
"""
return self.W, self.b
def __call__(self, X):
""" Project the output of the decoder into the vocabulary space
Args:
X (tf.Tensor): input value
"""
with tf.name_scope(self.scope):
return tf.matmul(X, self.W) + self.b
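# Minimal usage sketch (shapes assumed for illustration, not taken from this file):
#   outputProjection = ProjectionOp((hiddenSize, vocabularySize), scope='softmax_projection')
#   logits = outputProjection(decoderOutput)  # [batch, hiddenSize] -> [batch, vocabularySize]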
class Model:
"""
Implementation of a seq2seq model.
Architecture:
Encoder/decoder
2 LSTM layers
"""
def __init__(self, args, textData):
"""
Args:
args: parameters of the model
textData: the dataset object
"""
print("Model creation...")
self.textData = textData # Keep a reference on the dataset
self.args = args # Keep track of the parameters of the model
self.dtype = tf.float32
# Placeholders
self.encoderInputs = None
self.decoderInputs = None # Same as decoderTargets, plus the <go> token
self.decoderTargets = None
self.decoderWeights = None # Adjust the learning to the target sentence size
# Main operators
self.lossFct = None
self.optOp = None
self.outputs = None # Outputs of the network, list of probabilities for each word
# Construct the graphs
self.buildNetwork()
def buildNetwork(self):
""" Create the computational graph
"""
# TODO: Create name_scopes (for better graph visualisation)
# TODO: Use buckets (better perfs)
# Parameters of sampled softmax (needed for attention mechanism and a large vocabulary size)
outputProjection = None
# Sampled softmax only makes sense if we sample less than vocabulary size.
if 0 < self.args.softmaxSamples < self.textData.getVocabularySize():
outputProjection = ProjectionOp(
(self.args.hiddenSize, self.textData.getVocabularySize()),
scope='softmax_projection',
dtype=self.dtype
)
def sampledSoftmax(inputs, labels):
labels = tf.reshape(labels, [-1, 1]) # Add one dimension (nb of true classes, here 1)
# We need to compute the sampled_softmax_loss using 32bit floats to
# avoid numerical instabilities.
localWt = tf.cast(tf.transpose(outputProjection.W), tf.float32)
localB = tf.cast(outputProjection.b, tf.float32)
localInputs = tf.cast(inputs, tf.float32)
return tf.cast(
tf.nn.sampled_softmax_loss(
localWt, # Should have shape [num_classes, dim]
localB,
localInputs,
labels,
self.args.softmaxSamples, # The number of classes to randomly sample per batch
self.textData.getVocabularySize()), # The number of classes
self.dtype)
# Creation of the rnn cell
encoDecoCell = tf.nn.rnn_cell.BasicLSTMCell(self.args.hiddenSize, state_is_tuple=True) # Or GRUCell, LSTMCell(args.hiddenSize)
if not self.args.test: # TODO: Should use a placeholder instead
encoDecoCell = tf.nn.rnn_cell.DropoutWrapper(encoDecoCell, input_keep_prob=1.0, output_keep_prob=0.5) # TODO: Custom values
encoDecoCell = tf.nn.rnn_cell.MultiRNNCell([encoDecoCell] * self.args.numLayers, state_is_tuple=True)
# Network input (placeholders)
with tf.name_scope('placeholder_encoder'):
self.encoderInputs = [tf.placeholder(tf.int32, [None, ]) for _ in range(self.args.maxLengthEnco)] # Batch size * sequence length * input dim
with tf.name_scope('placeholder_decoder'):
self.decoderInputs = [tf.placeholder(tf.int32, [None, ], name='inputs') for _ in range(self.args.maxLengthDeco)] # Same sentence length for input and output (Right ?)
self.decoderTargets = [tf.placeholder(tf.int32, [None, ], name='targets') for _ in range(self.args.maxLengthDeco)]
self.decoderWeights = [tf.placeholder(tf.float32, [None, ], name='weights') for _ in range(self.args.maxLengthDeco)]
# Define the network
# Here we use an embedding model: it takes integers as input and converts them into word vectors for
# better word representation
decoderOutputs, states = tf.nn.seq2seq.embedding_rnn_seq2seq(
self.encoderInputs, # List<[batch=?, inputDim=1]>, list of size args.maxLength
self.decoderInputs, # For training, we force the correct output (feed_previous=False)
encoDecoCell,
self.textData.getVocabularySize(),
self.textData.getVocabularySize(), # Both encoder and decoder have the same number of class
embedding_size=self.args.embeddingSize, # Dimension of each word
output_projection=outputProjection.getWeights() if outputProjection else None,
feed_previous=bool(self.args.test) # When we test (self.args.test), we use previous output as next input (feed_previous)
)
# TODO: When the LSTM hidden size is too big, we should project the LSTM output into a smaller space (4086 => 2046): Should speed up
# training and reduce memory usage. Other solution, use sampling softmax
# For testing only
if self.args.test:
if not outputProjection:
self.outputs = decoderOutputs
else:
self.outputs = [outputProjection(output) for output in decoderOutputs]
# TODO: Attach a summary to visualize the output
# For training only
else:
# Finally, we define the loss function
self.lossFct = tf.nn.seq2seq.sequence_loss(
decoderOutputs,
self.decoderTargets,
self.decoderWeights,
self.textData.getVocabularySize(),
softmax_loss_function= sampledSoftmax if outputProjection else None # If None, use default SoftMax
)
tf.summary.scalar('loss', self.lossFct) # Keep track of the cost
# Initialize the optimizer
opt = tf.train.AdamOptimizer(
learning_rate=self.args.learningRate,
beta1=0.9,
beta2=0.999,
epsilon=1e-08
)
self.optOp = opt.minimize(self.lossFct)
def step(self, batch):
""" Forward/training step operation.
Does not perform a run itself but just returns the operators to do so. Those then have to be run
Args:
batch (Batch): input data in testing mode; input and target in training mode
Return:
(ops), dict: A tuple of the (training, loss) operators or (outputs,) in testing mode with the associated feed dictionary
"""
# Feed the dictionary
feedDict = {}
ops = None
if not self.args.test: # Training
for i in range(self.args.maxLengthEnco):
feedDict[self.encoderInputs[i]] = batch.encoderSeqs[i]
for i in range(self.args.maxLengthDeco):
feedDict[self.decoderInputs[i]] = batch.decoderSeqs[i]
feedDict[self.decoderTargets[i]] = batch.targetSeqs[i]
feedDict[self.decoderWeights[i]] = batch.weights[i]
ops = (self.optOp, self.lossFct)
else: # Testing (batchSize == 1)
for i in range(self.args.maxLengthEnco):
feedDict[self.encoderInputs[i]] = batch.encoderSeqs[i]
feedDict[self.decoderInputs[0]] = [self.textData.goToken]
ops = (self.outputs,)
# Return one pass operator
return ops, feedDict
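# Training-loop sketch (the TensorFlow session 'sess' is assumed, not created here):
#   ops, feedDict = model.step(batch)
#   _, loss = sess.run(ops, feedDict)  # in training mode ops is (optOp, lossFct)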
| apache-2.0 |
quentinsf/ansible | test/units/plugins/strategies/test_strategy_base.py | 7 | 14666 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.strategies import StrategyBase
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.task_result import TaskResult
from six.moves import queue as Queue
from units.mock.loader import DictDataLoader
class TestStrategyBase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_strategy_base_init(self):
mock_tqm = MagicMock(TaskQueueManager)
mock_tqm._final_q = MagicMock()
strategy_base = StrategyBase(tqm=mock_tqm)
def test_strategy_base_run(self):
mock_tqm = MagicMock(TaskQueueManager)
mock_tqm._final_q = MagicMock()
mock_tqm._stats = MagicMock()
mock_tqm.send_callback.return_value = None
mock_iterator = MagicMock()
mock_iterator._play = MagicMock()
mock_iterator._play.handlers = []
mock_conn_info = MagicMock()
mock_tqm._failed_hosts = dict()
mock_tqm._unreachable_hosts = dict()
strategy_base = StrategyBase(tqm=mock_tqm)
self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info), 0)
self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info, result=False), 1)
mock_tqm._failed_hosts = dict(host1=True)
self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info, result=False), 2)
mock_tqm._unreachable_hosts = dict(host1=True)
self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info, result=False), 3)
def test_strategy_base_get_hosts(self):
mock_hosts = []
for i in range(0, 5):
mock_host = MagicMock()
mock_host.name = "host%02d" % (i+1)
mock_hosts.append(mock_host)
mock_inventory = MagicMock()
mock_inventory.get_hosts.return_value = mock_hosts
mock_tqm = MagicMock()
mock_tqm._final_q = MagicMock()
mock_tqm.get_inventory.return_value = mock_inventory
mock_play = MagicMock()
mock_play.hosts = ["host%02d" % (i+1) for i in range(0, 5)]
strategy_base = StrategyBase(tqm=mock_tqm)
mock_tqm._failed_hosts = []
mock_tqm._unreachable_hosts = []
self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts)
mock_tqm._failed_hosts = ["host01"]
self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[1:])
self.assertEqual(strategy_base.get_failed_hosts(play=mock_play), [mock_hosts[0]])
mock_tqm._unreachable_hosts = ["host02"]
self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:])
def test_strategy_base_queue_task(self):
fake_loader = DictDataLoader()
workers = []
for i in range(0, 3):
worker_main_q = MagicMock()
worker_main_q.put.return_value = None
worker_result_q = MagicMock()
workers.append([i, worker_main_q, worker_result_q])
mock_tqm = MagicMock()
mock_tqm._final_q = MagicMock()
mock_tqm.get_workers.return_value = workers
mock_tqm.get_loader.return_value = fake_loader
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base._cur_worker = 0
strategy_base._pending_results = 0
strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), connection_info=MagicMock())
self.assertEqual(strategy_base._cur_worker, 1)
self.assertEqual(strategy_base._pending_results, 1)
strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), connection_info=MagicMock())
self.assertEqual(strategy_base._cur_worker, 2)
self.assertEqual(strategy_base._pending_results, 2)
strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), connection_info=MagicMock())
self.assertEqual(strategy_base._cur_worker, 0)
self.assertEqual(strategy_base._pending_results, 3)
workers[0][1].put.side_effect = EOFError
strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), connection_info=MagicMock())
self.assertEqual(strategy_base._cur_worker, 1)
self.assertEqual(strategy_base._pending_results, 3)
def test_strategy_base_process_pending_results(self):
mock_tqm = MagicMock()
mock_tqm._terminated = False
mock_tqm._failed_hosts = dict()
mock_tqm._unreachable_hosts = dict()
mock_tqm.send_callback.return_value = None
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_tqm._final_q = mock_queue
mock_tqm._stats = MagicMock()
mock_tqm._stats.increment.return_value = None
mock_iterator = MagicMock()
mock_iterator.mark_host_failed.return_value = None
mock_host = MagicMock()
mock_host.name = 'test01'
mock_host.vars = dict()
mock_task = MagicMock()
mock_task._role = None
mock_task.ignore_errors = False
mock_group = MagicMock()
mock_group.add_host.return_value = None
def _get_host(host_name):
if host_name == 'test01':
return mock_host
return None
def _get_group(group_name):
if group_name in ('all', 'foo'):
return mock_group
return None
mock_inventory = MagicMock()
mock_inventory._hosts_cache = dict()
mock_inventory.get_host.side_effect = _get_host
mock_inventory.get_group.side_effect = _get_group
mock_inventory.clear_pattern_cache.return_value = None
mock_var_mgr = MagicMock()
mock_var_mgr.set_host_variable.return_value = None
mock_var_mgr.set_host_facts.return_value = None
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base._inventory = mock_inventory
strategy_base._variable_manager = mock_var_mgr
strategy_base._blocked_hosts = dict()
strategy_base._notified_handlers = dict()
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 0)
task_result = TaskResult(host=mock_host, task=mock_task, return_data=dict(changed=True))
queue_items.append(('host_task_ok', task_result))
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
task_result = TaskResult(host=mock_host, task=mock_task, return_data='{"failed":true}')
queue_items.append(('host_task_failed', task_result))
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
results = strategy_base._process_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
self.assertIn('test01', mock_tqm._failed_hosts)
del mock_tqm._failed_hosts['test01']
task_result = TaskResult(host=mock_host, task=mock_task, return_data='{}')
queue_items.append(('host_unreachable', task_result))
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
self.assertIn('test01', mock_tqm._unreachable_hosts)
del mock_tqm._unreachable_hosts['test01']
task_result = TaskResult(host=mock_host, task=mock_task, return_data='{}')
queue_items.append(('host_task_skipped', task_result))
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
queue_items.append(('add_host', dict(add_host=dict(host_name='newhost01', new_groups=['foo']))))
results = strategy_base._process_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 0)
self.assertEqual(strategy_base._pending_results, 1)
self.assertIn('test01', strategy_base._blocked_hosts)
queue_items.append(('add_group', mock_host, dict(add_group=dict(group_name='foo'))))
results = strategy_base._process_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 0)
self.assertEqual(strategy_base._pending_results, 1)
self.assertIn('test01', strategy_base._blocked_hosts)
queue_items.append(('notify_handler', mock_host, 'test handler'))
results = strategy_base._process_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 0)
self.assertEqual(strategy_base._pending_results, 1)
self.assertIn('test01', strategy_base._blocked_hosts)
self.assertIn('test handler', strategy_base._notified_handlers)
self.assertIn(mock_host, strategy_base._notified_handlers['test handler'])
queue_items.append(('set_host_var', mock_host, mock_task, None, 'foo', 'bar'))
results = strategy_base._process_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 0)
self.assertEqual(strategy_base._pending_results, 1)
queue_items.append(('set_host_facts', mock_host, mock_task, None, 'foo', dict()))
results = strategy_base._process_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 0)
self.assertEqual(strategy_base._pending_results, 1)
queue_items.append(('bad'))
self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator)
def test_strategy_base_load_included_file(self):
fake_loader = DictDataLoader({
"test.yml": """
- debug: msg='foo'
""",
"bad.yml": """
""",
})
mock_tqm = MagicMock()
mock_tqm._final_q = MagicMock()
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base._loader = fake_loader
mock_play = MagicMock()
mock_block = MagicMock()
mock_block._play = mock_play
mock_block.vars = dict()
mock_task = MagicMock()
mock_task._block = mock_block
mock_task._role = None
mock_iterator = MagicMock()
mock_iterator.mark_host_failed.return_value = None
mock_inc_file = MagicMock()
mock_inc_file._task = mock_task
mock_inc_file._filename = "test.yml"
res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
mock_inc_file._filename = "bad.yml"
res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
self.assertEqual(res, [])
def test_strategy_base_run_handlers(self):
workers = []
for i in range(0, 3):
worker_main_q = MagicMock()
worker_main_q.put.return_value = None
worker_result_q = MagicMock()
workers.append([i, worker_main_q, worker_result_q])
mock_tqm = MagicMock()
mock_tqm._final_q = MagicMock()
mock_tqm.get_workers.return_value = workers
mock_tqm.send_callback.return_value = None
mock_conn_info = MagicMock()
mock_handler_task = MagicMock()
mock_handler_task.get_name.return_value = "test handler"
mock_handler_task.has_triggered.return_value = False
mock_handler = MagicMock()
mock_handler.block = [mock_handler_task]
mock_handler.flag_for_host.return_value = False
mock_play = MagicMock()
mock_play.handlers = [mock_handler]
mock_host = MagicMock()
mock_host.name = "test01"
mock_iterator = MagicMock()
mock_inventory = MagicMock()
mock_inventory.get_hosts.return_value = [mock_host]
mock_var_mgr = MagicMock()
mock_var_mgr.get_vars.return_value = dict()
mock_iterator = MagicMock()
mock_iterator._play = mock_play
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base._inventory = mock_inventory
strategy_base._notified_handlers = {"test handler": [mock_host]}
result = strategy_base.run_handlers(iterator=mock_iterator, connection_info=mock_conn_info)
| gpl-3.0 |
2014c2g3/0623exam | static/Brython3.1.1-20150328-091302/Lib/tempfile_1.py | 728 | 22357 | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import warnings as _warnings
import sys as _sys
import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
try:
import fcntl as _fcntl
except ImportError:
def _set_cloexec(fd):
pass
else:
def _set_cloexec(fd):
try:
flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
except OSError:
pass
else:
# flags read successfully, modify
flags |= _fcntl.FD_CLOEXEC
_fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
import _thread
except ImportError:
import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
_text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
# Although it does not have an underscore for historical reasons, this
# variable is an internal implementation detail (see issue 10354).
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises OSError if the
# file doesn't exist.
def _stat(fn):
f = open(fn)
f.close()
def _exists(fn):
try:
_stat(fn)
except OSError:
return False
else:
return True
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is six characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in "123456"]
return ''.join(letters)
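# Example (output is random; the value shown is only illustrative):
#   next(_RandomNameSequence()) might return a string such as 'k3b_9z',
#   six characters drawn from the `characters` alphabet above.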
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
"""Calculate the default directory to use for temporary files.
This routine should be called exactly once.
We determine whether or not a candidate temp dir is usable by
trying to create and write to a file in that directory. If this
is successful, the test file is deleted. To prevent denial of
service, the name of the test file must be randomized."""
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.normcase(_os.path.abspath(dir))
# Try only a few names per directory.
for seq in range(100):
name = next(namer)
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, _bin_openflags, 0o600)
try:
try:
with _io.open(fd, 'wb', closefd=False) as fp:
fp.write(b'blat')
finally:
_os.close(fd)
finally:
_os.unlink(filename)
return dir
except FileExistsError:
pass
except OSError:
break # no point trying more names in this directory
raise FileNotFoundError(_errno.ENOENT,
"No usable temporary directory found in %s" %
dirlist)
_name_sequence = None
def _get_candidate_names():
"""Common setup sequence for all user-callable interfaces."""
global _name_sequence
if _name_sequence is None:
_once_lock.acquire()
try:
if _name_sequence is None:
_name_sequence = _RandomNameSequence()
finally:
_once_lock.release()
return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
"""Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0o600)
_set_cloexec(fd)
return (fd, _os.path.abspath(file))
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
            # already exists on Windows.
if _os.name == 'nt':
continue
else:
raise
raise FileExistsError(_errno.EEXIST,
"No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
"""Accessor for tempdir.template."""
return template
tempdir = None
def gettempdir():
"""Accessor for tempfile.tempdir."""
global tempdir
if tempdir is None:
_once_lock.acquire()
try:
if tempdir is None:
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is specified, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is specified, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is specified, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
if dir is None:
dir = gettempdir()
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags)
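# Usage sketch for mkstemp(): the caller receives an already-open descriptor
# plus the path, and is responsible for closing the descriptor and removing
# the file. The helper below is illustrative only and is never called by this
# module.
def _example_mkstemp_usage():
    fd, path = mkstemp(suffix=".log", prefix="demo_")
    try:
        _os.write(fd, b"temporary contents")
    finally:
        _os.close(fd)
        _os.unlink(path)
    return path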
def mkdtemp(suffix="", prefix=template, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
return file
except FileExistsError:
continue # try again
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
This function is unsafe and should not be used. The file name
refers to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
if not _exists(file):
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary filename found")
class _TemporaryFileWrapper:
"""Temporary file wrapper
This class provides a wrapper around files opened for
temporary use. In particular, it seeks to automatically
remove the file when it is no longer needed.
"""
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.close_called = False
self.delete = delete
def __getattr__(self, name):
# Attribute lookups are delegated to the underlying file
# and cached for non-numeric results
# (i.e. methods are cached, closed and friends are not)
file = self.__dict__['file']
a = getattr(file, name)
if not isinstance(a, int):
setattr(self, name, a)
return a
# The underlying __enter__ method returns the wrong object
# (self.file) so override it to return the wrapper
def __enter__(self):
self.file.__enter__()
return self
# iter() doesn't use __getattr__ to find the __iter__ method
def __iter__(self):
return iter(self.file)
# NT provides delete-on-close as a primitive, so we don't need
# the wrapper to do anything special. We still use it so that
# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
if _os.name != 'nt':
# Cache the unlinker so we don't get spurious errors at
# shutdown when the module-level "os" is None'd out. Note
# that this must be referenced as self.unlink, because the
# name TemporaryFileWrapper may also get None'd out before
# __del__ is called.
unlink = _os.unlink
def close(self):
if not self.close_called:
self.close_called = True
self.file.close()
if self.delete:
self.unlink(self.name)
def __del__(self):
self.close()
# Need to trap __exit__ as well to ensure the file gets
# deleted when used in a with statement
def __exit__(self, exc, value, tb):
result = self.file.__exit__(exc, value, tb)
self.close()
return result
else:
def __exit__(self, exc, value, tb):
self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix="", prefix=template,
dir=None, delete=True):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
'delete' -- whether the file is deleted on close (default True).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface; the name of the file
is accessible as file.name. The file will be automatically deleted
when it is closed unless the 'delete' argument is set to False.
"""
if dir is None:
dir = gettempdir()
flags = _bin_openflags
# Setting O_TEMPORARY in the flags causes the OS to delete
# the file when it is closed. This is only supported by Windows.
if _os.name == 'nt' and delete:
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
file = _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
return _TemporaryFileWrapper(file, name, delete)
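# Usage sketch for NamedTemporaryFile(): the wrapper exposes .name so the path
# can be handed to other code, and with the default delete=True the file is
# removed as soon as it is closed. Illustrative only; never called here.
def _example_named_tempfile_usage():
    with NamedTemporaryFile(mode="w+", suffix=".txt") as tmp:
        tmp.write("scratch data")
        tmp.flush()
        path_while_open = tmp.name
    # Leaving the with-block closed (and therefore deleted) the file.
    return path_while_open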
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
# On non-POSIX and Cygwin systems, assume that we cannot unlink a file
# while it is open.
TemporaryFile = NamedTemporaryFile
else:
def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix="", prefix=template,
dir=None):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
The file is created as mkstemp() would do it.
Returns an object with a file-like interface. The file has no
name, and will cease to exist when it is closed.
"""
if dir is None:
dir = gettempdir()
flags = _bin_openflags
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
try:
_os.unlink(name)
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
class SpooledTemporaryFile:
"""Temporary file wrapper, specialized to switch from BytesIO
or StringIO to a real file when it exceeds a certain size or
when a fileno is needed.
"""
_rolled = False
def __init__(self, max_size=0, mode='w+b', buffering=-1,
encoding=None, newline=None,
suffix="", prefix=template, dir=None):
if 'b' in mode:
self._file = _io.BytesIO()
else:
# Setting newline="\n" avoids newline translation;
# this is important because otherwise on Windows we'd
            # get double newline translation upon rollover().
self._file = _io.StringIO(newline="\n")
self._max_size = max_size
self._rolled = False
self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
'suffix': suffix, 'prefix': prefix,
'encoding': encoding, 'newline': newline,
'dir': dir}
def _check(self, file):
if self._rolled: return
max_size = self._max_size
if max_size and file.tell() > max_size:
self.rollover()
def rollover(self):
if self._rolled: return
file = self._file
newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
del self._TemporaryFileArgs
newfile.write(file.getvalue())
newfile.seek(file.tell(), 0)
self._rolled = True
# The method caching trick from NamedTemporaryFile
# won't work here, because _file may change from a
# BytesIO/StringIO instance to a real file. So we list
# all the methods directly.
# Context management protocol
def __enter__(self):
if self._file.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc, value, tb):
self._file.close()
# file protocol
def __iter__(self):
return self._file.__iter__()
def close(self):
self._file.close()
@property
def closed(self):
return self._file.closed
@property
def encoding(self):
try:
return self._file.encoding
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['encoding']
def fileno(self):
self.rollover()
return self._file.fileno()
def flush(self):
self._file.flush()
def isatty(self):
return self._file.isatty()
@property
def mode(self):
try:
return self._file.mode
except AttributeError:
return self._TemporaryFileArgs['mode']
@property
def name(self):
try:
return self._file.name
except AttributeError:
return None
@property
def newlines(self):
try:
return self._file.newlines
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['newline']
def read(self, *args):
return self._file.read(*args)
def readline(self, *args):
return self._file.readline(*args)
def readlines(self, *args):
return self._file.readlines(*args)
def seek(self, *args):
self._file.seek(*args)
@property
def softspace(self):
return self._file.softspace
def tell(self):
return self._file.tell()
def truncate(self, size=None):
if size is None:
self._file.truncate()
else:
if size > self._max_size:
self.rollover()
self._file.truncate(size)
def write(self, s):
file = self._file
rv = file.write(s)
self._check(file)
return rv
def writelines(self, iterable):
file = self._file
rv = file.writelines(iterable)
self._check(file)
return rv
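# Usage sketch for SpooledTemporaryFile: data stays in a BytesIO/StringIO until
# the write position passes max_size (or fileno() is requested), at which point
# rollover() copies it into a real TemporaryFile. Illustrative only; never
# called by this module.
def _example_spooled_usage():
    spool = SpooledTemporaryFile(max_size=16)
    spool.write(b"0123456789")   # 10 bytes written, still held in memory
    assert not spool._rolled     # _rolled is internal state, peeked at purely for illustration
    spool.write(b"0123456789")   # position is now 20 > 16, triggers rollover
    assert spool._rolled
    spool.close()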
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self._closed = False
self.name = None # Handle mkdtemp raising an exception
self.name = mkdtemp(suffix, prefix, dir)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def cleanup(self, _warn=False):
if self.name and not self._closed:
try:
self._rmtree(self.name)
except (TypeError, AttributeError) as ex:
# Issue #10188: Emit a warning on stderr
# if the directory could not be cleaned
# up due to missing globals
if "None" not in str(ex):
raise
print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
file=_sys.stderr)
return
self._closed = True
if _warn:
self._warn("Implicitly cleaning up {!r}".format(self),
ResourceWarning)
def __exit__(self, exc, value, tb):
self.cleanup()
def __del__(self):
# Issue a ResourceWarning if implicit cleanup needed
self.cleanup(_warn=True)
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_islink = staticmethod(_os.path.islink)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_os_error = OSError
_warn = _warnings.warn
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname) and not self._islink(fullname)
except self._os_error:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except self._os_error:
pass
try:
self._rmdir(path)
except self._os_error:
pass
| gpl-3.0 |
zilantian/p2pool | p2pool/util/datachunker.py | 288 | 1407 | import collections
class StringBuffer(object):
'Buffer manager with great worst-case behavior'
def __init__(self, data=''):
self.buf = collections.deque([data])
self.buf_len = len(data)
self.pos = 0
def __len__(self):
return self.buf_len - self.pos
def add(self, data):
self.buf.append(data)
self.buf_len += len(data)
def get(self, wants):
if self.buf_len - self.pos < wants:
raise IndexError('not enough data')
data = []
while wants:
seg = self.buf[0][self.pos:self.pos+wants]
self.pos += len(seg)
while self.buf and self.pos >= len(self.buf[0]):
x = self.buf.popleft()
self.buf_len -= len(x)
self.pos -= len(x)
data.append(seg)
wants -= len(seg)
return ''.join(data)
def _DataChunker(receiver):
wants = receiver.next()
buf = StringBuffer()
while True:
if len(buf) >= wants:
wants = receiver.send(buf.get(wants))
else:
buf.add((yield))
def DataChunker(receiver):
'''
    Produces a function that feeds data into a generator (receiver); the
    receiver yields the number of bytes it wants next and is sent exactly
    that many once they have accumulated.
    '''
x = _DataChunker(receiver)
x.next()
return x.send
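# Usage sketch: a receiver generator yields how many bytes it wants next and
# is sent exactly that many once they have accumulated, regardless of how the
# input was split. The names below are illustrative only.
def _example_length_prefixed_parser():
    while True:
        length = yield 4            # first ask for a 4-byte ASCII length field
        payload = yield int(length) # then ask for that many payload bytes
        # ... hand `payload` to the protocol layer here ...
def _example_feed(chunks):
    # chunks: an iterable of str pieces, e.g. ['0005he', 'llo']
    feed = DataChunker(_example_length_prefixed_parser())
    for chunk in chunks:
        feed(chunk)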
| gpl-3.0 |
MechCoder/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
    def _str_signature(self):
        # Signature rendering is disabled here: the unconditional return below
        # makes the original markup branch unreachable (it is kept as-is).
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
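# Usage sketch: get_doc_object() chooses the right wrapper for the object kind
# and the wrapper renders to reST when converted with str(). Illustrative
# only; not used by this extension.
def _example_render(obj):
    doc = get_doc_object(obj, config={'use_plots': False})
    return str(doc)  # reST text with rubric / field-list sections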
| bsd-3-clause |
alphagov/notify-api | app/main/views/index.py | 1 | 4218 | from flask import jsonify, url_for, current_app
from .. import main
@main.route('/')
def index():
"""Entry point for the API, show the resources that are available."""
return jsonify(links={
"user.fetch_user_by_id": {
"url": url_for(
'.fetch_user_by_id',
user_id="123",
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "GET"
},
"user.fetch_user_by_email": {
"url": url_for(
'.fetch_user_by_email',
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "GET"
},
"user.authenticate": {
"url": url_for(
'.auth_user',
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "POST"
},
"notification.create": {
"url": url_for(
'.create_sms_notification',
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "POST"
},
"organisation.fetch": {
"url": url_for(
'.fetch_organisation',
organisation_id="123",
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "GET"
},
"job.fetch": {
"url": url_for(
'.fetch_job',
job_id="123",
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "GET"
},
"job.fetch_jobs_by_service": {
"url": url_for(
'.fetch_jobs_by_service',
service_id="123",
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "GET"
},
"job.create": {
"url": url_for(
'.create_job',
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "POST"
},
"service.fetch_service_by_user_id_and_service_id": {
"url": url_for(
'.fetch_service_by_user_id_and_service_id',
user_id="123",
service_id="123",
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "GET"
},
"service.fetch_services_by_user": {
"url": url_for(
'.fetch_services_by_user',
user_id="123",
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "GET"
},
"service.create": {
"url": url_for(
'.create_service',
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "POST"
},
"notification.fetch": {
"url": url_for(
'.fetch_notifications',
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "GET"
},
"notification.fetch_notifications_by_job": {
"url": url_for(
'.fetch_notifications_by_job',
job_id="123",
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "GET"
},
"notification.create_email": {
"url": url_for(
'.create_email_notification',
_external=True,
_scheme=current_app.config.get('NOTIFY_HTTP_PROTO', 'http')
),
"method": "POST"
}
}
), 200
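# Response sketch (shape only; the concrete URLs come from url_for and are not
# reproduced here):
# {
#   "links": {
#     "user.fetch_user_by_id": {"url": "<absolute url built with user_id=123>",
#                               "method": "GET"},
#     "notification.create": {"url": "<absolute url>", "method": "POST"},
#     ...
#   }
# }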
| mit |
ximion/dak-dep11 | tests/test_process_gpgv_output.py | 6 | 1355 | #!/usr/bin/env python
from base_test import DakTestCase
import unittest
from daklib.utils import process_gpgv_output
class ProcessGPGVOutputTestCase(DakTestCase):
def assertParse(self, input, output):
self.assertEqual(process_gpgv_output(input)[0], output)
def assertNotParse(self, input):
ret = process_gpgv_output(input)
self.assertNotEqual(len(ret[1]), 0)
##
def testEmpty(self):
self.assertParse('', {})
def testBroken(self):
self.assertNotParse('foo')
self.assertNotParse(' foo ')
self.assertNotParse('[PREFIXPG:] KEY VAL1 VAL2 VAL3')
def testSimple(self):
self.assertParse(
'[GNUPG:] KEY VAL1 VAL2 VAL3',
{'KEY': ['VAL1', 'VAL2', 'VAL3']},
)
def testNoKeys(self):
self.assertParse('[GNUPG:] KEY', {'KEY': []})
def testDuplicate(self):
self.assertNotParse('[GNUPG:] TEST_KEY\n[GNUPG:] TEST_KEY')
self.assertNotParse('[GNUPG:] KEY VAL1\n[GNUPG:] KEY VAL2')
def testDuplicateSpecial(self):
# NODATA and friends are special
for special in ('NODATA', 'SIGEXPIRED', 'KEYEXPIRED'):
self.assertParse(
'[GNUPG:] %s\n[GNUPG:] %s' % (special, special),
{special: []},
)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
WebSpider/headphones | lib/unidecode/x00d.py | 252 | 4121 | data = (
'[?]', # 0x00
'[?]', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'e', # 0x0e
'ee', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'o', # 0x12
'oo', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'rr', # 0x31
'l', # 0x32
'll', # 0x33
'lll', # 0x34
'v', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'e', # 0x46
'ee', # 0x47
'ai', # 0x48
'', # 0x49
'o', # 0x4a
'oo', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'+', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'ae', # 0x87
'aae', # 0x88
'i', # 0x89
'ii', # 0x8a
'u', # 0x8b
'uu', # 0x8c
'R', # 0x8d
'RR', # 0x8e
'L', # 0x8f
'LL', # 0x90
'e', # 0x91
'ee', # 0x92
'ai', # 0x93
'o', # 0x94
'oo', # 0x95
'au', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'k', # 0x9a
'kh', # 0x9b
'g', # 0x9c
'gh', # 0x9d
'ng', # 0x9e
'nng', # 0x9f
'c', # 0xa0
'ch', # 0xa1
'j', # 0xa2
'jh', # 0xa3
'ny', # 0xa4
'jny', # 0xa5
'nyj', # 0xa6
'tt', # 0xa7
'tth', # 0xa8
'dd', # 0xa9
'ddh', # 0xaa
'nn', # 0xab
'nndd', # 0xac
't', # 0xad
'th', # 0xae
'd', # 0xaf
'dh', # 0xb0
'n', # 0xb1
'[?]', # 0xb2
'nd', # 0xb3
'p', # 0xb4
'ph', # 0xb5
'b', # 0xb6
'bh', # 0xb7
'm', # 0xb8
'mb', # 0xb9
'y', # 0xba
'r', # 0xbb
'[?]', # 0xbc
'l', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'v', # 0xc0
'sh', # 0xc1
'ss', # 0xc2
's', # 0xc3
'h', # 0xc4
'll', # 0xc5
'f', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'aa', # 0xcf
'ae', # 0xd0
'aae', # 0xd1
'i', # 0xd2
'ii', # 0xd3
'u', # 0xd4
'[?]', # 0xd5
'uu', # 0xd6
'[?]', # 0xd7
'R', # 0xd8
'e', # 0xd9
'ee', # 0xda
'ai', # 0xdb
'o', # 0xdc
'oo', # 0xdd
'au', # 0xde
'L', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'RR', # 0xf2
'LL', # 0xf3
' . ', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-3.0 |
BT-ojossen/odoo | addons/portal_project_issue/__openerp__.py | 375 | 1713 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Issue',
'version': '0.1',
'category': 'Tools',
'complexity': 'easy',
'description': """
    This module adds an issue menu and features to your portal if project_issue and portal are installed.
==================================================================================================
""",
'author': 'OpenERP SA',
'depends': ['project_issue','portal'],
'data': [
'security/portal_security.xml',
'security/ir.model.access.csv',
'portal_project_issue_view.xml',
'views/portal_project_issue.xml',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
trishnaguha/ansible | lib/ansible/modules/network/junos/junos_l3_interface.py | 26 | 6844 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_l3_interface
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage L3 interfaces on Juniper JUNOS network devices
description:
- This module provides declarative management of L3 interfaces
on Juniper JUNOS network devices.
options:
name:
description:
- Name of the L3 interface.
ipv4:
description:
- IPv4 of the L3 interface.
ipv6:
description:
- IPv6 of the L3 interface.
unit:
description:
- Logical interface number.
default: 0
filter_input:
description:
- The name of input filter.
version_added: "2.8"
filter_output:
description:
- The name of output filter.
version_added: "2.8"
filter6_input:
description:
- The name of input filter for ipv6.
version_added: "2.8"
filter6_output:
description:
- The name of output filter for ipv6.
version_added: "2.8"
aggregate:
description: List of L3 interfaces definitions
state:
description:
- State of the L3 interface configuration.
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
type: bool
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
extends_documentation_fragment: junos
"""
EXAMPLES = """
- name: Set ge-0/0/1 IPv4 address
junos_l3_interface:
name: ge-0/0/1
ipv4: 192.168.0.1
- name: Remove ge-0/0/1 IPv4 address
junos_l3_interface:
name: ge-0/0/1
state: absent
- name: Set ipv4 address using aggregate
junos_l3_interface:
aggregate:
- name: ge-0/0/1
ipv4: 192.0.2.1
- name: ge-0/0/2
ipv4: 192.0.2.2
ipv6: fd5d:12c9:2201:2::2
- name: Delete ipv4 address using aggregate
junos_l3_interface:
aggregate:
- name: ge-0/0/1
ipv4: 192.0.2.1
- name: ge-0/0/2
ipv4: 192.0.2.2
state: absent
"""
RETURN = """
diff:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: str
sample: >
[edit interfaces ge-0/0/1 unit 0 family inet]
+ address 192.0.2.1/32;
[edit interfaces ge-0/0/1 unit 0 family inet6]
+ address fd5d:12c9:2201:1::1/128;
"""
import collections
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.junos.junos import junos_argument_spec, tostring
from ansible.module_utils.network.junos.junos import load_config, map_params_to_obj, map_obj_to_ele
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes, locked_config, to_param_list
USE_PERSISTENT_CONNECTION = True
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
ipv4=dict(),
ipv6=dict(),
filter_input=dict(),
filter_output=dict(),
filter6_input=dict(),
filter6_output=dict(),
unit=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent']),
active=dict(default=True, type='bool')
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(junos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
required_one_of=required_one_of)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
top = 'interfaces/interface'
param_to_xpath_map = collections.OrderedDict()
param_to_xpath_map.update([
('name', {'xpath': 'name', 'parent_attrib': False, 'is_key': True}),
('unit', {'xpath': 'name', 'top': 'unit', 'parent_attrib': False, 'is_key': True}),
('ipv4', {'xpath': 'inet/address/name', 'top': 'unit/family', 'is_key': True}),
('ipv6', {'xpath': 'inet6/address/name', 'top': 'unit/family', 'is_key': True}),
('filter_input', {'xpath': 'inet/filter/input', 'top': 'unit/family'}),
('filter_output', {'xpath': 'inet/filter/output', 'top': 'unit/family'}),
('filter6_input', {'xpath': 'inet6/filter/input', 'top': 'unit/family'}),
('filter6_output', {'xpath': 'inet6/filter/output', 'top': 'unit/family'}),
])
params = to_param_list(module)
requests = list()
for param in params:
# if key doesn't exist in the item, get it from module.params
for key in param:
if param.get(key) is None:
param[key] = module.params[key]
item = param.copy()
if not item['ipv4'] and not item['ipv6']:
module.fail_json(msg="one of the following is required: ipv4,ipv6")
want = map_params_to_obj(module, param_to_xpath_map, param=item)
requests.append(map_obj_to_ele(module, want, top, param=item))
diff = None
with locked_config(module):
for req in requests:
diff = load_config(module, tostring(req), warnings, action='merge')
commit = not module.check_mode
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |
teamstoreheddinge/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x003.py | 246 | 3875 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'a', # 0x63
'e', # 0x64
'i', # 0x65
'o', # 0x66
'u', # 0x67
'c', # 0x68
'd', # 0x69
'h', # 0x6a
'm', # 0x6b
'r', # 0x6c
't', # 0x6d
'v', # 0x6e
'x', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'\'', # 0x74
',', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'?', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'', # 0x84
'', # 0x85
'A', # 0x86
';', # 0x87
'E', # 0x88
'E', # 0x89
'I', # 0x8a
'[?]', # 0x8b
'O', # 0x8c
'[?]', # 0x8d
'U', # 0x8e
'O', # 0x8f
'I', # 0x90
'A', # 0x91
'B', # 0x92
'G', # 0x93
'D', # 0x94
'E', # 0x95
'Z', # 0x96
'E', # 0x97
'Th', # 0x98
'I', # 0x99
'K', # 0x9a
'L', # 0x9b
'M', # 0x9c
'N', # 0x9d
'Ks', # 0x9e
'O', # 0x9f
'P', # 0xa0
'R', # 0xa1
'[?]', # 0xa2
'S', # 0xa3
'T', # 0xa4
'U', # 0xa5
'Ph', # 0xa6
'Kh', # 0xa7
'Ps', # 0xa8
'O', # 0xa9
'I', # 0xaa
'U', # 0xab
'a', # 0xac
'e', # 0xad
'e', # 0xae
'i', # 0xaf
'u', # 0xb0
'a', # 0xb1
'b', # 0xb2
'g', # 0xb3
'd', # 0xb4
'e', # 0xb5
'z', # 0xb6
'e', # 0xb7
'th', # 0xb8
'i', # 0xb9
'k', # 0xba
'l', # 0xbb
'm', # 0xbc
'n', # 0xbd
'x', # 0xbe
'o', # 0xbf
'p', # 0xc0
'r', # 0xc1
's', # 0xc2
's', # 0xc3
't', # 0xc4
'u', # 0xc5
'ph', # 0xc6
'kh', # 0xc7
'ps', # 0xc8
'o', # 0xc9
'i', # 0xca
'u', # 0xcb
'o', # 0xcc
'u', # 0xcd
'o', # 0xce
'[?]', # 0xcf
'b', # 0xd0
'th', # 0xd1
'U', # 0xd2
'U', # 0xd3
'U', # 0xd4
'ph', # 0xd5
'p', # 0xd6
'&', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'St', # 0xda
'st', # 0xdb
'W', # 0xdc
'w', # 0xdd
'Q', # 0xde
'q', # 0xdf
'Sp', # 0xe0
'sp', # 0xe1
'Sh', # 0xe2
'sh', # 0xe3
'F', # 0xe4
'f', # 0xe5
'Kh', # 0xe6
'kh', # 0xe7
'H', # 0xe8
'h', # 0xe9
'G', # 0xea
'g', # 0xeb
'CH', # 0xec
'ch', # 0xed
'Ti', # 0xee
'ti', # 0xef
'k', # 0xf0
'r', # 0xf1
'c', # 0xf2
'j', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
keithasaurus/django_fun_views | fun_views/views/generic/update/render.py | 1 | 1463 | from fun_views.patterns.update.render import update_render_pattern
from fun_views.views.utils import (get_context_base, make_base_view,
not_set_get_form_class, not_set_get_obj,
not_set_get_template_name, prefer_func,
prefer_literal, render_response_base)
update_render_base = make_base_view(update_render_pattern)
def _init_form(req_data, form_class, obj):
return form_class(instance=obj)
def update_render(obj=None,
get_obj=not_set_get_obj,
form_class=None,
get_form_class=not_set_get_form_class,
init_form=_init_form,
obj_context_name='obj',
get_obj_context_name=None,
form_context_name='form',
get_form_context_name=None,
get_context=get_context_base,
template_name=None,
get_template_name=not_set_get_template_name,
render_response=render_response_base):
return update_render_base(
prefer_literal(obj, get_obj),
prefer_literal(form_class, get_form_class),
init_form,
prefer_func(obj_context_name, get_obj_context_name),
prefer_func(form_context_name, get_form_context_name),
get_context,
prefer_literal(template_name, get_template_name),
render_response
)
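# Usage sketch (commented out because the model/form names and the exact call
# signature expected of the get_* callables are assumptions, defined by
# fun_views.patterns rather than shown here):
#
#   article_update_view = update_render(
#       get_obj=get_article,            # callable resolving the Article to edit
#       form_class=ArticleForm,
#       template_name='articles/update.html',
#   )
#
# Literal values (form_class, template_name) take precedence over their get_*
# counterparts via prefer_literal/prefer_func above.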
| mit |
arbrandes/edx-platform | lms/djangoapps/discussion/views.py | 3 | 41396 | """
Views handling read (GET) requests for the Discussion tab and inline discussions.
"""
import logging
from functools import wraps
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.contrib.staticfiles.storage import staticfiles_storage
from django.http import Http404, HttpResponseForbidden, HttpResponseServerError
from django.shortcuts import render_to_response
from django.template.context_processors import csrf
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import get_language_bidi
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_GET, require_http_methods
from edx_django_utils.monitoring import function_trace
from opaque_keys.edx.keys import CourseKey
from rest_framework import status
from web_fragments.fragment import Fragment
import lms.djangoapps.discussion.django_comment_client.utils as utils
import openedx.core.djangoapps.django_comment_common.comment_client as cc
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.util.json_request import JsonResponse, expect_json
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.courseware.courses import get_course_with_access
from lms.djangoapps.courseware.views.views import CourseTabView
from lms.djangoapps.discussion.config.settings import is_forum_daily_digest_enabled
from lms.djangoapps.discussion.django_comment_client.base.views import track_thread_viewed_event
from lms.djangoapps.discussion.django_comment_client.constants import TYPE_ENTRY
from lms.djangoapps.discussion.django_comment_client.permissions import has_permission
from lms.djangoapps.discussion.django_comment_client.utils import (
add_courseware_context,
available_division_schemes,
course_discussion_division_enabled,
extract,
get_group_id_for_comments_service,
get_group_id_for_user,
get_group_names_by_id,
is_commentable_divided,
strip_none
)
from lms.djangoapps.discussion.exceptions import TeamDiscussionHiddenFromUserException
from lms.djangoapps.experiments.utils import get_experiment_user_metadata_context
from lms.djangoapps.teams import api as team_api
from openedx.core.djangoapps.django_comment_common.models import CourseDiscussionSettings
from openedx.core.djangoapps.django_comment_common.utils import ThreadContext
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from openedx.features.course_duration_limits.access import generate_course_expired_fragment
from xmodule.modulestore.django import modulestore
log = logging.getLogger("edx.discussions")
THREADS_PER_PAGE = 20
INLINE_THREADS_PER_PAGE = 20
PAGES_NEARBY_DELTA = 2
BOOTSTRAP_DISCUSSION_CSS_PATH = 'css/discussion/lms-discussion-bootstrap.css'
TEAM_PERMISSION_MESSAGE = _("Access to this discussion is restricted to team members and staff.")
def make_course_settings(course, user, include_category_map=True):
"""
Generate a JSON-serializable model for course settings, which will be used to initialize a
DiscussionCourseSettings object on the client.
"""
course_discussion_settings = CourseDiscussionSettings.get(course.id)
group_names_by_id = get_group_names_by_id(course_discussion_settings)
course_setting = {
'is_discussion_division_enabled': course_discussion_division_enabled(course_discussion_settings),
'allow_anonymous': course.allow_anonymous,
'allow_anonymous_to_peers': course.allow_anonymous_to_peers,
'groups': [
{"id": str(group_id), "name": group_name} for group_id, group_name in group_names_by_id.items()
]
}
if include_category_map:
course_setting['category_map'] = utils.get_discussion_category_map(course, user)
return course_setting
def get_threads(request, course, user_info, discussion_id=None, per_page=THREADS_PER_PAGE):
"""
This may raise an appropriate subclass of cc.utils.CommentClientError
if something goes wrong, or ValueError if the group_id is invalid.
Arguments:
request (WSGIRequest): The user request.
course (CourseBlockWithMixins): The course object.
user_info (dict): The comment client User object as a dict.
discussion_id (unicode): Optional discussion id/commentable id for context.
per_page (int): Optional number of threads per page.
Returns:
(tuple of list, dict): A tuple of the list of threads and a dict of the
query parameters used for the search.
"""
default_query_params = {
'page': 1,
'per_page': per_page,
'sort_key': 'activity',
'text': '',
'course_id': str(course.id),
'user_id': request.user.id,
'context': ThreadContext.COURSE,
'group_id': get_group_id_for_comments_service(request, course.id, discussion_id), # may raise ValueError
}
# If provided with a discussion id, filter by discussion id in the
# comments_service.
if discussion_id is not None:
default_query_params['commentable_id'] = discussion_id
# Use the discussion id/commentable id to determine the context we are going to pass through to the backend.
if team_api.get_team_by_discussion(discussion_id) is not None:
default_query_params['context'] = ThreadContext.STANDALONE
_check_team_discussion_access(request, course, discussion_id)
if not request.GET.get('sort_key'):
# If the user did not select a sort key, use their last used sort key
default_query_params['sort_key'] = user_info.get('default_sort_key') or default_query_params['sort_key']
elif request.GET.get('sort_key') != user_info.get('default_sort_key'):
# If the user clicked a sort key, update their default sort key
cc_user = cc.User.from_django_user(request.user)
cc_user.default_sort_key = request.GET.get('sort_key')
cc_user.save()
    # There are two dimensions to consider when executing a search with
    # respect to group id:
    #   - is the user a moderator?
    #   - did the user request a specific group?
query_params = default_query_params.copy()
query_params.update(
strip_none(
extract(
request.GET,
[
'page',
'sort_key',
'text',
'commentable_ids',
'flagged',
'unread',
'unanswered',
]
)
)
)
paginated_results = cc.Thread.search(query_params)
threads = paginated_results.collection
# If not provided with a discussion id, filter threads by commentable ids
# which are accessible to the current user.
if discussion_id is None:
discussion_category_ids = set(utils.get_discussion_categories_ids(course, request.user))
threads = [
thread for thread in threads
if thread.get('commentable_id') in discussion_category_ids
]
for thread in threads:
# patch for backward compatibility to comments service
if 'pinned' not in thread:
thread['pinned'] = False
query_params['page'] = paginated_results.page
query_params['num_pages'] = paginated_results.num_pages
query_params['corrected_text'] = paginated_results.corrected_text
return threads, query_params
def use_bulk_ops(view_func):
"""
Wraps internal request handling inside a modulestore bulk op, significantly
reducing redundant database calls. Also converts the course_id parsed from
the request uri to a CourseKey before passing to the view.
"""
@wraps(view_func)
def wrapped_view(request, course_id, *args, **kwargs):
course_key = CourseKey.from_string(course_id)
with modulestore().bulk_operations(course_key):
return view_func(request, course_key, *args, **kwargs)
return wrapped_view
@login_required
@use_bulk_ops
def inline_discussion(request, course_key, discussion_id):
"""
Renders JSON for DiscussionModules
"""
with function_trace('get_course_and_user_info'):
course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=True)
cc_user = cc.User.from_django_user(request.user)
user_info = cc_user.to_dict()
try:
with function_trace('get_threads'):
threads, query_params = get_threads(
request, course, user_info, discussion_id, per_page=INLINE_THREADS_PER_PAGE
)
except ValueError:
return HttpResponseServerError('Invalid group_id')
except TeamDiscussionHiddenFromUserException:
return HttpResponseForbidden(TEAM_PERMISSION_MESSAGE)
with function_trace('get_metadata_for_threads'):
annotated_content_info = utils.get_metadata_for_threads(course_key, threads, request.user, user_info)
with function_trace('determine_group_permissions'):
is_staff = has_permission(request.user, 'openclose_thread', course.id)
course_discussion_settings = CourseDiscussionSettings.get(course.id)
group_names_by_id = get_group_names_by_id(course_discussion_settings)
course_is_divided = course_discussion_settings.division_scheme is not CourseDiscussionSettings.NONE
with function_trace('prepare_content'):
threads = [
utils.prepare_content(
thread,
course_key,
is_staff,
course_is_divided,
group_names_by_id
) for thread in threads
]
return utils.JsonResponse({
'is_commentable_divided': is_commentable_divided(course_key, discussion_id),
'discussion_data': threads,
'user_info': user_info,
'user_group_id': get_group_id_for_user(request.user, course_discussion_settings),
'annotated_content_info': annotated_content_info,
'page': query_params['page'],
'num_pages': query_params['num_pages'],
'roles': utils.get_role_ids(course_key),
'course_settings': make_course_settings(course, request.user, False)
})
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@login_required
@use_bulk_ops
def forum_form_discussion(request, course_key):
"""
Renders the main Discussion page, potentially filtered by a search query
"""
course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=True)
request.user.is_community_ta = utils.is_user_community_ta(request.user, course.id)
if request.is_ajax():
user = cc.User.from_django_user(request.user)
user_info = user.to_dict()
try:
unsafethreads, query_params = get_threads(request, course, user_info) # This might process a search query
is_staff = has_permission(request.user, 'openclose_thread', course.id)
threads = [utils.prepare_content(thread, course_key, is_staff) for thread in unsafethreads]
except cc.utils.CommentClientMaintenanceError:
return HttpResponseServerError('Forum is in maintenance mode', status=status.HTTP_503_SERVICE_UNAVAILABLE)
except ValueError:
return HttpResponseServerError("Invalid group_id")
with function_trace("get_metadata_for_threads"):
annotated_content_info = utils.get_metadata_for_threads(course_key, threads, request.user, user_info)
with function_trace("add_courseware_context"):
add_courseware_context(threads, course, request.user)
return utils.JsonResponse({
'discussion_data': threads, # TODO: Standardize on 'discussion_data' vs 'threads'
'annotated_content_info': annotated_content_info,
'num_pages': query_params['num_pages'],
'page': query_params['page'],
'corrected_text': query_params['corrected_text'],
})
else:
course_id = str(course.id)
tab_view = CourseTabView()
return tab_view.get(request, course_id, 'discussion')
@require_GET
@login_required
@use_bulk_ops
def single_thread(request, course_key, discussion_id, thread_id):
"""
Renders a response to display a single discussion thread. This could either be a page refresh
after navigating to a single thread, a direct link to a single thread, or an AJAX call from the
discussions UI loading the responses/comments for a single thread.
Depending on the HTTP headers, we'll adjust our response accordingly.
"""
course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=True)
request.user.is_community_ta = utils.is_user_community_ta(request.user, course.id)
if request.is_ajax():
cc_user = cc.User.from_django_user(request.user)
user_info = cc_user.to_dict()
is_staff = has_permission(request.user, 'openclose_thread', course.id)
try:
_check_team_discussion_access(request, course, discussion_id)
except TeamDiscussionHiddenFromUserException:
return HttpResponseForbidden(TEAM_PERMISSION_MESSAGE)
thread = _load_thread_for_viewing(
request,
course,
discussion_id=discussion_id,
thread_id=thread_id,
raise_event=True,
)
with function_trace("get_annotated_content_infos"):
annotated_content_info = utils.get_annotated_content_infos(
course_key,
thread,
request.user,
user_info=user_info
)
content = utils.prepare_content(thread.to_dict(), course_key, is_staff)
with function_trace("add_courseware_context"):
add_courseware_context([content], course, request.user)
return utils.JsonResponse({
'content': content,
'annotated_content_info': annotated_content_info,
})
else:
course_id = str(course.id)
tab_view = CourseTabView()
return tab_view.get(request, course_id, 'discussion', discussion_id=discussion_id, thread_id=thread_id)
def _find_thread(request, course, discussion_id, thread_id):
"""
Finds the discussion thread with the specified ID.
Args:
request: The Django request.
course_id: The ID of the owning course.
discussion_id: The ID of the owning discussion.
thread_id: The ID of the thread.
Returns:
The thread in question if the user can see it, else None.
"""
try:
thread = cc.Thread.find(thread_id).retrieve(
with_responses=request.is_ajax(),
recursive=request.is_ajax(),
user_id=request.user.id,
response_skip=request.GET.get("resp_skip"),
response_limit=request.GET.get("resp_limit")
)
except cc.utils.CommentClientRequestError:
return None
# Verify that the student has access to this thread if belongs to a course discussion module
thread_context = getattr(thread, "context", "course")
if thread_context == "course" and not utils.discussion_category_id_access(course, request.user, discussion_id):
return None
# verify that the thread belongs to the requesting student's group
is_moderator = has_permission(request.user, "see_all_cohorts", course.id)
course_discussion_settings = CourseDiscussionSettings.get(course.id)
if is_commentable_divided(course.id, discussion_id, course_discussion_settings) and not is_moderator:
user_group_id = get_group_id_for_user(request.user, course_discussion_settings)
if getattr(thread, "group_id", None) is not None and user_group_id != thread.group_id:
return None
return thread
def _load_thread_for_viewing(request, course, discussion_id, thread_id, raise_event):
"""
Loads the discussion thread with the specified ID and fires an
edx.forum.thread.viewed event.
Args:
request: The Django request.
course_id: The ID of the owning course.
discussion_id: The ID of the owning discussion.
thread_id: The ID of the thread.
raise_event: Whether an edx.forum.thread.viewed tracking event should
be raised
Returns:
The thread in question if the user can see it.
Raises:
Http404 if the thread does not exist or the user cannot
see it.
"""
thread = _find_thread(request, course, discussion_id=discussion_id, thread_id=thread_id)
if not thread:
raise Http404
if raise_event:
track_thread_viewed_event(request, course, thread)
return thread
def _create_base_discussion_view_context(request, course_key):
"""
Returns the default template context for rendering any discussion view.
"""
user = request.user
cc_user = cc.User.from_django_user(user)
user_info = cc_user.to_dict()
course = get_course_with_access(user, 'load', course_key, check_if_enrolled=True)
course_settings = make_course_settings(course, user)
return {
'csrf': csrf(request)['csrf_token'],
'course': course,
'user': user,
'user_info': user_info,
'staff_access': bool(has_access(user, 'staff', course)),
'roles': utils.get_role_ids(course_key),
'can_create_comment': has_permission(user, "create_comment", course.id),
'can_create_subcomment': has_permission(user, "create_sub_comment", course.id),
'can_create_thread': has_permission(user, "create_thread", course.id),
'flag_moderator': bool(
has_permission(user, 'openclose_thread', course.id) or
has_access(user, 'staff', course)
),
'course_settings': course_settings,
'disable_courseware_js': True,
'uses_bootstrap': True,
}
def _get_discussion_default_topic_id(course):
    """Return the id of the discussion topic marked as the course default, if any."""
    for topic, entry in course.discussion_topics.items():  # lint-amnesty, pylint: disable=unused-variable
        if entry.get('default') is True:
            return entry['id']
def _create_discussion_board_context(request, base_context, thread=None):
"""
Returns the template context for rendering the discussion board.
"""
context = base_context.copy()
course = context['course']
course_key = course.id
thread_id = thread.id if thread else None
discussion_id = thread.commentable_id if thread else None
course_settings = context['course_settings']
user = context['user']
cc_user = cc.User.from_django_user(user)
user_info = context['user_info']
if thread:
_check_team_discussion_access(request, course, discussion_id)
# Since we're in page render mode, and the discussions UI will request the thread list itself,
# we need only return the thread information for this one.
threads = [thread.to_dict()]
for thread in threads: # lint-amnesty, pylint: disable=redefined-argument-from-local
# patch for backward compatibility with comments service
if "pinned" not in thread:
thread["pinned"] = False
thread_pages = 1
root_url = reverse('forum_form_discussion', args=[str(course.id)])
else:
threads, query_params = get_threads(request, course, user_info) # This might process a search query
thread_pages = query_params['num_pages']
root_url = request.path
is_staff = has_permission(user, 'openclose_thread', course.id)
threads = [utils.prepare_content(thread, course_key, is_staff) for thread in threads]
with function_trace("get_metadata_for_threads"):
annotated_content_info = utils.get_metadata_for_threads(course_key, threads, user, user_info)
with function_trace("add_courseware_context"):
add_courseware_context(threads, course, user)
with function_trace("get_cohort_info"):
course_discussion_settings = CourseDiscussionSettings.get(course_key)
user_group_id = get_group_id_for_user(user, course_discussion_settings)
context.update({
'root_url': root_url,
'discussion_id': discussion_id,
'thread_id': thread_id,
'threads': threads,
'thread_pages': thread_pages,
'annotated_content_info': annotated_content_info,
'is_moderator': has_permission(user, "see_all_cohorts", course_key),
'groups': course_settings["groups"], # still needed to render _thread_list_template
'user_group_id': user_group_id, # read from container in NewPostView
'sort_preference': cc_user.default_sort_key,
'category_map': course_settings["category_map"],
'course_settings': course_settings,
'is_commentable_divided': is_commentable_divided(course_key, discussion_id, course_discussion_settings),
# If the default topic id is None the front-end code will look for a topic that contains "General"
'discussion_default_topic_id': _get_discussion_default_topic_id(course),
'enable_daily_digest': is_forum_daily_digest_enabled()
})
context.update(
get_experiment_user_metadata_context(
course,
user,
)
)
return context
def create_user_profile_context(request, course_key, user_id):
""" Generate a context dictionary for the user profile. """
user = cc.User.from_django_user(request.user)
course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=True)
# If user is not enrolled in the course, do not proceed.
django_user = User.objects.get(id=user_id)
if not CourseEnrollment.is_enrolled(django_user, course.id):
raise Http404
query_params = {
'page': request.GET.get('page', 1),
'per_page': THREADS_PER_PAGE, # more than threads_per_page to show more activities
}
group_id = get_group_id_for_comments_service(request, course_key)
if group_id is not None:
query_params['group_id'] = group_id
profiled_user = cc.User(id=user_id, course_id=course_key, group_id=group_id)
else:
profiled_user = cc.User(id=user_id, course_id=course_key)
threads, page, num_pages = profiled_user.active_threads(query_params)
query_params['page'] = page
query_params['num_pages'] = num_pages
with function_trace("get_metadata_for_threads"):
user_info = cc.User.from_django_user(request.user).to_dict()
annotated_content_info = utils.get_metadata_for_threads(course_key, threads, request.user, user_info)
is_staff = has_permission(request.user, 'openclose_thread', course.id)
threads = [utils.prepare_content(thread, course_key, is_staff) for thread in threads]
with function_trace("add_courseware_context"):
add_courseware_context(threads, course, request.user)
    # TODO: LEARNER-3854: If we actually implement Learner Analytics code, note that this
    # code was originally protected so it would not run in user_profile() for is_ajax()
    # requests. Someone should determine whether that is still necessary (i.e. was it ever
    # called via is_ajax()) and clean this up as necessary.
user_roles = django_user.roles.filter(
course_id=course.id
).order_by("name").values_list("name", flat=True).distinct()
with function_trace("get_cohort_info"):
course_discussion_settings = CourseDiscussionSettings.get(course_key)
user_group_id = get_group_id_for_user(request.user, course_discussion_settings)
context = _create_base_discussion_view_context(request, course_key)
context.update({
'django_user': django_user,
'django_user_roles': user_roles,
'profiled_user': profiled_user.to_dict(),
'threads': threads,
'user_group_id': user_group_id,
'annotated_content_info': annotated_content_info,
'page': query_params['page'],
'num_pages': query_params['num_pages'],
'sort_preference': user.default_sort_key,
'learner_profile_page_url': reverse('learner_profile', kwargs={'username': django_user.username}),
})
return context
@require_GET
@login_required
@use_bulk_ops
def user_profile(request, course_key, user_id):
"""
Renders a response to display the user profile page (shown after clicking
on a post author's username).
"""
try:
context = create_user_profile_context(request, course_key, user_id)
if request.is_ajax():
return utils.JsonResponse({
'discussion_data': context['threads'],
'page': context['page'],
'num_pages': context['num_pages'],
'annotated_content_info': context['annotated_content_info'],
})
else:
tab_view = CourseTabView()
            # Avoid loading MathJax from 'mathjax_include.html', since that
            # file causes MathJax to be loaded multiple times on the
            # 'user_profile' page.
context['load_mathjax'] = False
return tab_view.get(request, str(course_key), 'discussion', profile_page_context=context)
except User.DoesNotExist:
raise Http404 # lint-amnesty, pylint: disable=raise-missing-from
except ValueError:
return HttpResponseServerError("Invalid group_id")
@login_required
@use_bulk_ops
def followed_threads(request, course_key, user_id):
"""
Ajax-only endpoint retrieving the threads followed by a specific user.
"""
course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=True)
try:
profiled_user = cc.User(id=user_id, course_id=course_key)
query_params = {
'page': 1,
'per_page': THREADS_PER_PAGE, # more than threads_per_page to show more activities
'sort_key': 'date',
}
query_params.update(
strip_none(
extract(
request.GET,
[
'page',
'sort_key',
'flagged',
'unread',
'unanswered',
]
)
)
)
try:
group_id = get_group_id_for_comments_service(request, course_key)
except ValueError:
return HttpResponseServerError("Invalid group_id")
if group_id is not None:
query_params['group_id'] = group_id
paginated_results = profiled_user.subscribed_threads(query_params)
print("\n \n \n paginated results \n \n \n ")
print(paginated_results)
query_params['page'] = paginated_results.page
query_params['num_pages'] = paginated_results.num_pages
user_info = cc.User.from_django_user(request.user).to_dict()
with function_trace("get_metadata_for_threads"):
annotated_content_info = utils.get_metadata_for_threads(
course_key,
paginated_results.collection,
request.user, user_info
)
if request.is_ajax():
is_staff = has_permission(request.user, 'openclose_thread', course.id)
return utils.JsonResponse({
'annotated_content_info': annotated_content_info,
'discussion_data': [
utils.prepare_content(thread, course_key, is_staff) for thread in paginated_results.collection
],
'page': query_params['page'],
'num_pages': query_params['num_pages'],
})
        # TODO: remove non-AJAX support; it does not appear to be used and does not appear to work.
else:
context = {
'course': course,
'user': request.user,
'django_user': User.objects.get(id=user_id),
'profiled_user': profiled_user.to_dict(),
'threads': paginated_results.collection,
'user_info': user_info,
'annotated_content_info': annotated_content_info,
# 'content': content,
}
return render_to_response('discussion/user_profile.html', context)
except User.DoesNotExist:
raise Http404 # lint-amnesty, pylint: disable=raise-missing-from
class DiscussionBoardFragmentView(EdxFragmentView):
"""
Component implementation of the discussion board.
"""
def render_to_fragment( # lint-amnesty, pylint: disable=arguments-differ
self,
request,
course_id=None,
discussion_id=None,
thread_id=None,
profile_page_context=None,
**kwargs
):
"""
Render the discussion board to a fragment.
Args:
request: The Django request.
course_id: The id of the course in question.
discussion_id: An optional discussion ID to be focused upon.
thread_id: An optional ID of the thread to be shown.
Returns:
Fragment: The fragment representing the discussion board
"""
try:
course_key = CourseKey.from_string(course_id)
base_context = _create_base_discussion_view_context(request, course_key)
# Note:
# After the thread is rendered in this fragment, an AJAX
# request is made and the thread is completely loaded again
# (yes, this is something to fix). Because of this, we pass in
            # raise_event=False to _load_thread_for_viewing to avoid duplicate
# tracking events.
thread = (
_load_thread_for_viewing(
request,
base_context['course'],
discussion_id=discussion_id,
thread_id=thread_id,
raise_event=False,
)
if thread_id
else None
)
context = _create_discussion_board_context(request, base_context, thread=thread)
course_expiration_fragment = generate_course_expired_fragment(request.user, context['course'])
context.update({
'course_expiration_fragment': course_expiration_fragment,
})
if profile_page_context:
# EDUCATOR-2119: styles are hard to reconcile if the profile page isn't also a fragment
html = render_to_string('discussion/discussion_profile_page.html', profile_page_context)
else:
html = render_to_string('discussion/discussion_board_fragment.html', context)
fragment = Fragment(html)
self.add_fragment_resource_urls(fragment)
inline_js = render_to_string('discussion/discussion_board_js.template', context)
fragment.add_javascript(inline_js)
if not settings.REQUIRE_DEBUG:
fragment.add_javascript_url(staticfiles_storage.url('discussion/js/discussion_board_factory.js'))
return fragment
except cc.utils.CommentClientMaintenanceError:
log.warning('Forum is in maintenance mode')
html = render_to_string('discussion/maintenance_fragment.html', {
'disable_courseware_js': True,
'uses_bootstrap': True,
})
fragment = Fragment(html)
self.add_fragment_resource_urls(fragment)
return fragment
except TeamDiscussionHiddenFromUserException:
log.warning(
'User with id={user_id} tried to view private discussion with id={discussion_id}'.format(
user_id=request.user.id,
discussion_id=discussion_id
)
)
html = render_to_string('discussion/discussion_private_fragment.html', {
'disable_courseware_js': True,
'uses_bootstrap': True,
})
fragment = Fragment(html)
self.add_fragment_resource_urls(fragment)
return fragment
def vendor_js_dependencies(self):
"""
Returns list of vendor JS files that this view depends on.
The helper function that it uses to obtain the list of vendor JS files
works in conjunction with the Django pipeline to ensure that in development mode
the files are loaded individually, but in production just the single bundle is loaded.
"""
return list(dict.fromkeys(self.get_js_dependencies('discussion_vendor')))
def js_dependencies(self):
"""
Returns list of JS files that this view depends on.
The helper function that it uses to obtain the list of JS files
works in conjunction with the Django pipeline to ensure that in development mode
the files are loaded individually, but in production just the single bundle is loaded.
"""
return self.get_js_dependencies('discussion')
def css_dependencies(self):
"""
Returns list of CSS files that this view depends on.
The helper function that it uses to obtain the list of CSS files
works in conjunction with the Django pipeline to ensure that in development mode
the files are loaded individually, but in production just the single bundle is loaded.
"""
is_right_to_left = get_language_bidi()
css_file = BOOTSTRAP_DISCUSSION_CSS_PATH
if is_right_to_left:
css_file = css_file.replace('.css', '-rtl.css')
return [css_file]
@expect_json
@login_required
def discussion_topics(request, course_key_string):
"""
The handler for divided discussion categories requests.
This will raise 404 if user is not staff.
Returns the JSON representation of discussion topics w.r.t categories for the course.
Example:
>>> example = {
>>> "course_wide_discussions": {
>>> "entries": {
>>> "General": {
>>> "sort_key": "General",
>>> "is_divided": True,
>>> "id": "i4x-edx-eiorguegnru-course-foobarbaz"
>>> }
>>> }
>>> "children": ["General", "entry"]
>>> },
>>> "inline_discussions" : {
>>> "subcategories": {
>>> "Getting Started": {
>>> "subcategories": {},
>>> "children": [
>>> ["Working with Videos", "entry"],
>>> ["Videos on edX", "entry"]
>>> ],
>>> "entries": {
>>> "Working with Videos": {
>>> "sort_key": None,
>>> "is_divided": False,
>>> "id": "d9f970a42067413cbb633f81cfb12604"
>>> },
>>> "Videos on edX": {
>>> "sort_key": None,
>>> "is_divided": False,
>>> "id": "98d8feb5971041a085512ae22b398613"
>>> }
>>> }
>>> },
>>> "children": ["Getting Started", "subcategory"]
>>> },
>>> }
>>> }
"""
course_key = CourseKey.from_string(course_key_string)
course = get_course_with_access(request.user, 'staff', course_key)
discussion_topics = {} # lint-amnesty, pylint: disable=redefined-outer-name
discussion_category_map = utils.get_discussion_category_map(
course, request.user, divided_only_if_explicit=True, exclude_unstarted=False
)
# We extract the data for the course wide discussions from the category map.
course_wide_entries = discussion_category_map.pop('entries')
course_wide_children = []
inline_children = []
for name, c_type in discussion_category_map['children']:
if name in course_wide_entries and c_type == TYPE_ENTRY:
course_wide_children.append([name, c_type])
else:
inline_children.append([name, c_type])
discussion_topics['course_wide_discussions'] = {
'entries': course_wide_entries,
'children': course_wide_children
}
discussion_category_map['children'] = inline_children
discussion_topics['inline_discussions'] = discussion_category_map
return JsonResponse(discussion_topics)
@require_http_methods(("GET", "PATCH"))
@ensure_csrf_cookie
@expect_json
@login_required
def course_discussions_settings_handler(request, course_key_string):
"""
The restful handler for divided discussion setting requests. Requires JSON.
This will raise 404 if user is not staff.
GET
Returns the JSON representation of divided discussion settings for the course.
PATCH
Updates the divided discussion settings for the course. Returns the JSON representation of updated settings.
"""
course_key = CourseKey.from_string(course_key_string)
course = get_course_with_access(request.user, 'staff', course_key)
discussion_settings = CourseDiscussionSettings.get(course_key)
if request.method == 'PATCH':
divided_course_wide_discussions, divided_inline_discussions = get_divided_discussions(
course, discussion_settings
)
settings_to_change = {}
if 'divided_course_wide_discussions' in request.json or 'divided_inline_discussions' in request.json:
divided_course_wide_discussions = request.json.get(
'divided_course_wide_discussions', divided_course_wide_discussions
)
divided_inline_discussions = request.json.get(
'divided_inline_discussions', divided_inline_discussions
)
settings_to_change['divided_discussions'] = divided_course_wide_discussions + divided_inline_discussions
if 'always_divide_inline_discussions' in request.json:
settings_to_change['always_divide_inline_discussions'] = request.json.get(
'always_divide_inline_discussions'
)
if 'division_scheme' in request.json:
settings_to_change['division_scheme'] = request.json.get(
'division_scheme'
)
if not settings_to_change:
return JsonResponse({"error": "Bad Request"}, 400)
try:
if settings_to_change:
discussion_settings.update(settings_to_change)
except ValueError as err:
# Note: error message not translated because it is not exposed to the user (UI prevents this state).
return JsonResponse({"error": str(err)}, 400)
divided_course_wide_discussions, divided_inline_discussions = get_divided_discussions(
course, discussion_settings
)
return JsonResponse({
'id': discussion_settings.id,
'divided_inline_discussions': divided_inline_discussions,
'divided_course_wide_discussions': divided_course_wide_discussions,
'always_divide_inline_discussions': discussion_settings.always_divide_inline_discussions,
'division_scheme': discussion_settings.division_scheme,
'available_division_schemes': available_division_schemes(course_key)
})
def get_divided_discussions(course, discussion_settings):
"""
Returns the course-wide and inline divided discussion ids separately.
"""
divided_course_wide_discussions = []
divided_inline_discussions = []
course_wide_discussions = [topic['id'] for __, topic in course.discussion_topics.items()]
all_discussions = utils.get_discussion_categories_ids(course, None, include_all=True)
for divided_discussion_id in discussion_settings.divided_discussions:
if divided_discussion_id in course_wide_discussions:
divided_course_wide_discussions.append(divided_discussion_id)
elif divided_discussion_id in all_discussions:
divided_inline_discussions.append(divided_discussion_id)
return divided_course_wide_discussions, divided_inline_discussions
def _check_team_discussion_access(request, course, discussion_id):
"""
Helper function to check if the discussion is visible to the user,
if the user is on a team, which has the discussion set to private.
"""
user_is_course_staff = has_access(request.user, "staff", course)
if not user_is_course_staff and not team_api.discussion_visible_by_user(discussion_id, request.user):
raise TeamDiscussionHiddenFromUserException()
| agpl-3.0 |
AtonLerin/pymel | pymel/__init__.py | 1 | 1263 |
# copyright Chad Dombrova [email protected]
# created at luma pictures www.luma-pictures.com
"""
*******************************
PyMEL
*******************************
PyMEL makes python scripting in Maya work the way it should. Maya's command module is a direct
translation of MEL commands into python functions. The result is a very awkward and unpythonic syntax which
does not take advantage of python's strengths -- particularly, a flexible, object-oriented design. PyMEL
builds on the cmds module by organizing many of its commands into a class hierarchy, and by
customizing them to operate in a more succinct and intuitive way.
=======================================
Special Thanks
=======================================
Special thanks to those studios with the foresight to support an open-source project of this nature: Luma Pictures,
Attitude Studio, and ImageMovers Digital.
"""
__versiontuple__ = (1, 0, 9)
__version__ = '.'.join(str(x) for x in __versiontuple__)
__authors__ = ['Chad Dombrova', 'Olivier Renouard', 'Ofer Koren', 'Paul Molodowitch']
import sys
assert sys.version_info > (2, 6), ("pymel version %s is compatible with Maya2013/python2.6 or later" % __version__)
#import internal.plogging as plogging
| bsd-3-clause |
zstackio/zstack-woodpecker | integrationtest/vm/mini/test_life_cycle_vm.py | 1 | 3182 | '''
VM life cycle test for Mini
@author: Zhaohao
'''
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import time
import os
import random
vm = None
#state
RUNNING = inventory.RUNNING
STOPPED = inventory.STOPPED
DESTROYED = inventory.DESTROYED
PAUSED = inventory.PAUSED
EXPUNGED = 'EXPUNGED'
VM_CPU = [1, 2, 3, 4, 7, 8, 15, 16, 20, 32]
# 128M, 256M, 512M, 1G, 2G
VM_MEM = [134217728, 268435456, 536870912, 1073741824, 2147483648]
def random_operations(vm):
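    """Randomly walk the VM through its life-cycle state machine until it is expunged.
    Each iteration picks an operation that is valid for the current VM state,
    applies it, and records the step; the numbered operation path is returned
    as a newline-separated string for the test log.
    """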
path = []
count = 1
RUNNING_OPS = {vm.suspend:'SUSPEND', vm.reboot:'REBOOT', vm.stop:'STOP', vm.destroy:'DESTROY'}
#STOPPED_OPS = {vm.reinit:'REINIT', vm.start:'START', vm.destroy:'DESTROY'}
STOPPED_OPS = {vm.start:'START', vm.destroy:'DESTROY'}
SUSPENDED_OPS = {vm.resume:'RESUME', vm.destroy:'DESTROY'}
DESTROYED_OPS = {vm.recover:'RECOVER', vm.expunge:'EXPUNGED'}
while vm.state!=EXPUNGED:
if vm.state == RUNNING:
op = random.choice(RUNNING_OPS.keys())
op()
path.append("{}.{}".format(str(count), RUNNING_OPS[op]))
count += 1
continue
elif vm.state == STOPPED:
op = random.choice(STOPPED_OPS.keys())
op()
path.append("{}.{}".format(str(count), STOPPED_OPS[op]))
count += 1
continue
elif vm.state == PAUSED:
op = random.choice(SUSPENDED_OPS.keys())
op()
path.append("{}.{}".format(str(count), SUSPENDED_OPS[op]))
count += 1
continue
elif vm.state == DESTROYED:
op = random.choice(DESTROYED_OPS.keys())
op()
path.append("{}.{}".format(str(count), DESTROYED_OPS[op]))
count += 1
continue
else:
return '\n'.join(path)
return '\n'.join(path)
def test():
global vm
vm_creation_option = test_util.VmOption()
image_name = os.environ.get('imageName_s')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vm_creation_option.set_l3_uuids([l3_net_uuid])
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_name('Mini_basic_vm')
for i in range(1, 10):
vm_creation_option.set_cpu_num(random.choice(VM_CPU))
vm_creation_option.set_memory_size(random.choice(VM_MEM))
vm = test_vm_header.ZstackTestVm()
vm.set_creation_option(vm_creation_option)
vm.create()
vm.check()
test_util.test_logger('===path%s===\n%s' % (i, random_operations(vm)))
#vm.expunge()
time.sleep(5)
test_util.test_pass('Mini VM Life Cycle Test Success')
# Will be called only if an exception happens in test().
def error_cleanup():
global vm
if vm:
try:
vm.destroy()
except:
pass
| apache-2.0 |
Myasuka/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)  # np.float is deprecated; use the builtin float
n_train = int(.9 * n_sample)  # slice indices must be integers
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
ressu/SickGear | lib/unidecode/x08b.py | 252 | 4643 | data = (
'Mou ', # 0x00
'Ye ', # 0x01
'Wei ', # 0x02
'[?] ', # 0x03
'Teng ', # 0x04
'Zou ', # 0x05
'Shan ', # 0x06
'Jian ', # 0x07
'Bo ', # 0x08
'Ku ', # 0x09
'Huang ', # 0x0a
'Huo ', # 0x0b
'Ge ', # 0x0c
'Ying ', # 0x0d
'Mi ', # 0x0e
'Xiao ', # 0x0f
'Mi ', # 0x10
'Xi ', # 0x11
'Qiang ', # 0x12
'Chen ', # 0x13
'Nue ', # 0x14
'Ti ', # 0x15
'Su ', # 0x16
'Bang ', # 0x17
'Chi ', # 0x18
'Qian ', # 0x19
'Shi ', # 0x1a
'Jiang ', # 0x1b
'Yuan ', # 0x1c
'Xie ', # 0x1d
'Xue ', # 0x1e
'Tao ', # 0x1f
'Yao ', # 0x20
'Yao ', # 0x21
'[?] ', # 0x22
'Yu ', # 0x23
'Biao ', # 0x24
'Cong ', # 0x25
'Qing ', # 0x26
'Li ', # 0x27
'Mo ', # 0x28
'Mo ', # 0x29
'Shang ', # 0x2a
'Zhe ', # 0x2b
'Miu ', # 0x2c
'Jian ', # 0x2d
'Ze ', # 0x2e
'Jie ', # 0x2f
'Lian ', # 0x30
'Lou ', # 0x31
'Can ', # 0x32
'Ou ', # 0x33
'Guan ', # 0x34
'Xi ', # 0x35
'Zhuo ', # 0x36
'Ao ', # 0x37
'Ao ', # 0x38
'Jin ', # 0x39
'Zhe ', # 0x3a
'Yi ', # 0x3b
'Hu ', # 0x3c
'Jiang ', # 0x3d
'Man ', # 0x3e
'Chao ', # 0x3f
'Han ', # 0x40
'Hua ', # 0x41
'Chan ', # 0x42
'Xu ', # 0x43
'Zeng ', # 0x44
'Se ', # 0x45
'Xi ', # 0x46
'She ', # 0x47
'Dui ', # 0x48
'Zheng ', # 0x49
'Nao ', # 0x4a
'Lan ', # 0x4b
'E ', # 0x4c
'Ying ', # 0x4d
'Jue ', # 0x4e
'Ji ', # 0x4f
'Zun ', # 0x50
'Jiao ', # 0x51
'Bo ', # 0x52
'Hui ', # 0x53
'Zhuan ', # 0x54
'Mu ', # 0x55
'Zen ', # 0x56
'Zha ', # 0x57
'Shi ', # 0x58
'Qiao ', # 0x59
'Tan ', # 0x5a
'Zen ', # 0x5b
'Pu ', # 0x5c
'Sheng ', # 0x5d
'Xuan ', # 0x5e
'Zao ', # 0x5f
'Tan ', # 0x60
'Dang ', # 0x61
'Sui ', # 0x62
'Qian ', # 0x63
'Ji ', # 0x64
'Jiao ', # 0x65
'Jing ', # 0x66
'Lian ', # 0x67
'Nou ', # 0x68
'Yi ', # 0x69
'Ai ', # 0x6a
'Zhan ', # 0x6b
'Pi ', # 0x6c
'Hui ', # 0x6d
'Hua ', # 0x6e
'Yi ', # 0x6f
'Yi ', # 0x70
'Shan ', # 0x71
'Rang ', # 0x72
'Nou ', # 0x73
'Qian ', # 0x74
'Zhui ', # 0x75
'Ta ', # 0x76
'Hu ', # 0x77
'Zhou ', # 0x78
'Hao ', # 0x79
'Ye ', # 0x7a
'Ying ', # 0x7b
'Jian ', # 0x7c
'Yu ', # 0x7d
'Jian ', # 0x7e
'Hui ', # 0x7f
'Du ', # 0x80
'Zhe ', # 0x81
'Xuan ', # 0x82
'Zan ', # 0x83
'Lei ', # 0x84
'Shen ', # 0x85
'Wei ', # 0x86
'Chan ', # 0x87
'Li ', # 0x88
'Yi ', # 0x89
'Bian ', # 0x8a
'Zhe ', # 0x8b
'Yan ', # 0x8c
'E ', # 0x8d
'Chou ', # 0x8e
'Wei ', # 0x8f
'Chou ', # 0x90
'Yao ', # 0x91
'Chan ', # 0x92
'Rang ', # 0x93
'Yin ', # 0x94
'Lan ', # 0x95
'Chen ', # 0x96
'Huo ', # 0x97
'Zhe ', # 0x98
'Huan ', # 0x99
'Zan ', # 0x9a
'Yi ', # 0x9b
'Dang ', # 0x9c
'Zhan ', # 0x9d
'Yan ', # 0x9e
'Du ', # 0x9f
'Yan ', # 0xa0
'Ji ', # 0xa1
'Ding ', # 0xa2
'Fu ', # 0xa3
'Ren ', # 0xa4
'Ji ', # 0xa5
'Jie ', # 0xa6
'Hong ', # 0xa7
'Tao ', # 0xa8
'Rang ', # 0xa9
'Shan ', # 0xaa
'Qi ', # 0xab
'Tuo ', # 0xac
'Xun ', # 0xad
'Yi ', # 0xae
'Xun ', # 0xaf
'Ji ', # 0xb0
'Ren ', # 0xb1
'Jiang ', # 0xb2
'Hui ', # 0xb3
'Ou ', # 0xb4
'Ju ', # 0xb5
'Ya ', # 0xb6
'Ne ', # 0xb7
'Xu ', # 0xb8
'E ', # 0xb9
'Lun ', # 0xba
'Xiong ', # 0xbb
'Song ', # 0xbc
'Feng ', # 0xbd
'She ', # 0xbe
'Fang ', # 0xbf
'Jue ', # 0xc0
'Zheng ', # 0xc1
'Gu ', # 0xc2
'He ', # 0xc3
'Ping ', # 0xc4
'Zu ', # 0xc5
'Shi ', # 0xc6
'Xiong ', # 0xc7
'Zha ', # 0xc8
'Su ', # 0xc9
'Zhen ', # 0xca
'Di ', # 0xcb
'Zou ', # 0xcc
'Ci ', # 0xcd
'Qu ', # 0xce
'Zhao ', # 0xcf
'Bi ', # 0xd0
'Yi ', # 0xd1
'Yi ', # 0xd2
'Kuang ', # 0xd3
'Lei ', # 0xd4
'Shi ', # 0xd5
'Gua ', # 0xd6
'Shi ', # 0xd7
'Jie ', # 0xd8
'Hui ', # 0xd9
'Cheng ', # 0xda
'Zhu ', # 0xdb
'Shen ', # 0xdc
'Hua ', # 0xdd
'Dan ', # 0xde
'Gou ', # 0xdf
'Quan ', # 0xe0
'Gui ', # 0xe1
'Xun ', # 0xe2
'Yi ', # 0xe3
'Zheng ', # 0xe4
'Gai ', # 0xe5
'Xiang ', # 0xe6
'Cha ', # 0xe7
'Hun ', # 0xe8
'Xu ', # 0xe9
'Zhou ', # 0xea
'Jie ', # 0xeb
'Wu ', # 0xec
'Yu ', # 0xed
'Qiao ', # 0xee
'Wu ', # 0xef
'Gao ', # 0xf0
'You ', # 0xf1
'Hui ', # 0xf2
'Kuang ', # 0xf3
'Shuo ', # 0xf4
'Song ', # 0xf5
'Ai ', # 0xf6
'Qing ', # 0xf7
'Zhu ', # 0xf8
'Zou ', # 0xf9
'Nuo ', # 0xfa
'Du ', # 0xfb
'Zhuo ', # 0xfc
'Fei ', # 0xfd
'Ke ', # 0xfe
'Wei ', # 0xff
)
| gpl-3.0 |
manasi24/jiocloud-tempest-qatempest | tempest/api/orchestration/stacks/test_templates.py | 11 | 2099 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest.api.orchestration import base
from tempest import test
class TemplateYAMLTestJSON(base.BaseOrchestrationTest):
template = """
HeatTemplateFormatVersion: '2012-12-12'
Description: |
Template which creates only a new user
Resources:
CfnUser:
Type: AWS::IAM::User
"""
@classmethod
def resource_setup(cls):
super(TemplateYAMLTestJSON, cls).resource_setup()
cls.stack_name = data_utils.rand_name('heat')
cls.stack_identifier = cls.create_stack(cls.stack_name, cls.template)
cls.client.wait_for_stack_status(cls.stack_identifier,
'CREATE_COMPLETE')
cls.stack_id = cls.stack_identifier.split('/')[1]
cls.parameters = {}
@test.idempotent_id('47430699-c368-495e-a1db-64c26fd967d7')
def test_show_template(self):
"""Getting template used to create the stack."""
self.client.show_template(self.stack_identifier)
@test.idempotent_id('ed53debe-8727-46c5-ab58-eba6090ec4de')
def test_validate_template(self):
"""Validating template passing it content."""
self.client.validate_template(self.template,
self.parameters)
class TemplateAWSTestJSON(TemplateYAMLTestJSON):
template = """
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template which creates only a new user",
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User"
}
}
}
"""
| apache-2.0 |
OCESS/serverv-py | serverv-py/qb_communication.py | 1 | 4342 | """Helper class to generalize logic of SERVERv.BAS file communication."""
from pathlib import Path, PurePath, PureWindowsPath
import filetransforms
_client_path = {}
_file_vars = {}
_file_connectors = []
def parse_sevpath(sevpath_path):
"""Parse sevpath.RND and set up module state."""
global _client_path
global _file_connectors
# Read all paths to legacy clients from sevpath.RND
clients = [
"flight", "mirror", "telemetry", "simulator", "HABeecom", "MCeecom",
"SIMeecom", "display", "HABeng", "SIMmirror", "HABdisplay"]
with open(sevpath_path, "r") as sevpaths:
for client in clients:
_client_path[client] = (Path(sevpath_path).parent /
PureWindowsPath(sevpaths.read(25).strip()))
# Create helper classes for each legacy client.
_file_connectors = [
# Block 300
FileConnector('simulator', ['HABeng'], 'ORB5res.RND', 412),
# Block 400
FileConnector('HABeecom', ['MCeecom', 'SIMeecom'],
'GASTELEMETRY.RND', 800),
# Block 500
FileConnector('MCeecom', ['HABeecom', 'SIMeecom'], 'GASMC.RND', 82),
# Block 600
FileConnector('SIMeecom', ['HABeecom'], 'GASSIM.RND', 182),
# Block 700
FileConnector('SIMeecom', ['HABeecom'], 'DOORSIM.RND', 276),
# Block 800
FileConnector('HABeng',
['flight', 'telemetry', 'simulator', 'SIMmirror'],
'ORBITSSE.RND', 1159),
# Block 900
FileConnector('display', ['flight', 'mirror'], 'MST.RND', 26),
# Block 930
FileConnector('MCeecom', ['HABeecom', 'MCeecom'], 'TIME.RND', 26)
]
def _simplify_filename(filename):
"""Return the lowercase filename, no extensions."""
return PurePath(filename.lower()).stem
class FileConnector:
"""Writes contents of src/filename to dest/filename.
I noticed that serverv.bas does a lot of the following:
read from path1/filename
set global var based on file contents
write to path2/filename
This class seeks to generalize this logic.
    Call self.process_src to update 'global variables' (I know) in _file_vars
    based on the contents of src/filename, and then call self.write_to_dest
    to write the contents of src/filename to dest/filename, with a predefined
    transformation.
"""
def __init__(self, src, dests, filename, filesize):
"""Simply set up instance."""
self._srcpath = _client_path[src] / filename
self._destpaths = [_client_path[dest] / filename for dest in dests]
self._filesize = filesize
# Programmatically get parse, transform functions from filetransforms
# e.g. _parsesrc = simulator_orb5res_parse
self._parsesrc = getattr(
filetransforms,
src + '_' + _simplify_filename(filename) + '_parse')
self._transform = getattr(
filetransforms,
src + '_' + _simplify_filename(filename) + '_transform')
def process_src(self):
"""Read src/filename and possibly changes variables in _file_vars."""
global _file_vars
with self._srcpath.open('rb') as src:
self._parsesrc(src, _file_vars)
def write_to_dest(self):
"""Write src/filename to dest/filename with a transformation."""
global _file_vars
assert self._srcpath.stat().st_size == self._filesize
with self._srcpath.open('rb') as src:
file_contents = bytearray(src.read(self._filesize))
assert file_contents[0] == file_contents[-1]
precontents_len = len(file_contents)
self._transform(file_contents, _file_vars)
assert len(file_contents) == precontents_len
for destpath in self._destpaths:
with destpath.open('wb') as dest:
dest.write(file_contents)
assert destpath.stat().st_size == self._filesize
def perform_qb_communication():
"""Update all QB files with the proper logic."""
global _file_connectors
for connector in _file_connectors:
connector.process_src()
for connector in _file_connectors:
connector.write_to_dest()
| mit |
browseinfo/odoo_saas3_nicolas | addons/crm_partner_assign/wizard/crm_channel_interested.py | 263 | 3795 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
class crm_lead_forward_to_partner(osv.TransientModel):
""" Forward info history to partners. """
_name = 'crm.lead.channel.interested'
_columns = {
'interested': fields.boolean('Interested by this lead'),
'contacted': fields.boolean('Did you contact the lead?', help="The lead has been contacted"),
'comment': fields.text('Comment', help="What are the elements that have led to this decision?", required=True),
}
_defaults = {
'interested': lambda self, cr, uid, c: c.get('interested', True),
'contacted': False,
}
def action_confirm(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context=context)
if wizard.interested and not wizard.contacted:
raise osv.except_osv(_('Error!'), _("You must contact the lead before saying that you are interested"))
lead_obj = self.pool.get('crm.lead')
lead_obj.check_access_rights(cr, uid, 'write')
        if wizard.interested:
            message = _('<p>I am interested by this lead.</p>')
            values = {}
        else:
            stage = 'stage_portal_lead_recycle'
            message = _('<p>I am not interested by this lead. I %scontacted the lead.</p>') % (not wizard.contacted and 'have not ' or '')
            values = {'partner_assigned_id': False}
            user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
            partner_ids = self.pool.get('res.partner').search(cr, SUPERUSER_ID, [('id', 'child_of', user.partner_id.commercial_partner_id.id)], context=context)
            lead_obj.message_unsubscribe(cr, SUPERUSER_ID, context.get('active_ids', []), partner_ids, context=None)
            try:
                stage_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', stage)[1]
            except ValueError:
                stage_id = False
            if stage_id:
                values.update({'stage_id': stage_id})
        if wizard.comment:
            message += '<p>%s</p>' % wizard.comment
for active_id in context.get('active_ids', []):
lead_obj.message_post(cr, uid, active_id, body=message, subtype="mail.mt_comment", context=context)
if values:
lead_obj.write(cr, SUPERUSER_ID, context.get('active_ids', []), values)
if wizard.interested:
for lead in lead_obj.browse(cr, uid, context.get('active_ids', []), context=context):
lead_obj.convert_opportunity(cr, SUPERUSER_ID, [lead.id], lead.partner_id and lead.partner_id.id or None, context=None)
return {
'type': 'ir.actions.act_window_close',
}
| agpl-3.0 |
groovecoder/kuma | kuma/users/tests/test_templates.py | 12 | 13923 | from django.conf import settings
from jingo.helpers import urlparams
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
from waffle.models import Flag
from kuma.core.urlresolvers import reverse
from . import UserTestCase
from .test_views import TESTUSER_PASSWORD
class SignupTests(UserTestCase):
localizing_client = False
@mock.patch('requests.post')
def test_signup_page(self, mock_post):
user_email = "[email protected]"
mock_post.return_value = mock_resp = mock.Mock()
mock_resp.json.return_value = {
"status": "okay",
"email": user_email,
"audience": "https://developer-local.allizom.org"
}
url = reverse('persona_login')
response = self.client.post(url, follow=True)
self.assertNotContains(response, 'Sign In Failure')
test_strings = ['Create your MDN profile to continue',
'choose a username',
'having trouble',
'I agree',
'to Mozilla',
'Terms',
'Privacy Notice']
for test_string in test_strings:
self.assertContains(response, test_string)
@mock.patch('requests.post')
def test_signup_page_disabled(self, mock_post):
user_email = "[email protected]"
mock_post.return_value = mock_resp = mock.Mock()
mock_resp.json.return_value = {
"status": "okay",
"email": user_email,
"audience": "https://developer-local.allizom.org"
}
url = reverse('persona_login')
registration_disabled = Flag.objects.create(
name='registration_disabled',
everyone=True
)
response = self.client.post(url, follow=True)
self.assertNotContains(response, 'Sign In Failure')
self.assertContains(response, 'Profile Creation Disabled')
# re-enable registration
registration_disabled.everyone = False
registration_disabled.save()
response = self.client.post(url, follow=True)
test_strings = ['Create your MDN profile to continue',
'choose a username',
'having trouble']
for test_string in test_strings:
self.assertContains(response, test_string)
class AccountEmailTests(UserTestCase):
localizing_client = True
def test_account_email_page_requires_signin(self):
url = reverse('account_email')
response = self.client.get(url, follow=True)
self.assertContains(response, 'Please sign in')
ok_(len(response.redirect_chain) > 0)
def test_account_email_page_single_email(self):
u = self.user_model.objects.get(username='testuser')
self.client.login(username=u.username, password=TESTUSER_PASSWORD)
url = reverse('account_email')
response = self.client.get(url)
self.assertContains(response, 'is your <em>primary</em> email address')
for test_string in ['Make Primary',
'Re-send Confirmation',
'Remove']:
self.assertNotContains(response, test_string)
def test_account_email_page_multiple_emails(self):
u = self.user_model.objects.get(username='testuser2')
self.client.login(username=u.username, password=TESTUSER_PASSWORD)
url = reverse('account_email')
response = self.client.get(url)
for test_string in ['Make Primary',
'Re-send Confirmation',
'Remove',
'Add Email',
'Edit profile']:
self.assertContains(response, test_string)
class SocialAccountConnectionsTests(UserTestCase):
localizing_client = True
def test_account_connections_page_requires_signin(self):
url = reverse('socialaccount_connections')
response = self.client.get(url, follow=True)
self.assertContains(response, 'Please sign in')
ok_(len(response.redirect_chain) > 0)
def test_account_connections_page(self):
u = self.user_model.objects.get(username='testuser')
self.client.login(username=u.username, password=TESTUSER_PASSWORD)
url = reverse('socialaccount_connections')
response = self.client.get(url)
for test_string in ['Disconnect', 'Connect a new account',
'Edit profile', 'Connect with']:
self.assertContains(response, test_string)
class AllauthPersonaTestCase(UserTestCase):
existing_persona_email = '[email protected]'
existing_persona_username = 'testuser'
localizing_client = False
def test_persona_auth_failure_copy(self):
"""
The explanatory page for failed Persona auth contains the
failure copy, and does not contain success messages or a form
to choose a username.
"""
with mock.patch('requests.post') as requests_mock:
requests_mock.return_value.json.return_value = {
'status': 'failure',
'reason': 'this email address has been naughty'
}
response = self.client.post(reverse('persona_login'),
follow=True)
for expected_string in ('Account Sign In Failure',
'An error occurred while attempting to sign '
'in with your account.'):
self.assertContains(response, expected_string)
for unexpected_string in (
'Thanks for signing in to MDN with Persona.',
('<form class="submission readable-line-length" method="post" '
'action="/en-US/users/account/signup">'),
('<input name="username" maxlength="30" type="text"'
' autofocus="autofocus" required="required" '
'placeholder="Username" id="id_username" />'),
'<input type="hidden" name="email" value="',
'" id="id_email" />'):
self.assertNotContains(response, unexpected_string)
def test_persona_auth_success_copy(self):
"""
Successful Persona auth of a new user displays a success
message and the Persona-specific signup form, correctly
populated, and does not display the failure copy.
"""
persona_signup_email = '[email protected]'
with mock.patch('requests.post') as requests_mock:
requests_mock.return_value.json.return_value = {
'status': 'okay',
'email': persona_signup_email,
}
response = self.client.post(reverse('persona_login'),
follow=True)
for expected_string in (
# Test that we got:
#
# * Persona sign-in success message
#
# * Form with action set to the account-signup URL.
#
# * Username field, blank
#
# * Hidden email address field, pre-populated with the
# address used to authenticate to Persona.
'Thanks for signing in to MDN with Persona.',
('<form class="submission readable-line-length" method="post" '
'action="/en-US/users/account/signup">'),
('<input autofocus="autofocus" id="id_username" '
'maxlength="30" name="username" placeholder="Username" '
'required="required" type="text" />'),
('<input id="id_email" name="email" type="hidden" '
'value="%s" />' % persona_signup_email)):
self.assertContains(response, expected_string)
for unexpected_string in (
'<Account Sign In Failure',
'<An error occurred while attempting to sign '
'in with your account.'):
self.assertNotContains(response, unexpected_string)
def test_persona_signin_copy(self):
"""
After an existing user successfully authenticates with
Persona, their username, an indication that Persona was used
to log in, and a logout link appear in the auth tools section
of the page.
"""
with mock.patch('requests.post') as requests_mock:
requests_mock.return_value.json.return_value = {
'status': 'okay',
'email': self.existing_persona_email,
}
response = self.client.post(reverse('persona_login'),
follow=True)
eq_(response.status_code, 200)
user_url = reverse(
'users.user_detail',
kwargs={
'username': self.existing_persona_username
},
locale=settings.WIKI_DEFAULT_LANGUAGE)
signout_url = urlparams(
reverse('account_logout',
locale=settings.WIKI_DEFAULT_LANGUAGE),
next=reverse('home',
locale=settings.WIKI_DEFAULT_LANGUAGE))
parsed = pq(response.content)
login_info = parsed.find('.oauth-logged-in')
ok_(len(login_info.children()))
signed_in_message = login_info.children()[0]
ok_('title' in signed_in_message.attrib)
eq_('Signed in with Persona',
signed_in_message.attrib['title'])
auth_links = login_info.children()[1].getchildren()
ok_(len(auth_links))
user_link = auth_links[0].getchildren()[0]
ok_('href' in user_link.attrib)
eq_(user_url, user_link.attrib['href'])
signout_link = auth_links[1].getchildren()[0]
ok_('href' in signout_link.attrib)
eq_(signout_url.replace('%2F', '/'), # urlparams() encodes slashes
signout_link.attrib['href'])
def test_persona_form_present(self):
"""
When not authenticated, the Persona authentication components,
with correct data attributes, are present in page contents,
and the 'next' parameter is filled in.
"""
all_docs_url = reverse('wiki.all_documents',
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(all_docs_url, follow=True)
parsed = pq(response.content)
request_info = '{"siteName": "%(siteName)s", "siteLogo": "%(siteLogo)s"}' % \
settings.SOCIALACCOUNT_PROVIDERS['persona']['REQUEST_PARAMETERS']
stub_attrs = (
('data-csrf-token-url', reverse('persona_csrf_token')),
('data-request', request_info),
)
auth_attrs = (
('data-service', 'Persona'),
('data-next', all_docs_url),
)
stub_persona_form = parsed.find('#_persona_login')
ok_(len(stub_persona_form) > 0)
for stub_attr in stub_attrs:
ok_(stub_persona_form.attr(stub_attr[0]))
eq_(stub_attr[1], stub_persona_form.attr(stub_attr[0]))
auth_persona_form = parsed.find('.launch-persona-login')
ok_(len(auth_persona_form) > 0)
for auth_attr in auth_attrs:
ok_(auth_persona_form.attr(auth_attr[0]))
eq_(auth_attr[1], auth_persona_form.attr(auth_attr[0]))
def test_persona_signup_copy(self):
"""
After a new user signs up with Persona, their username, an
indication that Persona was used to log in, and a logout link
appear in the auth tools section of the page.
"""
persona_signup_email = '[email protected]'
persona_signup_username = 'templates_persona_signup_copy'
with mock.patch('requests.post') as requests_mock:
requests_mock.return_value.json.return_value = {
'status': 'okay',
'email': persona_signup_email,
}
self.client.post(reverse('persona_login'), follow=True)
data = {'website': '',
'username': persona_signup_username,
'email': persona_signup_email,
'terms': True}
response = self.client.post(
reverse('socialaccount_signup',
locale=settings.WIKI_DEFAULT_LANGUAGE),
data=data, follow=True)
user_url = reverse(
'users.user_detail',
kwargs={'username': persona_signup_username},
locale=settings.WIKI_DEFAULT_LANGUAGE)
signout_url = urlparams(
reverse('account_logout',
locale=settings.WIKI_DEFAULT_LANGUAGE),
next=reverse('home',
locale=settings.WIKI_DEFAULT_LANGUAGE))
parsed = pq(response.content)
login_info = parsed.find('.oauth-logged-in')
ok_(len(login_info.children()))
signed_in_message = login_info.children()[0]
ok_('title' in signed_in_message.attrib)
eq_('Signed in with Persona',
signed_in_message.attrib['title'])
auth_links = login_info.children()[1].getchildren()
ok_(len(auth_links))
user_link = auth_links[0].getchildren()[0]
ok_('href' in user_link.attrib)
eq_(user_url, user_link.attrib['href'])
signout_link = auth_links[1].getchildren()[0]
ok_('href' in signout_link.attrib)
eq_(signout_url.replace('%2F', '/'), # urlparams() encodes slashes
signout_link.attrib['href'])
| mpl-2.0 |
mlue/discordbridge | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/styles/autumn.py | 135 | 2144 | # -*- coding: utf-8 -*-
"""
pygments.styles.autumn
~~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class AutumnStyle(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
default_style = ""
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #aaaaaa',
Comment.Preproc: 'noitalic #4c8317',
Comment.Special: 'italic #0000aa',
Keyword: '#0000aa',
Keyword.Type: '#00aaaa',
Operator.Word: '#0000aa',
Name.Builtin: '#00aaaa',
Name.Function: '#00aa00',
Name.Class: 'underline #00aa00',
Name.Namespace: 'underline #00aaaa',
Name.Variable: '#aa0000',
Name.Constant: '#aa0000',
Name.Entity: 'bold #800',
Name.Attribute: '#1e90ff',
Name.Tag: 'bold #1e90ff',
Name.Decorator: '#888888',
String: '#aa5500',
String.Symbol: '#0000aa',
String.Regex: '#009999',
Number: '#009999',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
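# Example use of this style (a sketch; it relies only on the standard Pygments
# highlight API and the registered style name 'autumn'):
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   html = highlight('print("hi")', PythonLexer(), HtmlFormatter(style='autumn'))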
| mit |
Gabriel439/pants | tests/python/pants_test/option/test_options_bootstrapper.py | 2 | 9865 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.scope import ScopeInfo
from pants.util.contextutil import temporary_file, temporary_file_path
class BootstrapOptionsTest(unittest.TestCase):
def _do_test(self, expected_vals, config, env, args):
self._test_bootstrap_options(config, env, args,
pants_workdir=expected_vals[0],
pants_supportdir=expected_vals[1],
pants_distdir=expected_vals[2])
def _test_bootstrap_options(self, config, env, args, **expected_entries):
with temporary_file() as fp:
fp.write('[DEFAULT]\n')
if config:
for k, v in config.items():
fp.write('{0}: {1}\n'.format(k, v))
fp.close()
bootstrapper = OptionsBootstrapper(env=env, configpath=fp.name, args=args)
vals = bootstrapper.get_bootstrap_options().for_global_scope()
vals_dict = {k: getattr(vals, k) for k in expected_entries}
self.assertEquals(expected_entries, vals_dict)
def test_bootstrap_option_values(self):
# Check all defaults.
buildroot = get_buildroot()
def br(path):
# Returns the full path of the given path under the buildroot.
return '{}/{}'.format(buildroot, path)
self._do_test([br('.pants.d'), br('build-support'), br('dist')],
config=None, env={}, args=[])
# Check getting values from config, env and args.
self._do_test(['/from_config/.pants.d', br('build-support'), br('dist')],
config={'pants_workdir': '/from_config/.pants.d'}, env={}, args=[])
self._do_test([br('.pants.d'), '/from_env/build-support', br('dist')],
config=None,
env={'PANTS_SUPPORTDIR': '/from_env/build-support'}, args=[])
self._do_test([br('.pants.d'), br('build-support'), '/from_args/dist'],
config={}, env={}, args=['--pants-distdir=/from_args/dist'])
# Check that args > env > config.
self._do_test(['/from_config/.pants.d', '/from_env/build-support', '/from_args/dist'],
config={
'pants_workdir': '/from_config/.pants.d',
'pants_supportdir': '/from_config/build-support',
'pants_distdir': '/from_config/dist'
},
env={
'PANTS_SUPPORTDIR': '/from_env/build-support',
'PANTS_DISTDIR': '/from_env/dist'
},
args=['--pants-distdir=/from_args/dist'])
# Check that unrelated args and config don't confuse us.
self._do_test(['/from_config/.pants.d', '/from_env/build-support', '/from_args/dist'],
config={
'pants_workdir': '/from_config/.pants.d',
'pants_supportdir': '/from_config/build-support',
'pants_distdir': '/from_config/dist',
'unrelated': 'foo'
},
env={
'PANTS_SUPPORTDIR': '/from_env/build-support',
'PANTS_DISTDIR': '/from_env/dist'
},
args=['--pants-distdir=/from_args/dist', '--foo=bar', '--baz'])
def test_bootstrap_bool_option_values(self):
# Check the default.
self._test_bootstrap_options(config=None, env={}, args=[], pantsrc=True)
# Check an override via flag - currently bools (for store_true and store_false actions) cannot
# be inverted from the default via env vars or the config.
self._test_bootstrap_options(config={}, env={}, args=['--no-pantsrc'], pantsrc=False)
self._test_bootstrap_options(config={'pantsrc': False}, env={}, args=[], pantsrc=False)
self._test_bootstrap_options(config={}, env={'PANTS_PANTSRC': 'False'}, args=[], pantsrc=False)
def test_create_bootstrapped_options(self):
# Check that we can set a bootstrap option from a cmd-line flag and have that interpolate
# correctly into regular config.
with temporary_file() as fp:
fp.write(dedent("""
[foo]
bar: %(pants_workdir)s/baz
[fruit]
apple: %(pants_supportdir)s/banana
"""))
fp.close()
bootstrapper = OptionsBootstrapper(env={
'PANTS_SUPPORTDIR': '/pear'
},
configpath=fp.name,
args=['--pants-workdir=/qux'])
opts = bootstrapper.get_full_options(known_scope_infos=[
ScopeInfo('', ScopeInfo.GLOBAL),
ScopeInfo('foo', ScopeInfo.TASK),
ScopeInfo('fruit', ScopeInfo.TASK)
])
opts.register('', '--pants-workdir') # So we don't choke on it on the cmd line.
opts.register('foo', '--bar')
opts.register('fruit', '--apple')
self.assertEquals('/qux/baz', opts.for_scope('foo').bar)
self.assertEquals('/pear/banana', opts.for_scope('fruit').apple)
def test_create_bootstrapped_multiple_config_override(self):
# check with multiple config files, the latest values always get taken
# in this case strategy will be overwritten, while fruit stays the same
with temporary_file() as fp:
fp.write(dedent("""
[compile.apt]
strategy: global
[fruit]
apple: red
"""))
fp.close()
bootstrapper_single_config = OptionsBootstrapper(configpath=fp.name,
args=['--config-override={}'.format(fp.name)])
opts_single_config = bootstrapper_single_config.get_full_options(known_scope_infos=[
ScopeInfo('', ScopeInfo.GLOBAL),
ScopeInfo('compile.apt', ScopeInfo.TASK),
ScopeInfo('fruit', ScopeInfo.TASK),
])
opts_single_config.register('', '--config-override') # So we don't choke on it on the cmd line.
opts_single_config.register('compile.apt', '--strategy')
opts_single_config.register('fruit', '--apple')
self.assertEquals('global', opts_single_config.for_scope('compile.apt').strategy)
self.assertEquals('red', opts_single_config.for_scope('fruit').apple)
with temporary_file() as fp2:
fp2.write(dedent("""
[compile.apt]
strategy: isolated
"""))
fp2.close()
bootstrapper_double_config = OptionsBootstrapper(
configpath=fp.name,
args=['--config-override={}'.format(fp.name),
'--config-override={}'.format(fp2.name)])
opts_double_config = bootstrapper_double_config.get_full_options(known_scope_infos=[
ScopeInfo('', ScopeInfo.GLOBAL),
ScopeInfo('compile.apt', ScopeInfo.TASK),
ScopeInfo('fruit', ScopeInfo.TASK),
])
opts_double_config.register('', '--config-override') # So we don't choke on it on the cmd line.
opts_double_config.register('compile.apt', '--strategy')
opts_double_config.register('fruit', '--apple')
self.assertEquals('isolated', opts_double_config.for_scope('compile.apt').strategy)
self.assertEquals('red', opts_double_config.for_scope('fruit').apple)
def test_full_options_caching(self):
with temporary_file_path() as config:
bootstrapper = OptionsBootstrapper(env={}, configpath=config, args=[])
opts1 = bootstrapper.get_full_options(known_scope_infos=[ScopeInfo('', ScopeInfo.GLOBAL),
ScopeInfo('foo', ScopeInfo.TASK)])
opts2 = bootstrapper.get_full_options(known_scope_infos=[ScopeInfo('foo', ScopeInfo.TASK),
ScopeInfo('', ScopeInfo.GLOBAL)])
self.assertIs(opts1, opts2)
opts3 = bootstrapper.get_full_options(known_scope_infos=[ScopeInfo('', ScopeInfo.GLOBAL),
ScopeInfo('foo', ScopeInfo.TASK),
ScopeInfo('', ScopeInfo.GLOBAL)])
self.assertIs(opts1, opts3)
opts4 = bootstrapper.get_full_options(known_scope_infos=[ScopeInfo('', ScopeInfo.GLOBAL)])
self.assertIsNot(opts1, opts4)
opts5 = bootstrapper.get_full_options(known_scope_infos=[ScopeInfo('', ScopeInfo.GLOBAL)])
self.assertIs(opts4, opts5)
self.assertIsNot(opts1, opts5)
def test_bootstrap_short_options(self):
def parse_options(*args):
return OptionsBootstrapper(args=list(args)).get_bootstrap_options().for_global_scope()
# No short options passed - defaults presented.
vals = parse_options()
self.assertIsNone(vals.logdir)
self.assertEqual('info', vals.level)
# Unrecognized short options passed and ignored - defaults presented.
vals = parse_options('-_UnderscoreValue', '-^')
self.assertIsNone(vals.logdir)
self.assertEqual('info', vals.level)
vals = parse_options('-d/tmp/logs', '-ldebug')
self.assertEqual('/tmp/logs', vals.logdir)
self.assertEqual('debug', vals.level)
def test_bootstrap_options_passthrough_dup_ignored(self):
def parse_options(*args):
return OptionsBootstrapper(args=list(args)).get_bootstrap_options().for_global_scope()
vals = parse_options('main', 'args', '-d/tmp/frogs', '--', '-d/tmp/logs')
self.assertEqual('/tmp/frogs', vals.logdir)
vals = parse_options('main', 'args', '--', '-d/tmp/logs')
self.assertIsNone(vals.logdir)
| apache-2.0 |
Rio517/pledgeservice | testlib/waitress/buffers.py | 21 | 8627 | ##############################################################################
#
# Copyright (c) 2001-2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Buffers
"""
from io import BytesIO
# copy_bytes controls the size of temp. strings for shuffling data around.
COPY_BYTES = 1 << 18 # 256K
# The maximum number of bytes to buffer in a simple string.
STRBUF_LIMIT = 8192
class FileBasedBuffer(object):
remain = 0
def __init__(self, file, from_buffer=None):
self.file = file
if from_buffer is not None:
from_file = from_buffer.getfile()
read_pos = from_file.tell()
from_file.seek(0)
while True:
data = from_file.read(COPY_BYTES)
if not data:
break
file.write(data)
self.remain = int(file.tell() - read_pos)
from_file.seek(read_pos)
file.seek(read_pos)
def __len__(self):
return self.remain
def __nonzero__(self):
return self.remain > 0
__bool__ = __nonzero__ # py3
def append(self, s):
file = self.file
read_pos = file.tell()
file.seek(0, 2)
file.write(s)
file.seek(read_pos)
self.remain = self.remain + len(s)
def get(self, numbytes=-1, skip=False):
file = self.file
if not skip:
read_pos = file.tell()
if numbytes < 0:
# Read all
res = file.read()
else:
res = file.read(numbytes)
if skip:
self.remain -= len(res)
else:
file.seek(read_pos)
return res
def skip(self, numbytes, allow_prune=0):
if self.remain < numbytes:
raise ValueError("Can't skip %d bytes in buffer of %d bytes" % (
numbytes, self.remain)
)
self.file.seek(numbytes, 1)
self.remain = self.remain - numbytes
def newfile(self):
raise NotImplementedError()
def prune(self):
file = self.file
if self.remain == 0:
read_pos = file.tell()
file.seek(0, 2)
sz = file.tell()
file.seek(read_pos)
if sz == 0:
# Nothing to prune.
return
nf = self.newfile()
while True:
data = file.read(COPY_BYTES)
if not data:
break
nf.write(data)
self.file = nf
def getfile(self):
return self.file
def close(self):
if hasattr(self.file, 'close'):
self.file.close()
self.remain = 0
class TempfileBasedBuffer(FileBasedBuffer):
def __init__(self, from_buffer=None):
FileBasedBuffer.__init__(self, self.newfile(), from_buffer)
def newfile(self):
from tempfile import TemporaryFile
return TemporaryFile('w+b')
class BytesIOBasedBuffer(FileBasedBuffer):
def __init__(self, from_buffer=None):
if from_buffer is not None:
FileBasedBuffer.__init__(self, BytesIO(), from_buffer)
else:
# Shortcut. :-)
self.file = BytesIO()
def newfile(self):
return BytesIO()
class ReadOnlyFileBasedBuffer(FileBasedBuffer):
# used as wsgi.file_wrapper
def __init__(self, file, block_size=32768):
self.file = file
self.block_size = block_size # for __iter__
def prepare(self, size=None):
if hasattr(self.file, 'seek') and hasattr(self.file, 'tell'):
start_pos = self.file.tell()
self.file.seek(0, 2)
end_pos = self.file.tell()
self.file.seek(start_pos)
fsize = end_pos - start_pos
if size is None:
self.remain = fsize
else:
self.remain = min(fsize, size)
return self.remain
def get(self, numbytes=-1, skip=False):
# never read more than self.remain (it can be user-specified)
if numbytes == -1 or numbytes > self.remain:
numbytes = self.remain
file = self.file
if not skip:
read_pos = file.tell()
res = file.read(numbytes)
if skip:
self.remain -= len(res)
else:
file.seek(read_pos)
return res
def __iter__(self): # called by task if self.filelike has no seek/tell
return self
def next(self):
val = self.file.read(self.block_size)
if not val:
raise StopIteration
return val
__next__ = next # py3
def append(self, s):
raise NotImplementedError
class OverflowableBuffer(object):
"""
This buffer implementation has four stages:
- No data
- Bytes-based buffer
- BytesIO-based buffer
- Temporary file storage
The first two stages are fastest for simple transfers.
"""
overflowed = False
buf = None
strbuf = b'' # Bytes-based buffer.
def __init__(self, overflow):
        # overflow is the maximum to be stored in a BytesIO buffer.
self.overflow = overflow
def __len__(self):
buf = self.buf
if buf is not None:
# use buf.__len__ rather than len(buf) FBO of not getting
# OverflowError on Python 2
return buf.__len__()
else:
return self.strbuf.__len__()
def __nonzero__(self):
# use self.__len__ rather than len(self) FBO of not getting
# OverflowError on Python 2
return self.__len__() > 0
__bool__ = __nonzero__ # py3
def _create_buffer(self):
strbuf = self.strbuf
if len(strbuf) >= self.overflow:
self._set_large_buffer()
else:
self._set_small_buffer()
buf = self.buf
if strbuf:
buf.append(self.strbuf)
self.strbuf = b''
return buf
def _set_small_buffer(self):
self.buf = BytesIOBasedBuffer(self.buf)
self.overflowed = False
def _set_large_buffer(self):
self.buf = TempfileBasedBuffer(self.buf)
self.overflowed = True
def append(self, s):
buf = self.buf
if buf is None:
strbuf = self.strbuf
if len(strbuf) + len(s) < STRBUF_LIMIT:
self.strbuf = strbuf + s
return
buf = self._create_buffer()
buf.append(s)
# use buf.__len__ rather than len(buf) FBO of not getting
# OverflowError on Python 2
sz = buf.__len__()
if not self.overflowed:
if sz >= self.overflow:
self._set_large_buffer()
def get(self, numbytes=-1, skip=False):
buf = self.buf
if buf is None:
strbuf = self.strbuf
if not skip:
return strbuf
buf = self._create_buffer()
return buf.get(numbytes, skip)
def skip(self, numbytes, allow_prune=False):
buf = self.buf
if buf is None:
if allow_prune and numbytes == len(self.strbuf):
# We could slice instead of converting to
# a buffer, but that would eat up memory in
# large transfers.
self.strbuf = b''
return
buf = self._create_buffer()
buf.skip(numbytes, allow_prune)
def prune(self):
"""
A potentially expensive operation that removes all data
already retrieved from the buffer.
"""
buf = self.buf
if buf is None:
self.strbuf = b''
return
buf.prune()
if self.overflowed:
# use buf.__len__ rather than len(buf) FBO of not getting
# OverflowError on Python 2
sz = buf.__len__()
if sz < self.overflow:
# Revert to a faster buffer.
self._set_small_buffer()
def getfile(self):
buf = self.buf
if buf is None:
buf = self._create_buffer()
return buf.getfile()
def close(self):
buf = self.buf
if buf is not None:
buf.close()
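# The block below is an illustrative sketch and is not part of waitress itself:
# it walks an OverflowableBuffer through its storage stages (plain bytes string
# -> BytesIO -> temporary file). The overflow threshold of 16384 is an arbitrary
# value chosen for the demonstration.
if __name__ == '__main__':
    demo = OverflowableBuffer(overflow=16384)
    demo.append(b'x' * 100)             # still held in the simple bytes string
    assert len(demo) == 100
    demo.append(b'y' * 9000)            # crosses STRBUF_LIMIT -> BytesIO-based buffer
    assert demo.get(10, skip=True) == b'x' * 10
    demo.append(b'z' * 20000)           # crosses the overflow -> temporary file storage
    assert demo.overflowed
    demo.skip(len(demo), allow_prune=True)
    demo.close()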
| apache-2.0 |
15Dkatz/pants | contrib/cpp/src/python/pants/contrib/cpp/tasks/cpp_compile.py | 16 | 3753 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.base.build_environment import get_buildroot
from pants.base.workunit import WorkUnitLabel
from pants.util.dirutil import safe_mkdir_for
from pants.contrib.cpp.tasks.cpp_task import CppTask
class CppCompile(CppTask):
"""Compile C++ sources into object files."""
@classmethod
def register_options(cls, register):
super(CppCompile, cls).register_options(register)
register('--cc-options', advanced=True, type=list, default=[], fingerprint=True,
help='Append these options to the compiler command line.')
register('--cc-extensions', advanced=True, type=list, fingerprint=True,
default=['.cc', '.cxx', '.cpp'],
help=('The list of extensions to consider when determining if a file is a '
'C++ source file.'))
@classmethod
def product_types(cls):
return ['objs']
@property
def cache_target_dirs(self):
return True
def execute(self):
"""Compile all sources in a given target to object files."""
def is_cc(source):
_, ext = os.path.splitext(source)
return ext in self.get_options().cc_extensions
targets = self.context.targets(self.is_cpp)
# Compile source files to objects.
with self.invalidated(targets, invalidate_dependents=True) as invalidation_check:
obj_mapping = self.context.products.get('objs')
for vt in invalidation_check.all_vts:
for source in vt.target.sources_relative_to_buildroot():
if is_cc(source):
if not vt.valid:
with self.context.new_workunit(name='cpp-compile', labels=[WorkUnitLabel.MULTITOOL]):
# TODO: Parallelise the compilation.
# TODO: Only recompile source files that have changed since the
# object file was last written. Also use the output from
# gcc -M to track dependencies on headers.
self._compile(vt.target, vt.results_dir, source)
objpath = self._objpath(vt.target, vt.results_dir, source)
obj_mapping.add(vt.target, vt.results_dir).append(objpath)
def _objpath(self, target, results_dir, source):
abs_source_root = os.path.join(get_buildroot(), target.target_base)
abs_source = os.path.join(get_buildroot(), source)
rel_source = os.path.relpath(abs_source, abs_source_root)
root, _ = os.path.splitext(rel_source)
obj_name = root + '.o'
return os.path.join(results_dir, obj_name)
def _compile(self, target, results_dir, source):
"""Compile given source to an object file."""
obj = self._objpath(target, results_dir, source)
safe_mkdir_for(obj)
abs_source = os.path.join(get_buildroot(), source)
# TODO: include dir should include dependent work dir when headers are copied there.
include_dirs = []
for dep in target.dependencies:
if self.is_library(dep):
include_dirs.extend([os.path.join(get_buildroot(), dep.target_base)])
cmd = [self.cpp_toolchain.compiler]
cmd.extend(['-c'])
cmd.extend(('-I{0}'.format(i) for i in include_dirs))
cmd.extend(['-o' + obj, abs_source])
cmd.extend(self.get_options().cc_options)
# TODO: submit_async_work with self.run_command, [(cmd)] as a Work object.
with self.context.new_workunit(name='cpp-compile', labels=[WorkUnitLabel.COMPILER]) as workunit:
self.run_command(cmd, workunit)
self.context.log.info('Built c++ object: {0}'.format(obj))
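# Illustration only, not part of the task itself: with hypothetical inputs, the
# command assembled by _compile has roughly this shape (note that -o and the
# object path are concatenated into a single argument):
#   <compiler> -c -I<buildroot>/<dep_base> -o<results_dir>/<rel_source>.o <buildroot>/<source> <cc-options...>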
| apache-2.0 |
gacarrillor/QGIS | python/PyQt/PyQt5/QtNetwork.py | 45 | 1041 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QtNetwork.py
---------------------
Date : March 2016
Copyright : (C) 2016 by Juergen E. Fischer
Email : jef at norbit dot de
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Juergen E. Fischer'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Juergen E. Fischer'
from PyQt5.QtNetwork import *
| gpl-2.0 |
zhaodelong/django | tests/template_tests/filter_tests/test_force_escape.py | 352 | 2917 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import force_escape
from django.test import SimpleTestCase
from django.utils.safestring import SafeData
from ..utils import setup
class ForceEscapeTests(SimpleTestCase):
"""
Force_escape is applied immediately. It can be used to provide
double-escaping, for example.
"""
@setup({'force-escape01': '{% autoescape off %}{{ a|force_escape }}{% endautoescape %}'})
def test_force_escape01(self):
output = self.engine.render_to_string('force-escape01', {"a": "x&y"})
self.assertEqual(output, "x&y")
@setup({'force-escape02': '{{ a|force_escape }}'})
def test_force_escape02(self):
output = self.engine.render_to_string('force-escape02', {"a": "x&y"})
self.assertEqual(output, "x&y")
@setup({'force-escape03': '{% autoescape off %}{{ a|force_escape|force_escape }}{% endautoescape %}'})
def test_force_escape03(self):
output = self.engine.render_to_string('force-escape03', {"a": "x&y"})
self.assertEqual(output, "x&amp;y")
@setup({'force-escape04': '{{ a|force_escape|force_escape }}'})
def test_force_escape04(self):
output = self.engine.render_to_string('force-escape04', {"a": "x&y"})
self.assertEqual(output, "x&amp;y")
# Because the result of force_escape is "safe", an additional
# escape filter has no effect.
@setup({'force-escape05': '{% autoescape off %}{{ a|force_escape|escape }}{% endautoescape %}'})
def test_force_escape05(self):
output = self.engine.render_to_string('force-escape05', {"a": "x&y"})
self.assertEqual(output, "x&y")
@setup({'force-escape06': '{{ a|force_escape|escape }}'})
def test_force_escape06(self):
output = self.engine.render_to_string('force-escape06', {"a": "x&y"})
self.assertEqual(output, "x&y")
@setup({'force-escape07': '{% autoescape off %}{{ a|escape|force_escape }}{% endautoescape %}'})
def test_force_escape07(self):
output = self.engine.render_to_string('force-escape07', {"a": "x&y"})
self.assertEqual(output, "x&y")
@setup({'force-escape08': '{{ a|escape|force_escape }}'})
def test_force_escape08(self):
output = self.engine.render_to_string('force-escape08', {"a": "x&y"})
self.assertEqual(output, "x&y")
class FunctionTests(SimpleTestCase):
def test_escape(self):
escaped = force_escape('<some html & special characters > here')
self.assertEqual(escaped, '<some html & special characters > here')
self.assertIsInstance(escaped, SafeData)
def test_unicode(self):
self.assertEqual(
force_escape('<some html & special characters > here ĐÅ€£'),
'<some html & special characters > here \u0110\xc5\u20ac\xa3',
)
| bsd-3-clause |
badlogicmanpreet/nupic | src/nupic/support/datafiles.py | 40 | 7949 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# TODO for NUPIC 2 -- document the interface!
# TODO for NuPIC 2 -- should this move to inferenceanalysis?
def _calculateColumnsFromLine(line):
if "," in line:
splitLine = line.strip().split(",")
n = len(splitLine)
if n:
if not splitLine[-1].strip():
return n-1
else:
return n
else:
return 0
else:
# Too flexible.
# return len([x for x in line.strip().split() if x != ","])
return len(line.strip().split())
def _isComment(strippedLine):
if strippedLine:
return strippedLine.startswith("#")
else:
return True
def _calculateColumnsFromFile(f, format, rewind):
# Calculate the number of columns.
  # We will put more trust in the second line than in the first, in case the
# first line includes header entries.
if format not in [0, 2, 3]:
raise RuntimeError("Supported formats are 0, 2, and 3.")
if format == 0:
line0 = f.readline()
csplit = line0.split()
if len(csplit) != 1:
raise RuntimeError("Expected first line of data file to "
"contain a single number of columns. "
" Found %d fields" % len(csplit))
try:
numColumns = int(csplit[0])
except:
raise RuntimeError("Expected first line of data file to "
"contain a single number of columns. Found '%s'" % csplit[0])
if rewind:
f.seek(0)
return numColumns
elif format == 2:
numColumns = 0
numLinesRead = 0
for line in f:
strippedLine = line.strip()
if not _isComment(strippedLine):
curColumns = _calculateColumnsFromLine(strippedLine)
numLinesRead += 1
if numColumns and (numColumns != curColumns):
raise RuntimeError("Different lines have different "
"numbers of columns.")
else:
numColumns = curColumns
if numLinesRead > 1:
break
if rewind:
f.seek(0)
return numColumns
# CSV file: we'll just check the first line
elif format == 3:
strippedLine = f.readline().strip()
    numColumns = _calculateColumnsFromLine(strippedLine)
if rewind:
f.seek(0)
return numColumns
def processCategoryFile(f, format, categoryColumn=None, categoryColumns=None, count=1):
"""Read the data out of the given category file, returning a tuple
(categoryCount, listOfCategories)
@param f A file-like object containing the category info.
@param format The format of the category file. TODO: describe.
@param categoryColumn If non-None, this is the column number (zero-based)
where the category info starts in the file. If
None, indicates that the file only contains category
information (same as passing 0, but allows some
extra sanity checking).
@param categoryColumns Indicates how many categories are active per
timepoint (how many elements wide the category info
is). If 0, we'll determine this from the file. If
None (the default), means that the category info
is 1 element wide, and that the list we return
will just be a list of ints (rather than a list of
lists)
@param count Determines the size of chunks that will be aggregated
into a single entry. The default is 1, so each entry
from the file will be represented in the result. If
count > 1 then 'count' categories (all identical) will
be collapsed into a single entry. This is helpful for
aggregating explorers like EyeMovements where multiple
                        presentations are conceptually the same item.
@return categoryCount The number of categories (aka maxCat + 1)
@return allCategories A list of the categories read in, with one item per
time point. If 'categoryColumns' is None, each item
will be an int. Otherwise, each item will be a list
of ints. If count > 1 then the categories will be
aggregated, so that each chunk of 'count' categories
will result in only one entry (all categories in a chunk
must be identical)
"""
calculatedCategoryColumns = _calculateColumnsFromFile(f, format=format,
rewind=(format==2 or format==3))
# If the user passed categoryColumns as None, we'll return a list of ints
# directly; otherwise we'll return a list of lists...
wantListOfInts = (categoryColumns is None)
# Get arguments sanitized...
if categoryColumns == 0:
# User has told us to auto-calculate the # of categories / time point...
# If categoryColumn is not 0 or None, that's an error...
if categoryColumn:
raise RuntimeError("You can't specify an offset for category data "
"if using automatic width.")
categoryColumn = 0
categoryColumns = calculatedCategoryColumns
elif categoryColumns is None:
# User has told us that there's just one category...
if categoryColumn is None:
if calculatedCategoryColumns != 1:
raise RuntimeError("Category file must contain exactly one column.")
categoryColumn = 0
categoryColumns = 1
else:
# User specified exactly how big the category data is...
if (categoryColumns + categoryColumn) > calculatedCategoryColumns:
raise RuntimeError("Not enough categories in file")
maxCategory = 0
allCategories = []
for line in f:
strippedLine = line.strip()
if not _isComment(strippedLine):
if wantListOfInts:
category = int(strippedLine.split()[categoryColumn])
allCategories.append(category)
maxCategory = max(maxCategory, category)
else:
categories = strippedLine.split()[categoryColumn:
categoryColumn+categoryColumns]
categories = map(int, categories)
allCategories.append(categories)
maxCategory = max(maxCategory, max(categories))
categoryCount = maxCategory + 1
# Aggregate categories
result = []
if count > 1:
    # Make sure the number of categories can be aggregated
# exactly by chunks of size 'count'
assert len(allCategories) % count == 0
start = 0
for i in range(len(allCategories) / count):
end = start + count
# Make sure each chunk of size 'count' contains exactly one category
assert (min(allCategories[start:end]) == max(allCategories[start:end]))
# Add just one entry for each chunk
result.append(allCategories[start])
start = end
else:
result = allCategories
return categoryCount, result
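# Illustrative usage sketch, not part of NuPIC itself: drives processCategoryFile
# with a tiny in-memory category file in format 2 (one whitespace- or
# comma-separated row per time point). The sample data and the __main__ guard
# below are hypothetical.
if __name__ == "__main__":
  from StringIO import StringIO
  sample = StringIO("# a comment line\n0\n1\n1\n2\n")
  categoryCount, categories = processCategoryFile(sample, format=2)
  assert categoryCount == 3           # categories seen are 0, 1 and 2, so maxCat + 1 == 3
  assert categories == [0, 1, 1, 2]   # one int per time point since categoryColumns is None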
| agpl-3.0 |
tszym/ansible | lib/ansible/modules/packaging/os/swupd.py | 7 | 9152 | #!/usr/bin/python
# (c) 2017, Alberto Murillo <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: swupd
short_description: Manages updates and bundles in ClearLinux systems.
description:
- Manages updates and bundles with the swupd bundle manager, which is used by the
Clear Linux Project for Intel Architecture.
version_added: "2.3"
author: Alberto Murillo (@albertomurillo)
options:
contenturl:
description:
- URL pointing to the contents of available bundles.
If not specified, the contents are retrieved from clearlinux.org.
required: false
default: null
format:
description:
- The format suffix for version file downloads. For example [1,2,3,staging,etc].
If not specified, the default format is used.
required: false
default: null
manifest:
description:
      - The manifest contains information about the bundles at a certain version of the OS.
Specify a Manifest version to verify against that version or leave unspecified to
verify against the current version.
required: false
default: null
aliases: [release, version]
name:
description:
- Name of the (I)bundle to install or remove.
required: false
default: null
aliases: [bundle]
state:
description:
- Indicates the desired (I)bundle state. C(present) ensures the bundle
is installed while C(absent) ensures the (I)bundle is not installed.
required: false
default: present
choices: [present, absent]
update:
description:
- Updates the OS to the latest version.
required: false
default: no
url:
description:
- Overrides both I(contenturl) and I(versionurl).
required: false
default: null
verify:
description:
- Verify content for OS version.
required: false
default: null
versionurl:
description:
- URL for version string download.
required: false
default: null
'''
EXAMPLES = '''
- name: Update the OS to the latest version
swupd:
update: yes
- name: Installs the "foo" bundle
swupd:
name: foo
state: present
- name: Removes the "foo" bundle
swupd:
name: foo
state: absent
- name: Check integrity of filesystem
swupd:
verify: yes
- name: Downgrade OS to release 12920
swupd:
verify: yes
manifest: 12920
'''
RETURN = '''
stdout:
description: stdout of swupd
returned: always
type: string
stderr:
description: stderr of swupd
returned: always
type: string
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Swupd(object):
FILES_NOT_MATCH = "files did not match"
FILES_REPLACED = "missing files were replaced"
FILES_FIXED = "files were fixed"
FILES_DELETED = "files were deleted"
def __init__(self, module):
# Fail if swupd is not found
self.module = module
self.swupd_cmd = module.get_bin_path("swupd", False)
if not self.swupd_cmd:
module.fail_json(msg="Could not find swupd.")
# Initialize parameters
for key in module.params.keys():
setattr(self, key, module.params[key])
# Initialize return values
self.changed = False
self.failed = False
self.msg = None
self.rc = None
self.stderr = ""
self.stdout = ""
def _run_cmd(self, cmd):
self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)
def _get_cmd(self, command):
cmd = "%s %s" % (self.swupd_cmd, command)
if self.format:
cmd += " --format=%s" % self.format
if self.manifest:
cmd += " --manifest=%s" % self.manifest
if self.url:
cmd += " --url=%s" % self.url
else:
if self.contenturl and command != "check-update":
cmd += " --contenturl=%s" % self.contenturl
if self.versionurl:
cmd += " --versionurl=%s" % self.versionurl
return cmd
def _is_bundle_installed(self, bundle):
try:
os.stat("/usr/share/clear/bundles/%s" % bundle)
except OSError:
return False
return True
def _needs_update(self):
cmd = self._get_cmd("check-update")
self._run_cmd(cmd)
if self.rc == 0:
return True
if self.rc == 1:
return False
self.failed = True
self.msg = "Failed to check for updates"
def _needs_verify(self):
cmd = self._get_cmd("verify")
self._run_cmd(cmd)
if self.rc != 0:
self.failed = True
self.msg = "Failed to check for filesystem inconsistencies."
if self.FILES_NOT_MATCH in self.stdout:
return True
return False
def install_bundle(self, bundle):
"""Installs a bundle with `swupd bundle-add bundle`"""
if self.module.check_mode:
self.module.exit_json(changed=not self._is_bundle_installed(bundle))
if self._is_bundle_installed(bundle):
self.msg = "Bundle %s is already installed" % bundle
return
cmd = self._get_cmd("bundle-add %s" % bundle)
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Bundle %s installed" % bundle
return
if self.rc == 18:
self.msg = "Bundle name %s is invalid" % bundle
return
self.failed = True
self.msg = "Failed to install bundle %s" % bundle
def remove_bundle(self, bundle):
"""Removes a bundle with `swupd bundle-remove bundle`"""
if self.module.check_mode:
self.module.exit_json(changed=self._is_bundle_installed(bundle))
if not self._is_bundle_installed(bundle):
self.msg = "Bundle %s not installed"
return
cmd = self._get_cmd("bundle-remove %s" % bundle)
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Bundle %s removed" % bundle
return
self.failed = True
self.msg = "Failed to remove bundle %s" % bundle
def update_os(self):
"""Updates the os with `swupd update`"""
if self.module.check_mode:
self.module.exit_json(changed=self._needs_update())
if not self._needs_update():
self.msg = "There are no updates available"
return
cmd = self._get_cmd("update")
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Update successful"
return
self.failed = True
self.msg = "Failed to check for updates"
def verify_os(self):
"""Verifies filesystem against specified or current version"""
if self.module.check_mode:
self.module.exit_json(changed=self._needs_verify())
if not self._needs_verify():
self.msg = "No files where changed"
return
cmd = self._get_cmd("verify --fix")
self._run_cmd(cmd)
if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
self.changed = True
self.msg = "Fix successful"
return
self.failed = True
self.msg = "Failed to verify the OS"
def main():
"""The main function."""
module = AnsibleModule(
argument_spec=dict(
contenturl=dict(type="str"),
format=dict(type="str"),
manifest=dict(aliases=["release", "version"], type="int"),
name=dict(aliases=["bundle"], type="str"),
state=dict(default="present", choices=["present", "absent"], type="str"),
update=dict(default=False, type="bool"),
url=dict(type="str"),
verify=dict(default=False, type="bool"),
versionurl=dict(type="str"),
),
required_one_of=[["name", "update", "verify"]],
mutually_exclusive=[["name", "update", "verify"]],
supports_check_mode=True
)
swupd = Swupd(module)
name = module.params["name"]
state = module.params["state"]
update = module.params["update"]
verify = module.params["verify"]
if update:
swupd.update_os()
elif verify:
swupd.verify_os()
elif state == "present":
swupd.install_bundle(name)
elif state == "absent":
swupd.remove_bundle(name)
else:
swupd.failed = True
if swupd.failed:
module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
else:
module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
if __name__ == '__main__':
main()
| gpl-3.0 |
drawks/ansible | lib/ansible/modules/windows/win_rds_cap.py | 38 | 4243 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Subileau (@ksubileau)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_rds_cap
short_description: Manage Connection Authorization Policies (CAP) on a Remote Desktop Gateway server
description:
- Creates, removes and configures a Remote Desktop connection authorization policy (RD CAP).
- A RD CAP allows you to specify the users who can connect to a Remote Desktop Gateway server.
version_added: "2.8"
author:
- Kevin Subileau (@ksubileau)
options:
name:
description:
- Name of the connection authorization policy.
type: str
required: yes
state:
description:
- The state of connection authorization policy.
- If C(absent) will ensure the policy is removed.
- If C(present) will ensure the policy is configured and exists.
- If C(enabled) will ensure the policy is configured, exists and enabled.
- If C(disabled) will ensure the policy is configured, exists, but disabled.
type: str
choices: [ absent, enabled, disabled, present ]
default: present
auth_method:
description:
- Specifies how the RD Gateway server authenticates users.
- When a new CAP is created, the default value is C(password).
type: str
choices: [ both, none, password, smartcard ]
order:
description:
- Evaluation order of the policy.
- The CAP in which I(order) is set to a value of '1' is evaluated first.
- By default, a newly created CAP will take the first position.
      - If the given value exceeds the total number of existing policies,
the policy will take the last position but the evaluation order
will be capped to this number.
type: int
session_timeout:
description:
- The maximum time, in minutes, that a session can be idle.
- A value of zero disables session timeout.
type: int
session_timeout_action:
description:
- The action the server takes when a session times out.
- 'C(disconnect): disconnect the session.'
- 'C(reauth): silently reauthenticate and reauthorize the session.'
type: str
choices: [ disconnect, reauth ]
default: disconnect
idle_timeout:
description:
- Specifies the time interval, in minutes, after which an idle session is disconnected.
- A value of zero disables idle timeout.
type: int
allow_only_sdrts_servers:
description:
- Specifies whether connections are allowed only to Remote Desktop Session Host servers that
enforce Remote Desktop Gateway redirection policy.
type: bool
user_groups:
description:
      - A list of user groups that are allowed to connect to the Remote Gateway server.
- Required when a new CAP is created.
type: list
computer_groups:
description:
      - A list of computer groups that are allowed to connect to the Remote Gateway server.
type: list
redirect_clipboard:
description:
- Allow clipboard redirection.
type: bool
redirect_drives:
description:
- Allow disk drive redirection.
type: bool
redirect_printers:
description:
- Allow printers redirection.
type: bool
redirect_serial:
description:
- Allow serial port redirection.
type: bool
redirect_pnp:
description:
- Allow Plug and Play devices redirection.
type: bool
requirements:
- Windows Server 2008R2 (6.1) or higher.
- The Windows Feature "RDS-Gateway" must be enabled.
seealso:
- module: win_rds_cap
- module: win_rds_rap
- module: win_rds_settings
'''
EXAMPLES = r'''
- name: Create a new RDS CAP with a 30 minutes timeout and clipboard redirection enabled
win_rds_cap:
name: My CAP
user_groups:
- BUILTIN\users
session_timeout: 30
session_timeout_action: disconnect
allow_only_sdrts_servers: yes
redirect_clipboard: yes
redirect_drives: no
redirect_printers: no
redirect_serial: no
redirect_pnp: no
state: enabled
'''
RETURN = r'''
'''
| gpl-3.0 |
VielSoft/odoo | addons/account/wizard/account_fiscalyear_close.py | 222 | 15660 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_fiscalyear_close(osv.osv_memory):
"""
Closes Account Fiscalyear and Generate Opening entries for New Fiscalyear
"""
_name = "account.fiscalyear.close"
_description = "Fiscalyear Close"
_columns = {
'fy_id': fields.many2one('account.fiscalyear', \
'Fiscal Year to close', required=True, help="Select a Fiscal year to close"),
'fy2_id': fields.many2one('account.fiscalyear', \
'New Fiscal Year', required=True),
'journal_id': fields.many2one('account.journal', 'Opening Entries Journal', domain="[('type','=','situation')]", required=True, help='The best practice here is to use a journal dedicated to contain the opening entries of all fiscal years. Note that you should define it with default debit/credit accounts, of type \'situation\' and with a centralized counterpart.'),
'period_id': fields.many2one('account.period', 'Opening Entries Period', required=True),
'report_name': fields.char('Name of new entries', required=True, help="Give name of the new entries"),
}
_defaults = {
'report_name': lambda self, cr, uid, context: _('End of Fiscal Year Entry'),
}
def data_save(self, cr, uid, ids, context=None):
"""
This function close account fiscalyear and create entries in new fiscalyear
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Account fiscalyear close state’s IDs
"""
def _reconcile_fy_closing(cr, uid, ids, context=None):
"""
            This private function manually does the reconciliation on the account_move_line records given as `ids`, directly
            through psql. It's necessary to do it this way because the usual `reconcile()` function on account.move.line
            objects is really resource greedy (it is not meant to reconcile thousands of records at once) and
            does a lot of extra computation that is useless in this particular case.
"""
            # check that the reconciliation concerns journal entries from only one company
cr.execute('select distinct(company_id) from account_move_line where id in %s',(tuple(ids),))
if len(cr.fetchall()) > 1:
raise osv.except_osv(_('Warning!'), _('The entries to reconcile should belong to the same company.'))
r_id = self.pool.get('account.move.reconcile').create(cr, uid, {'type': 'auto', 'opening_reconciliation': True})
cr.execute('update account_move_line set reconcile_id = %s where id in %s',(r_id, tuple(ids),))
            # reconcile_ref depends on reconcile_id but was not recomputed
obj_acc_move_line._store_set_values(cr, uid, ids, ['reconcile_ref'], context=context)
obj_acc_move_line.invalidate_cache(cr, uid, ['reconcile_id'], ids, context=context)
return r_id
obj_acc_period = self.pool.get('account.period')
obj_acc_fiscalyear = self.pool.get('account.fiscalyear')
obj_acc_journal = self.pool.get('account.journal')
obj_acc_move = self.pool.get('account.move')
obj_acc_move_line = self.pool.get('account.move.line')
obj_acc_account = self.pool.get('account.account')
obj_acc_journal_period = self.pool.get('account.journal.period')
currency_obj = self.pool.get('res.currency')
data = self.browse(cr, uid, ids, context=context)
if context is None:
context = {}
fy_id = data[0].fy_id.id
cr.execute("SELECT id FROM account_period WHERE date_stop < (SELECT date_start FROM account_fiscalyear WHERE id = %s)", (str(data[0].fy2_id.id),))
fy_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
cr.execute("SELECT id FROM account_period WHERE date_start > (SELECT date_stop FROM account_fiscalyear WHERE id = %s)", (str(fy_id),))
fy2_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
if not fy_period_set or not fy2_period_set:
raise osv.except_osv(_('User Error!'), _('The periods to generate opening entries cannot be found.'))
period = obj_acc_period.browse(cr, uid, data[0].period_id.id, context=context)
new_fyear = obj_acc_fiscalyear.browse(cr, uid, data[0].fy2_id.id, context=context)
old_fyear = obj_acc_fiscalyear.browse(cr, uid, fy_id, context=context)
new_journal = data[0].journal_id.id
new_journal = obj_acc_journal.browse(cr, uid, new_journal, context=context)
company_id = new_journal.company_id.id
if not new_journal.default_credit_account_id or not new_journal.default_debit_account_id:
raise osv.except_osv(_('User Error!'),
_('The journal must have default credit and debit account.'))
if (not new_journal.centralisation) or new_journal.entry_posted:
raise osv.except_osv(_('User Error!'),
_('The journal must have centralized counterpart without the Skipping draft state option checked.'))
#delete existing move and move lines if any
move_ids = obj_acc_move.search(cr, uid, [
('journal_id', '=', new_journal.id), ('period_id', '=', period.id)])
if move_ids:
move_line_ids = obj_acc_move_line.search(cr, uid, [('move_id', 'in', move_ids)])
obj_acc_move_line._remove_move_reconcile(cr, uid, move_line_ids, opening_reconciliation=True, context=context)
obj_acc_move_line.unlink(cr, uid, move_line_ids, context=context)
obj_acc_move.unlink(cr, uid, move_ids, context=context)
cr.execute("SELECT id FROM account_fiscalyear WHERE date_stop < %s", (str(new_fyear.date_start),))
result = cr.dictfetchall()
fy_ids = [x['id'] for x in result]
query_line = obj_acc_move_line._query_get(cr, uid,
obj='account_move_line', context={'fiscalyear': fy_ids})
#create the opening move
vals = {
'name': '/',
'ref': '',
'period_id': period.id,
'date': period.date_start,
'journal_id': new_journal.id,
}
move_id = obj_acc_move.create(cr, uid, vals, context=context)
        #1. report of the accounts with deferral method == 'unreconciled'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'unreconciled', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + '''
AND reconcile_id IS NULL)''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
        #We also have to consider all move_lines that were reconciled
        #in another fiscal year, and report them too
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT
b.name, b.create_uid, b.create_date, b.write_uid, b.write_date,
b.statement_id, %s, b.currency_id, b.date_maturity,
b.partner_id, b.blocked, b.credit, 'draft', b.debit,
b.ref, b.account_id, %s, (%s) AS date, %s, b.amount_currency,
b.quantity, b.product_id, b.company_id
FROM account_move_line b
WHERE b.account_id IN %s
AND b.reconcile_id IS NOT NULL
AND b.period_id IN ('''+fy_period_set+''')
AND b.reconcile_id IN (SELECT DISTINCT(reconcile_id)
FROM account_move_line a
WHERE a.period_id IN ('''+fy2_period_set+''')))''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
        #2. report of the accounts with deferral method == 'detail'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'detail', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + ''')
''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
        #3. report of the accounts with deferral method == 'balance'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'balance', ))
account_ids = map(lambda x: x[0], cr.fetchall())
query_1st_part = """
INSERT INTO account_move_line (
debit, credit, name, date, move_id, journal_id, period_id,
account_id, currency_id, amount_currency, company_id, state) VALUES
"""
query_2nd_part = ""
query_2nd_part_args = []
for account in obj_acc_account.browse(cr, uid, account_ids, context={'fiscalyear': fy_id}):
company_currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id
if not currency_obj.is_zero(cr, uid, company_currency_id, abs(account.balance)):
if query_2nd_part:
query_2nd_part += ','
query_2nd_part += "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
query_2nd_part_args += (account.balance > 0 and account.balance or 0.0,
account.balance < 0 and -account.balance or 0.0,
data[0].report_name,
period.date_start,
move_id,
new_journal.id,
period.id,
account.id,
account.currency_id and account.currency_id.id or None,
account.foreign_balance if account.currency_id else 0.0,
account.company_id.id,
'draft')
if query_2nd_part:
cr.execute(query_1st_part + query_2nd_part, tuple(query_2nd_part_args))
self.invalidate_cache(cr, uid, context=context)
#validate and centralize the opening move
obj_acc_move.validate(cr, uid, [move_id], context=context)
#reconcile all the move.line of the opening move
ids = obj_acc_move_line.search(cr, uid, [('journal_id', '=', new_journal.id),
('period_id.fiscalyear_id','=',new_fyear.id)])
if ids:
reconcile_id = _reconcile_fy_closing(cr, uid, ids, context=context)
            #set the creation date of the reconciliation at the first day of the new fiscalyear, in order to have good figures in the aged trial balance
self.pool.get('account.move.reconcile').write(cr, uid, [reconcile_id], {'create_date': new_fyear.date_start}, context=context)
#create the journal.period object and link it to the old fiscalyear
new_period = data[0].period_id.id
ids = obj_acc_journal_period.search(cr, uid, [('journal_id', '=', new_journal.id), ('period_id', '=', new_period)])
if not ids:
ids = [obj_acc_journal_period.create(cr, uid, {
'name': (new_journal.name or '') + ':' + (period.code or ''),
'journal_id': new_journal.id,
'period_id': period.id
})]
cr.execute('UPDATE account_fiscalyear ' \
'SET end_journal_period_id = %s ' \
'WHERE id = %s', (ids[0], old_fyear.id))
obj_acc_fiscalyear.invalidate_cache(cr, uid, ['end_journal_period_id'], [old_fyear.id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vams1991/Security-Tool | app/views.py | 1 | 1823 | from flask import render_template, flash, redirect,jsonify
from app import app
from .forms import LoginForm,Scanner
from XSSModule import XSS_Module
from urlparse import urlparse
from sql import SQL_Module
from crawler import main
@app.route('/')
@app.route('/index')
def index():
user = {'nickname': 'hacker'}
posts = [
{
'author': {'nickname': 'John'},
'body': 'Beautiful day in Portland!'
},
{
'author': {'nickname': 'Susan'},
'body': 'The Avengers movie was so cool!'
}
]
return render_template('index.html',
title='Home',
user=user,
posts=posts)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
flash('Login requested for OpenID="%s", remember_me=%s' %
(form.openid.data, str(form.remember_me.data)))
return redirect('/index')
return render_template('login.html',
title='Sign In',
form=form,
providers=app.config['OPENID_PROVIDERS'])
@app.route('/scanner', methods=['GET', 'POST'])
def scanner():
form = Scanner()
if form.validate_on_submit():
flash('Scanning URL="%s"' %
(form.seed_url.data))
o = urlparse(form.seed_url.data)
if o.scheme=='http' or o.scheme=='https':
flash('Valid URL !')
obj=main(form.seed_url.data)
#XSS_Module(form.seed_url.data,obj)
SQL_Module(form.seed_url.data,obj)
        else:
            flash('Invalid URL!')
return render_template('scanner.html',
title='Scanner',
form=form)
| bsd-3-clause |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/encodings/utf_16.py | 404 | 3984 | """ Python 'utf-16' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs, sys
### Codec APIs
encode = codecs.utf_16_encode
def decode(input, errors='strict'):
return codecs.utf_16_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
codecs.IncrementalEncoder.__init__(self, errors)
self.encoder = None
def encode(self, input, final=False):
if self.encoder is None:
result = codecs.utf_16_encode(input, self.errors)[0]
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
return result
return self.encoder(input, self.errors)[0]
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.encoder = None
def getstate(self):
# state info we return to the caller:
# 0: stream is in natural order for this platform
# 2: endianness hasn't been determined yet
# (we're never writing in unnatural order)
return (2 if self.encoder is None else 0)
def setstate(self, state):
if state:
self.encoder = None
else:
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def __init__(self, errors='strict'):
codecs.BufferedIncrementalDecoder.__init__(self, errors)
self.decoder = None
def _buffer_decode(self, input, errors, final):
if self.decoder is None:
(output, consumed, byteorder) = \
codecs.utf_16_ex_decode(input, errors, 0, final)
if byteorder == -1:
self.decoder = codecs.utf_16_le_decode
elif byteorder == 1:
self.decoder = codecs.utf_16_be_decode
elif consumed >= 2:
raise UnicodeError("UTF-16 stream does not start with BOM")
return (output, consumed)
return self.decoder(input, self.errors, final)
def reset(self):
codecs.BufferedIncrementalDecoder.reset(self)
self.decoder = None
class StreamWriter(codecs.StreamWriter):
def __init__(self, stream, errors='strict'):
codecs.StreamWriter.__init__(self, stream, errors)
self.encoder = None
def reset(self):
codecs.StreamWriter.reset(self)
self.encoder = None
def encode(self, input, errors='strict'):
if self.encoder is None:
result = codecs.utf_16_encode(input, errors)
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
return result
else:
return self.encoder(input, errors)
class StreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors='strict'):
(object, consumed, byteorder) = \
codecs.utf_16_ex_decode(input, errors, 0, False)
if byteorder == -1:
self.decode = codecs.utf_16_le_decode
elif byteorder == 1:
self.decode = codecs.utf_16_be_decode
elif consumed>=2:
raise UnicodeError,"UTF-16 stream does not start with BOM"
return (object, consumed)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
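# Short illustrative sketch, not part of the codec module: the incremental
# decoder sniffs the byte order from the BOM in the first chunk and then keeps
# using the matching decoder for the remaining chunks.
if __name__ == '__main__':
    _dec = IncrementalDecoder()
    _text = _dec.decode(codecs.BOM_UTF16_LE + u'ab'.encode('utf-16-le'))
    _text += _dec.decode(u'c'.encode('utf-16-le'), final=True)
    assert _text == u'abc'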
| gpl-2.0 |
goofwear/raspberry_pwn | src/pentest/metagoofil/hachoir_parser/container/action_script.py | 9 | 12011 | """
SWF (Macromedia/Adobe Flash) file parser.
Documentation:
- Alexis' SWF Reference:
http://www.m2osw.com/swf_alexref.html
Author: Sebastien Ponce
Creation date: 26 April 2008
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, UInt8, UInt32, Int16, UInt16, Float32, CString, Enum,
Bytes, RawBytes, NullBits, String, SubFile, Field)
from hachoir_core.field.float import FloatExponent
from struct import unpack
class FlashFloat64(FieldSet):
def createFields(self):
yield Bits(self, "mantisa_high", 20)
yield FloatExponent(self, "exponent", 11)
yield Bit(self, "negative")
yield Bits(self, "mantisa_low", 32)
def createValue(self):
# Manual computation:
        # mantisa = mantisa_high * 2^32 + mantisa_low
        # float = 2^exponent * (1 + mantisa / 2^52)
# (and float is negative if negative=True)
bytes = self.parent.stream.readBytes(
self.absolute_address, self.size//8)
# Mix bytes: xxxxyyyy <=> yyyyxxxx
bytes = bytes[4:8] + bytes[0:4]
return unpack('<d', bytes)[0]
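# Standalone sketch of the same reconstruction (assumption: the exponent passed in
# is already unbiased, which is how FloatExponent exposes it). This helper is only
# an illustration and is not used by the parser.
def _float64_from_fields(negative, exponent, mantisa_high, mantisa_low):
    mantisa = (mantisa_high << 32) | mantisa_low
    value = 2.0 ** exponent * (1 + mantisa / 2.0 ** 52)
    return -value if negative else value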
TYPE_INFO = {
0x00: (CString, "Cstring[]"),
0x01: (Float32, "Float[]"),
0x02: (None, "Null[]"),
0x03: (None, "Undefined[]"),
0x04: (UInt8, "Register[]"),
0x05: (UInt8, "Boolean[]"),
0x06: (FlashFloat64, "Double[]"),
0x07: (UInt32, "Integer[]"),
0x08: (UInt8, "Dictionnary_Lookup_Index[]"),
0x09: (UInt16, "Large_Dictionnary_Lookup_Index[]"),
}
def parseBranch(parent, size):
yield Int16(parent, "offset")
def parseDeclareFunction(parent, size):
yield CString(parent, "name")
argCount = UInt16(parent, "arg_count")
yield argCount
for i in range(argCount.value):
yield CString(parent, "arg[]")
yield UInt16(parent, "function_length")
def parseDeclareFunctionV7(parent, size):
yield CString(parent, "name")
argCount = UInt16(parent, "arg_count")
yield argCount
yield UInt8(parent, "reg_count")
yield Bits(parent, "reserved", 7)
yield Bit(parent, "preload_global")
yield Bit(parent, "preload_parent")
yield Bit(parent, "preload_root")
yield Bit(parent, "suppress_super")
yield Bit(parent, "preload_super")
yield Bit(parent, "suppress_arguments")
yield Bit(parent, "preload_arguments")
yield Bit(parent, "suppress_this")
yield Bit(parent, "preload_this")
for i in range(argCount.value):
yield UInt8(parent, "register[]")
yield CString(parent, "arg[]")
yield UInt16(parent, "function_length")
def parseTry(parent, size):
yield Bits(parent, "reserved", 5)
catchInReg = Bit(parent, "catch_in_register")
yield catchInReg
yield Bit(parent, "finally")
yield Bit(parent, "catch")
yield UInt8(parent, "try_size")
yield UInt8(parent, "catch_size")
yield UInt8(parent, "finally_size")
if catchInReg.value:
yield CString(parent, "name")
else:
yield UInt8(parent, "register")
def parsePushData(parent, size):
while not parent.eof:
codeobj = UInt8(parent, "data_type[]")
yield codeobj
code = codeobj.value
if code not in TYPE_INFO:
raise ParserError("Unknown type in Push_Data : " + hex(code))
parser, name = TYPE_INFO[code]
if parser:
yield parser(parent, name)
# else:
# yield Field(parent, name, 0)
def parseSetTarget(parent, size):
yield CString(parent, "target")
def parseWith(parent, size):
yield UInt16(parent, "size")
def parseGetURL(parent, size):
yield CString(parent, "url")
yield CString(parent, "target")
def parseGetURL2(parent, size):
yield UInt8(parent, "method")
def parseGotoExpression(parent, size):
yield UInt8(parent, "play")
def parseGotoFrame(parent, size):
yield UInt16(parent, "frame_no")
def parseGotoLabel(parent, size):
yield CString(parent, "label")
def parseWaitForFrame(parent, size):
yield UInt16(parent, "frame")
yield UInt8(parent, "skip")
def parseWaitForFrameDyn(parent, size):
yield UInt8(parent, "skip")
def parseDeclareDictionnary(parent, size):
count = UInt16(parent, "count")
yield count
for i in range(count.value):
yield CString(parent, "dictionnary[]")
def parseStoreRegister(parent, size):
yield UInt8(parent, "register")
def parseStrictMode(parent, size):
yield UInt8(parent, "strict")
class Instruction(FieldSet):
ACTION_INFO = {
0x00: ("end[]", "End", None),
0x99: ("Branch_Always[]", "Branch Always", parseBranch),
0x9D: ("Branch_If_True[]", "Branch If True", parseBranch),
0x3D: ("Call_Function[]", "Call Function", None),
0x52: ("Call_Method[]", "Call Method", None),
0x9B: ("Declare_Function[]", "Declare Function", parseDeclareFunction),
0x8E: ("Declare_Function_V7[]", "Declare Function (V7)", parseDeclareFunctionV7),
0x3E: ("Return[]", "Return", None),
0x2A: ("Throw[]", "Throw", None),
0x8F: ("Try[]", "Try", parseTry),
# Stack Control
0x4C: ("Duplicate[]", "Duplicate", None),
0x96: ("Push_Data[]", "Push Data", parsePushData),
0x4D: ("Swap[]", "Swap", None),
# Action Script Context
0x8B: ("Set_Target[]", "Set Target", parseSetTarget),
0x20: ("Set_Target_dynamic[]", "Set Target (dynamic)", None),
0x94: ("With[]", "With", parseWith),
# Movie Control
0x9E: ("Call_Frame[]", "Call Frame", None),
0x83: ("Get_URL[]", "Get URL", parseGetURL),
0x9A: ("Get_URL2[]", "Get URL2", parseGetURL2),
0x9F: ("Goto_Expression[]", "Goto Expression", parseGotoExpression),
0x81: ("Goto_Frame[]", "Goto Frame", parseGotoFrame),
0x8C: ("Goto_Label[]", "Goto Label", parseGotoLabel),
0x04: ("Next_Frame[]", "Next Frame", None),
0x06: ("Play[]", "Play", None),
0x05: ("Previous_Frame[]", "Previous Frame", None),
0x07: ("Stop[]", "Stop", None),
0x08: ("Toggle_Quality[]", "Toggle Quality", None),
0x8A: ("Wait_For_Frame[]", "Wait For Frame", parseWaitForFrame),
0x8D: ("Wait_For_Frame_dynamic[]", "Wait For Frame (dynamic)", parseWaitForFrameDyn),
# Sound
0x09: ("Stop_Sound[]", "Stop Sound", None),
# Arithmetic
0x0A: ("Add[]", "Add", None),
0x47: ("Add_typed[]", "Add (typed)", None),
0x51: ("Decrement[]", "Decrement", None),
0x0D: ("Divide[]", "Divide", None),
0x50: ("Increment[]", "Increment", None),
0x18: ("Integral_Part[]", "Integral Part", None),
0x3F: ("Modulo[]", "Modulo", None),
0x0C: ("Multiply[]", "Multiply", None),
0x4A: ("Number[]", "Number", None),
0x0B: ("Subtract[]", "Subtract", None),
# Comparisons
0x0E: ("Equal[]", "Equal", None),
0x49: ("Equal_typed[]", "Equal (typed)", None),
0x66: ("Strict_Equal[]", "Strict Equal", None),
0x67: ("Greater_Than_typed[]", "Greater Than (typed)", None),
0x0F: ("Less_Than[]", "Less Than", None),
0x48: ("Less_Than_typed[]", "Less Than (typed)", None),
0x13: ("String_Equal[]", "String Equal", None),
0x68: ("String_Greater_Than[]", "String Greater Than", None),
0x29: ("String_Less_Than[]", "String Less Than", None),
# Logical and Bit Wise
0x60: ("And[]", "And", None),
0x10: ("Logical_And[]", "Logical And", None),
0x12: ("Logical_Not[]", "Logical Not", None),
0x11: ("Logical_Or[]", "Logical Or", None),
0x61: ("Or[]", "Or", None),
0x63: ("Shift_Left[]", "Shift Left", None),
0x64: ("Shift_Right[]", "Shift Right", None),
0x65: ("Shift_Right_Unsigned[]", "Shift Right Unsigned", None),
0x62: ("Xor[]", "Xor", None),
# Strings & Characters (See the String Object also)
0x33: ("Chr[]", "Chr", None),
0x37: ("Chr_multi-bytes[]", "Chr (multi-bytes)", None),
0x21: ("Concatenate_Strings[]", "Concatenate Strings", None),
0x32: ("Ord[]", "Ord", None),
0x36: ("Ord_multi-bytes[]", "Ord (multi-bytes)", None),
0x4B: ("String[]", "String", None),
0x14: ("String_Length[]", "String Length", None),
0x31: ("String_Length_multi-bytes[]", "String Length (multi-bytes)", None),
0x15: ("SubString[]", "SubString", None),
0x35: ("SubString_multi-bytes[]", "SubString (multi-bytes)", None),
# Properties
0x22: ("Get_Property[]", "Get Property", None),
0x23: ("Set_Property[]", "Set Property", None),
# Objects
0x2B: ("Cast_Object[]", "Cast Object", None),
0x42: ("Declare_Array[]", "Declare Array", None),
0x88: ("Declare_Dictionary[]", "Declare Dictionary", parseDeclareDictionnary),
0x43: ("Declare_Object[]", "Declare Object", None),
0x3A: ("Delete[]", "Delete", None),
0x3B: ("Delete_All[]", "Delete All", None),
0x24: ("Duplicate_Sprite[]", "Duplicate Sprite", None),
0x46: ("Enumerate[]", "Enumerate", None),
0x55: ("Enumerate_Object[]", "Enumerate Object", None),
0x69: ("Extends[]", "Extends", None),
0x4E: ("Get_Member[]", "Get Member", None),
0x45: ("Get_Target[]", "Get Target", None),
0x2C: ("Implements[]", "Implements", None),
0x54: ("Instance_Of[]", "Instance Of", None),
0x40: ("New[]", "New", None),
0x53: ("New_Method[]", "New Method", None),
0x25: ("Remove_Sprite[]", "Remove Sprite", None),
0x4F: ("Set_Member[]", "Set Member", None),
0x44: ("Type_Of[]", "Type Of", None),
# Variables
0x41: ("Declare_Local_Variable[]", "Declare Local Variable", None),
0x1C: ("Get_Variable[]", "Get Variable", None),
0x3C: ("Set_Local_Variable[]", "Set Local Variable", None),
0x1D: ("Set_Variable[]", "Set Variable", None),
# Miscellaneous
0x2D: ("FSCommand2[]", "FSCommand2", None),
0x34: ("Get_Timer[]", "Get Timer", None),
0x30: ("Random[]", "Random", None),
0x27: ("Start_Drag[]", "Start Drag", None),
0x28: ("Stop_Drag[]", "Stop Drag", None),
0x87: ("Store_Register[]", "Store Register", parseStoreRegister),
0x89: ("Strict_Mode[]", "Strict Mode", parseStrictMode),
0x26: ("Trace[]", "Trace", None),
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
code = self["action_id"].value
if code & 128:
self._size = (3 + self["action_length"].value) * 8
else:
self._size = 8
if code in self.ACTION_INFO:
self._name, self._description, self.parser = self.ACTION_INFO[code]
else:
self.parser = None
def createFields(self):
yield Bits(self, "action_id", 8)
if not (self["action_id"].value & 128):
return
yield UInt16(self, "action_length")
size = self["action_length"].value
if not size:
return
if self.parser:
for field in self.parser(self, size):
yield field
else:
yield RawBytes(self, "action_data", size)
def createDescription(self):
return self._description
def __str__(self):
r = str(self._description)
for f in self:
            if f.name not in ("action_id", "action_length", "count") and not f.name.startswith("data_type"):
r = r + "\n " + str((self.address+f.address)/8) + " " + str(f.name) + "=" + str(f.value)
return r
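    # Note on the record layout parsed above (summary of __init__/createFields):
    # action codes with the high bit set (>= 0x80) are followed by a 16-bit
    # action_length and that many payload bytes, hence a size of
    # (3 + action_length) * 8 bits; codes below 0x80 are a single byte (8 bits).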
class ActionScript(FieldSet):
def createFields(self):
while not self.eof:
yield Instruction(self, "instr[]")
def __str__(self):
r = ""
for f in self:
r = r + str(f.address/8) + " " + str(f) + "\n"
return r
def parseActionScript(parent, size):
yield ActionScript(parent, "action", size=size*8)
| gpl-3.0 |
0/pathintmatmult | examples/pigs_harmonic_oscillator_entangled.py | 1 | 4033 | #!/usr/bin/env python3
"""
Entangled harmonic oscillators PIGS example.
A pair of identical harmonic oscillators with a harmonic interaction potential.
"""
from argparse import ArgumentParser
import numpy as np
from pathintmatmult import PIGSIMM
from pathintmatmult.constants import HBAR, KB, ME
from pathintmatmult.potentials import harmonic_potential
# Parse arguments.
p = ArgumentParser(description='Calculate entangled HO ground state properties using PIGSMM2.')
p_config = p.add_argument_group('configuration')
p_config.add_argument('--mass', metavar='M', type=float, required=True, help='particle mass (electron masses)')
p_config.add_argument('--omega-0', metavar='W', type=float, required=True, help='central potential angular frequency (K)')
p_config.add_argument('--omega-int', metavar='W', type=float, required=True, help='interaction potential angular frequency (K)')
p_config.add_argument('--grid-range', metavar='R', type=float, required=True, help='grid range from origin (nm)')
p_config.add_argument('--grid-len', metavar='L', type=int, required=True, help='number of points on grid')
p_config.add_argument('--beta', metavar='B', type=float, required=True, help='propagation length (1/K)')
p_config.add_argument('--num-links', metavar='P', type=int, required=True, help='number of links')
p_config.add_argument('--trial-deform', metavar='D', type=float, help='deformation factor for exact trial function')
p.add_argument('--wf-out', metavar='FILE', help='path to output wavefunction values')
p.add_argument('--density-diagonal-out', metavar='FILE', help='path to output diagonal density plot')
args = p.parse_args()
mass = args.mass * ME # g/mol
omega_0 = args.omega_0 * KB / HBAR # 1/ps
omega_int = args.omega_int * KB / HBAR # 1/ps
grid_range = args.grid_range # nm
grid_len = args.grid_len # 1
beta = args.beta / KB # mol/kJ
num_links = args.num_links # 1
trial_deform = args.trial_deform
wf_out = args.wf_out
density_diagonal_out = args.density_diagonal_out
# Calculate values.
pot_0 = harmonic_potential(m=mass, w=omega_0)
pot_int = harmonic_potential(m=mass, w=omega_int)
def total_potential(qs: '[nm]') -> 'kJ/mol':
return pot_0(qs[..., [0]]) + pot_0(qs[..., [1]]) + pot_int(qs[..., [0]] - qs[..., [1]])
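# For reference (assuming harmonic_potential(m, w) is the standard (1/2) m w^2 q^2
# form), the potential implemented above is
#   V(q1, q2) = 1/2 m w0^2 q1^2 + 1/2 m w0^2 q2^2 + 1/2 m w_int^2 (q1 - q2)^2
# whose normal modes have frequencies w_R = w0 (centre of mass) and
# w_r = sqrt(w0^2 + 2 w_int^2) (relative coordinate); these reappear below as
# omega_R and omega_r when building the exact-form trial function.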
kwargs = {}
if trial_deform is not None:
alpha = trial_deform * mass / HBAR # ps/nm^2
omega_R = omega_0 # 1/ps
omega_r = np.sqrt(omega_0 * omega_0 + 2 * omega_int * omega_int) # 1/ps
omega_p = omega_R + omega_r # 1/ps
omega_m = omega_R - omega_r # 1/ps
def trial_f(qs: '[nm]') -> '1':
return np.exp(-0.25 * alpha * (omega_p * (qs[..., 0] ** 2 + qs[..., 1] ** 2) + 2 * omega_m * qs[..., 0] * qs[..., 1]))
def trial_f_diff_0(qs: '[nm]') -> '1/nm^2':
return 0.5 * alpha * (0.5 * alpha * (omega_p * qs[..., 0] + omega_m * qs[..., 1]) ** 2 - omega_p) * trial_f(qs)
def trial_f_diff_1(qs: '[nm]') -> '1/nm^2':
return 0.5 * alpha * (0.5 * alpha * (omega_m * qs[..., 0] + omega_p * qs[..., 1]) ** 2 - omega_p) * trial_f(qs)
kwargs['trial_f'] = trial_f
kwargs['trial_f_diffs'] = [trial_f_diff_0, trial_f_diff_1]
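# Illustrative note: for --trial-deform 1 the trial function above reduces to the
# exact ground state of the coupled pair,
#   psi_0(q1, q2) ~ exp(-m/(4*hbar) * (w_p*(q1^2 + q2^2) + 2*w_m*q1*q2)),
# with w_p = w_R + w_r and w_m = w_R - w_r; trial_f_diff_0/1 are its second
# derivatives with respect to q1 and q2 (passed to PIGSIMM via trial_f_diffs).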
ho_pigs = PIGSIMM([mass, mass], [grid_range, grid_range], [grid_len, grid_len], total_potential, beta, num_links, **kwargs)
estimated_potential_energy = ho_pigs.expectation_value(total_potential) / KB # K
estimated_total_energy = ho_pigs.energy_mixed / KB # K
estimated_trace = ho_pigs.trace_renyi2
print('V = {} K'.format(estimated_potential_energy))
print('E_mixed = {} K'.format(estimated_total_energy))
print('trace = {}'.format(estimated_trace))
# Output wavefunction.
if wf_out:
np.savetxt(wf_out, np.hstack((ho_pigs.grid, ho_pigs.ground_wf[:, np.newaxis])))
# Output plot.
if density_diagonal_out:
from pathintmatmult.plotting import plot2d
xy_range = (-grid_range, grid_range)
density = ho_pigs.density_diagonal.reshape(grid_len, grid_len)
plot2d(density, xy_range, xy_range, density_diagonal_out, x_label=r'$q_2 / \mathrm{nm}$', y_label=r'$q_1 / \mathrm{nm}$')
| mit |
bgribble/mfp | mfp/gui/modes/enum_control.py | 1 | 4501 | #! /usr/bin/env python
'''
enum_control.py: EnumControl major mode
Copyright (c) 2010-2013 Bill Gribble <[email protected]>
'''
import math
from ..input_mode import InputMode
from .label_edit import LabelEditMode
class EnumEditMode (InputMode):
def __init__(self, window, element, label):
self.manager = window.input_mgr
self.window = window
self.enum = element
self.value = element.value
InputMode.__init__(self, "Number box config")
self.bind("C->", self.add_digit, "Increase displayed digits")
self.bind("C-<", self.del_digit, "Decrease displayed digits")
self.bind("C-[", self.set_lower, "Set lower bound on value")
self.bind("C-]", self.set_upper, "Set upper bound on value")
self.extend(LabelEditMode(window, element, label))
def set_upper(self):
def cb(value):
if value.lower() == "none":
value = None
else:
value = float(value)
self.enum.set_bounds(self.enum.min_value, value)
self.window.get_prompted_input("Number upper bound: ", cb)
return True
def set_lower(self):
def cb(value):
if value.lower() == "none":
value = None
else:
value = float(value)
self.enum.set_bounds(value, self.enum.max_value)
self.window.get_prompted_input("Number lower bound: ", cb)
return True
def add_digit(self):
self.enum.digits += 1
self.enum.format_update()
self.enum.update()
return True
def del_digit(self):
if self.enum.digits > 0:
self.enum.digits -= 1
self.enum.format_update()
self.enum.update()
return True
def end_edits(self):
self.manager.disable_minor_mode(self)
self.enum.edit_mode = None
return False
class EnumControlMode (InputMode):
def __init__(self, window, element):
self.manager = window.input_mgr
self.window = window
self.enum = element
self.value = element.value
self.drag_started = False
self.drag_start_x = self.manager.pointer_x
self.drag_start_y = self.manager.pointer_y
self.drag_last_x = self.manager.pointer_x
self.drag_last_y = self.manager.pointer_y
InputMode.__init__(self, "Number box control")
self.bind("M1DOWN", self.drag_start)
self.bind("M1-MOTION", lambda: self.drag_selected(1.0),
"Change value (1x speed)")
self.bind("S-M1-MOTION", lambda: self.drag_selected(10.0),
"Change value (10x speed)")
self.bind("C-M1-MOTION", lambda: self.drag_selected(100.0),
"Change value (100x speed)")
self.bind("M1UP", self.drag_end)
self.bind("UP", lambda: self.changeval(1.0))
self.bind("DOWN", lambda: self.changeval(-1.0))
def changeval(self, delta):
if self.enum.scientific:
try:
logdigits = int(math.log10(self.enum.value))
except ValueError:
logdigits = 0
base_incr = 10 ** (logdigits - self.enum.digits)
else:
base_incr = 10 ** (-self.enum.digits)
self.value = self.enum.value + delta * base_incr
self.enum.update_value(self.value)
return True
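    # Illustrative examples of the increment logic above (values assumed):
    #   digits=2, scientific=False, value=1.25  -> base_incr = 10**-2 = 0.01
    #   digits=2, scientific=True,  value=1250. -> logdigits=3, base_incr = 10**1 = 10
    # so UP/DOWN and drag gestures step the value at the displayed precision.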
def drag_start(self):
if self.manager.pointer_obj == self.enum:
if self.manager.pointer_obj not in self.window.selected:
self.window.select(self.manager.pointer_obj)
self.drag_started = True
self.drag_start_x = self.manager.pointer_x
self.drag_start_y = self.manager.pointer_y
self.drag_last_x = self.manager.pointer_x
self.drag_last_y = self.manager.pointer_y
self.value = self.enum.value
return True
else:
return False
def drag_selected(self, delta=1.0):
if self.drag_started is False:
return False
dx = self.manager.pointer_x - self.drag_last_x
dy = self.manager.pointer_y - self.drag_last_y
self.drag_last_x = self.manager.pointer_x
self.drag_last_y = self.manager.pointer_y
self.changeval(-1.0*delta*dy)
return True
def drag_end(self):
if self.drag_started:
self.drag_started = False
return True
else:
return False
| gpl-2.0 |
wwchun123/sumatrapdf | scripts/buildbot.py | 15 | 4207 | import sys
import os
import shutil
import time
import datetime
import cPickle
import traceback
import s3
import util
import util2
import build
import subprocess
from util import file_remove_try_hard, pretty_print_secs
from util import Serializable, create_dir
from util import load_config, strip_empty_lines
from util import verify_path_exists, verify_started_in_right_directory
import runtests
TIME_BETWEEN_PRE_RELEASE_BUILDS_IN_SECS = 60 * 60 * 8 # 8hrs
@util2.memoize
def cert_path():
scripts_dir = os.path.realpath(os.path.dirname(__file__))
cert_path = os.path.join(scripts_dir, "cert.pfx")
return verify_path_exists(cert_path)
def email_msg(msg):
c = load_config()
if not c.HasNotifierEmail():
        print("email_build_failed() not run because not c.HasNotifierEmail()")
return
sender, senderpwd = c.GetNotifierEmailAndPwdMustExist()
subject = "SumatraPDF buildbot failed"
util.sendmail(sender, senderpwd, ["[email protected]"], subject, msg)
def verify_can_send_email():
c = load_config()
if not c.HasNotifierEmail():
print("can't run. scripts/config.py missing notifier_email and/or notifier_email_pwd")
sys.exit(1)
def is_git_up_to_date():
out = subprocess.check_output(["git", "pull"])
return "Already up-to-date" in out
def ignore_pre_release_build_error(s):
# it's possible we did a pre-release build outside of buildbot and that
# shouldn't be a fatal error
if "already exists in s3" in s:
return True
return False
def build_pre_release():
try:
cert_dst_path = os.path.join("scripts", "cert.pfx")
if not os.path.exists(cert_dst_path):
shutil.copyfile(cert_path(), cert_dst_path)
print("Building pre-release")
build.build_pre_release()
except BaseException, e:
s = str(e)
print(s)
# a bit of a hack. not every kind of failure should stop the buildbot
if not ignore_pre_release_build_error(s):
traceback.print_exc()
raise
def buildbot_loop():
time_of_last_change = None
while True:
if not is_git_up_to_date():
            # there was a new checkin; reset the wait time
time_of_last_change = datetime.datetime.now()
print("New checkins detected, sleeping for 15 minutes, %s until pre-release" %
pretty_print_secs(TIME_BETWEEN_PRE_RELEASE_BUILDS_IN_SECS))
time.sleep(60 * 15) # 15 mins
continue
if time_of_last_change == None:
            # no changes since last pre-release, sleep until there is a checkin
print("No checkins since last pre-release, sleeping for 15 minutes")
time.sleep(60 * 15) # 15 mins
continue
td = datetime.datetime.now() - time_of_last_change
secs_until_prerelease = TIME_BETWEEN_PRE_RELEASE_BUILDS_IN_SECS - \
int(td.total_seconds())
if secs_until_prerelease > 0:
print("Sleeping for 15 minutes, %s until pre-release" %
pretty_print_secs(secs_until_prerelease))
time.sleep(60 * 15) # 15 mins
continue
build_pre_release()
time_of_last_change = None
def main():
verify_can_send_email()
cert_path() # early check and ensures value is memoized
verify_started_in_right_directory()
# to avoid problems, we build a separate source tree, just for the buildbot
src_path = os.path.join("..", "sumatrapdf_buildbot")
verify_path_exists(src_path)
conf = load_config()
s3.set_secrets(conf.aws_access, conf.aws_secret)
s3.set_bucket("kjkpub")
os.chdir(src_path)
# test_email_tests_failed()
#build_version("8190", skip_release=True)
# test_build_html_index()
# build_sizes_json()
# build_curr(force=True)
buildbot_loop()
if __name__ == "__main__":
try:
main()
except BaseException, e:
msg = "buildbot failed\nException: " + str(e) + "\n" + traceback.format_exc() + "\n"
print(msg)
email_msg(msg)
| gpl-3.0 |
zaina/nova | nova/db/sqlalchemy/migrate_repo/versions/274_update_instances_project_id_index.py | 73 | 1614 | # Copyright 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from sqlalchemy import MetaData, Table, Index
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
"""Change instances (project_id) index to cover (project_id, deleted)."""
meta = MetaData(bind=migrate_engine)
    # Indexes can't be changed in place, so we create the new one and delete
    # the old one
instances = Table('instances', meta, autoload=True)
for index in instances.indexes:
if [c.name for c in index.columns] == ['project_id', 'deleted']:
LOG.info(_LI('Skipped adding instances_project_id_deleted_idx '
'because an equivalent index already exists.'))
break
else:
index = Index('instances_project_id_deleted_idx',
instances.c.project_id, instances.c.deleted)
index.create()
for index in instances.indexes:
if [c.name for c in index.columns] == ['project_id']:
index.drop()
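# Roughly equivalent SQL for the upgrade above (illustrative; the exact
# statements and the old index name depend on the backend and schema history):
#   CREATE INDEX instances_project_id_deleted_idx ON instances (project_id, deleted);
#   DROP INDEX <old_project_id_index> ON instances;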
| apache-2.0 |
vcgato29/Tomb | extras/dismissed/pytomb/tomblib/tomb.py | 10 | 3027 | '''
Module structure:
this contains a class, which is really just a collection of functions
(the methods are all static).
It's meant to behave in a way which is similar to the command line interface.
Notes: consider moving to a more typical usage (instantiate, then use methods)
to make it more configurable (i.e. set the tomb executable path).
'''
import subprocess
class Tomb(object):
'''
    This is just a collection of static methods, so you should NOT instantiate it.
    The methods are meant to resemble the command line interface as much as
    possible.
    There is no support for things like threading, multiprocessing, whatever.
If you want to interact asynchronously with tomb, just do it in a separate
layer.
'''
tombexec = 'tomb'
def _check_exec_path(self):
'''Checks, using which, if tomb is available.
Returns None on error, the path on success.
'''
try:
path = subprocess.check_output(['which', 'tomb'])
except subprocess.CalledProcessError:
return None
return path
@classmethod
def check(cls, command, stdout=None, stderr=None, no_color=True, ignore_swap=False):
args = [command]
if no_color:
args += ['--no-color']
if ignore_swap:
args += ['--ignore-swap']
try:
subprocess.check_call([cls.tombexec, 'check'] + args, stdout=stdout, stderr=stderr)
except subprocess.CalledProcessError:
return False
return True
@classmethod
def create(cls, tombpath, tombsize,keypath, stdout=None, stderr=None,
no_color=True, ignore_swap=False):
'''If keypath is None, it will be created adjacent to the tomb.
This is unsafe, and you should NOT do it.
Note that this will invoke pinentry
no_color is supported as an option for short-lived retrocompatibility:
it will be removed as soon as no-color will be integrated
'''
args = [tombpath, '-s', tombsize]
if keypath is not None:
args += ['-k', keypath]
if no_color:
args += ['--no-color']
if ignore_swap:
args += ['--ignore-swap']
try:
subprocess.check_call([cls.tombexec, 'create'] + args, stdout=stdout, stderr=stderr)
except subprocess.CalledProcessError:
return False
return True
@classmethod
def open(cls, tombpath,keypath=None, no_color=True, ignore_swap=False):
args = [tombpath]
if keypath is not None:
args += ['-k', keypath]
if no_color:
args += ['--no-color']
if ignore_swap:
args += ['--ignore-swap']
try:
subprocess.check_call([cls.tombexec, 'open'] + args)
except subprocess.CalledProcessError:
return False
return True
if __name__ == '__main__':
#Debug stuff. Also useful for an example
print Tomb.create('/tmp/a.tomb', '10', '/tmp/akey')
| gpl-3.0 |
kamladi/textback-web | twilio/rest/resources/sip_domains.py | 1 | 3185 | from twilio.rest.resources import InstanceResource, ListResource
class IpAccessControlListMapping(InstanceResource):
def delete(self):
"""
Remove this mapping (disassociate the ACL from the Domain).
"""
return self.parent.delete_instance(self.name)
class IpAccessControlListMappings(ListResource):
name = "IpAccessControlListMappings"
key = "ip_access_control_list_mappings"
instance = IpAccessControlListMapping
    def create(self, ip_access_control_list_sid, **kwargs):
        """Add an :class:`IpAccessControlListMapping` to this domain.
        :param ip_access_control_list_sid: String identifier for an existing
        :class:`IpAccessControlList`.
"""
kwargs.update(ip_access_control_list_sid=ip_access_control_list_sid)
return self.create_instance(kwargs)
    def delete(self, sid):
        """Remove an :class:`IpAccessControlListMapping` from this domain.
        :param sid: String identifier for an IpAccessControlList resource
"""
return self.delete_instance(sid)
class CredentialListMapping(InstanceResource):
def delete(self):
"""
Remove this mapping (disassociate the CredentialList from the Domain).
"""
return self.parent.delete_instance(self.name)
class CredentialListMappings(ListResource):
name = "CredentialListMappings"
key = "credential_list_mappings"
instance = CredentialListMapping
def create(self, credential_list_sid, **kwargs):
"""Add a :class:`CredentialListMapping` to this domain.
:param sid: String identifier for an existing
:class:`CredentialList`.
"""
kwargs.update(credential_list_sid=credential_list_sid)
return self.create_instance(kwargs)
def delete(self, sid):
"""Remove a :class:`CredentialListMapping` from this domain.
:param sid: String identifier for a CredentialList resource
"""
return self.delete_instance(sid)
class SipDomain(InstanceResource):
subresources = [IpAccessControlListMappings, CredentialListMappings]
def update(self, **kwargs):
"""
Update this :class:`SipDomain`
"""
return self.parent.update_instance(self.name, kwargs)
def delete(self):
"""
Delete this domain.
"""
return self.parent.delete_instance(self.name)
class SipDomains(ListResource):
name = "Domains"
key = "sip_domains"
instance = SipDomain
def create(self, domain_name, **kwargs):
""" Create a :class:`SipDomain`.
:param domain_name: A unique domain name ending in '.sip.twilio.com'
"""
kwargs['domain_name'] = domain_name
return self.create_instance(kwargs)
def update(self, sid, **kwargs):
"""
Update a :class:`SipDomain`
:param sid: String identifier for a SipDomain resource
"""
return self.update_instance(sid, kwargs)
def delete(self, sid):
"""
Delete a :class:`SipDomain`.
:param sid: String identifier for a SipDomain resource
"""
return self.delete_instance(sid)
| mit |
realfastvla/rfpipe | rfpipe/search.py | 1 | 51205 | from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input#, str (numba signature bug)
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
from io import open
import numpy as np
from numba import jit, guvectorize, int64
import pyfftw
from kalman_detector import kalman_prepare_coeffs, kalman_significance
from concurrent import futures
from itertools import cycle
#from threading import Lock
import logging
logger = logging.getLogger(__name__)
try:
import rfgpu
except ImportError:
pass
###
# packaged searching functions
###
def dedisperse_search_cuda(st, segment, data, devicenum=None):
""" Run dedispersion, resample for all dm and dt.
Returns candcollection with optional clustering.
Grid and image on GPU (uses rfgpu from separate repo).
Uses state to define integrations to image based on segment, dm, and dt.
devicenum is int or tuple of ints that set gpu(s) to use.
If not set, then it can be inferred with distributed.
"""
from rfpipe import candidates, util
assert st.dtarr[0] == 1, "st.dtarr[0] assumed to be 1"
assert all([st.dtarr[dtind]*2 == st.dtarr[dtind+1]
for dtind in range(len(st.dtarr)-1)]), ("dtarr must increase "
"by factors of 2")
anydata = np.any(data)
if not anydata or st.prefs.searchtype is None:
if not anydata:
logger.info("Data is all zeros. Skipping search.")
return candidates.CandCollection(prefs=st.prefs,
metadata=st.metadata)
if isinstance(devicenum, int):
devicenums = (devicenum,)
elif isinstance(devicenum, str):
devicenums = (int(devicenum),)
elif isinstance(devicenum, tuple):
assert isinstance(devicenum[0], int)
devicenums = devicenum
elif devicenum is None:
# assume first gpu, but try to infer from worker name
devicenum = 0
try:
from distributed import get_worker
name = get_worker().name
devicenum = int(name.split('g')[1])
devicenums = (devicenum, devicenum+1) # TODO: smarter multi-GPU
logger.debug("Using name {0} to set GPU devicenum to {1}"
.format(name, devicenum))
except IndexError:
devicenums = (devicenum,)
logger.warning("Could not parse worker name {0}. Using default GPU devicenum {1}"
.format(name, devicenum))
except ValueError:
devicenums = (devicenum,)
logger.warning("No worker found. Using default GPU devicenum {0}"
.format(devicenum))
except ImportError:
devicenums = (devicenum,)
logger.warning("distributed not available. Using default GPU devicenum {0}"
.format(devicenum))
assert isinstance(devicenums, tuple)
logger.info("Using gpu devicenum(s): {0}".format(devicenums))
pc = st.get_pc(segment)
uvw = util.get_uvw_segment(st, segment, pc_mjd=pc, pc_radec=pc)
upix = st.npixx
vpix = st.npixy//2 + 1
grids = [rfgpu.Grid(st.nbl, st.nchan, st.readints, upix, vpix, dn) for dn in devicenums]
images = [rfgpu.Image(st.npixx, st.npixy, dn) for dn in devicenums]
for image in images:
image.add_stat('rms')
image.add_stat('pix')
# Data buffers on GPU
# Vis buffers identical on all GPUs. image buffer unique.
vis_raw = rfgpu.GPUArrayComplex((st.nbl, st.nchan, st.readints),
devicenums)
vis_grids = [rfgpu.GPUArrayComplex((upix, vpix), (dn,)) for dn in devicenums]
img_grids = [rfgpu.GPUArrayReal((st.npixx, st.npixy), (dn,)) for dn in devicenums]
# locks = [Lock() for dn in devicenums]
# Convert uv from lambda to us
u, v, w = uvw
u_us = 1e6*u[:, 0]/(1e9*st.freq[0])
v_us = 1e6*v[:, 0]/(1e9*st.freq[0])
# move Stokes I data in (assumes dual pol data)
vis_raw.data[:] = np.rollaxis(data.mean(axis=3), 0, 3)
# uv filtering
if st.prefs.uvmin is not None:
uvd = np.sqrt(u[:,0]**2 + v[:,0]**2)
short = uvd < st.prefs.uvmin
vis_raw.data[short] = 0j
vis_raw.h2d() # Send it to GPU memory of all
for grid in grids:
grid.set_uv(u_us, v_us) # u, v in us
grid.set_freq(st.freq*1e3) # freq in MHz
grid.set_cell(st.uvres) # uv cell size in wavelengths (== 1/FoV(radians))
grid.compute()
grid.conjugate(vis_raw)
# calc fraction of data gridded (any grid will do)
gridfrac = grid.get_nnz()/(st.nbl*st.nchan)
logger.info("Gridded {0}% of all baselines and channels".format(100*gridfrac))
# some prep if kalman significance is needed
if st.prefs.searchtype in ['imagek', 'armkimage', 'armk']:
# TODO: check that this is ok if pointing at bright source
spec_std, sig_ts, kalman_coeffs = util.kalman_prep(data)
if not np.all(sig_ts):
logger.info("sig_ts all zeros. Skipping search.")
return candidates.CandCollection(prefs=st.prefs,
metadata=st.metadata)
else:
spec_std, sig_ts, kalman_coeffs = None, [], []
# place to hold intermediate result lists
canddict = {}
canddict['candloc'] = []
canddict['l1'] = []
canddict['m1'] = []
canddict['snr1'] = []
canddict['immax1'] = []
for feat in st.searchfeatures:
canddict[feat] = []
for dtind in range(len(st.dtarr)):
if dtind > 0:
for grid in grids:
logger.info("Downsampling for dn {0}"
.format(devicenums[grids.index(grid)]))
grid.downsample(vis_raw)
# cy = cycle(devicenums)
threads = []
with futures.ThreadPoolExecutor(max_workers=2*len(devicenums)) as ex:
# for dmind, i_dn in list(zip(range(len(st.dmarr)), cy)):
# threads.append(ex.submit(rfgpu_gridimage, st, segment,
# grids[i_dn], images[i_dn], vis_raw,
# vis_grids[i_dn], img_grids[i_dn],
# dmind, dtind, devicenums[i_dn],
# locks[i_dn]))
ndm = len(st.dmarr)
ndn = len(devicenums)
for i_dn in range(ndn):
# dminds = list(range(ndm)[i_dn*ndm//ndn:(i_dn+1)*ndm//ndn]
dminds = [list(range(0+i, ndm, ndn)) for i in range(ndn)]
threads.append(ex.submit(rfgpu_gridimage, st, segment,
grids[i_dn], images[i_dn], vis_raw,
vis_grids[i_dn], img_grids[i_dn],
dminds[i_dn], dtind, devicenums[i_dn],
data=data, uvw=uvw, spec_std=spec_std,
sig_ts=sig_ts, kalman_coeffs=kalman_coeffs))
for thread in futures.as_completed(threads):
candlocs, l1s, m1s, snr1s, immax1s, snrks = thread.result()
canddict['candloc'] += candlocs
canddict['l1'] += l1s
canddict['m1'] += m1s
canddict['snr1'] += snr1s
canddict['immax1'] += immax1s
cc = candidates.make_candcollection(st, **canddict)
logger.info("First pass found {0} candidates in seg {1}."
.format(len(cc), segment))
# check whether too many candidates
if st.prefs.max_candfrac:
total_integrations = 0
for dtind in range(len(st.dtarr)):
for dmind in range(len(st.dmarr)):
total_integrations += len(st.get_search_ints(segment, dmind,
dtind))
if len(cc)/total_integrations > st.prefs.max_candfrac:
logger.warning("Too many candidates ({0} in {1} images). Flagging."
.format(len(cc), total_integrations))
cc = candidates.make_candcollection(st,
candloc=[(0, -1, 0, 0, 0)],
ncands=[len(cc)])
# add cluster labels to candidates
if st.prefs.clustercands:
cc = candidates.cluster_candidates(cc)
return cc
def rfgpu_gridimage(st, segment, grid, image, vis_raw, vis_grid, img_grid,
dminds, dtind, devicenum, data=None, uvw=None, spec_std=None,
sig_ts=[], kalman_coeffs=[]):
""" Dedisperse, grid, image, threshold with rfgpu
"""
from rfpipe import util
beamnum = 0
candlocs, l1s, m1s, snr1s, immax1s, snrks = [], [], [], [], [], []
for dmind in dminds:
delay = util.calc_delay(st.freq, st.freq.max(), st.dmarr[dmind],
st.inttime)
integrations = st.get_search_ints(segment, dmind, dtind)
if len(integrations) != 0:
minint = min(integrations)
maxint = max(integrations)
logger.info('Imaging {0} ints ({1}-{2}) in seg {3} at DM/dt {4:.1f}/{5}'
' with image {6}x{7} (uvres {8}) on GPU {9}'
.format(len(integrations), minint, maxint, segment,
st.dmarr[dmind], st.dtarr[dtind], st.npixx,
st.npixy, st.uvres, devicenum))
grid.set_shift(delay >> dtind) # dispersion shift per chan in samples
zeros = []
for i in integrations:
grid.operate(vis_raw, vis_grid, i)
image.operate(vis_grid, img_grid)
# calc snr
stats = image.stats(img_grid)
if stats['rms'] != 0.:
snr1 = stats['max']/stats['rms']
else:
snr1 = 0.
zeros.append(i)
# threshold image
if snr1 > st.prefs.sigma_image1:
candloc = (segment, i, dmind, dtind, beamnum)
xpeak = stats['xpeak']
ypeak = stats['ypeak']
l1, m1 = st.pixtolm((xpeak+st.npixx//2, ypeak+st.npixy//2))
if st.prefs.searchtype == 'image':
logger.info("Got one! SNR1 {0:.1f} candidate at {1} and (l, m) = ({2:.5f}, {3:.5f})"
.format(snr1, candloc, l1, m1))
candlocs.append(candloc)
l1s.append(l1)
m1s.append(m1)
snr1s.append(snr1)
immax1s.append(stats['max'])
elif st.prefs.searchtype == 'imagek':
# TODO: implement phasing on GPU
data_corr = dedisperseresample(data, delay,
st.dtarr[dtind],
parallel=st.prefs.nthread > 1,
resamplefirst=True)
spec = data_corr.take([i], axis=0)
util.phase_shift(spec, uvw=uvw, dl=l1, dm=m1)
spec = spec[0].real.mean(axis=2).mean(axis=0)
# TODO: this significance can be biased low if averaging in long baselines that are not phased well
# TODO: spec should be calculated from baselines used to measure l,m?
if np.count_nonzero(spec)/len(spec) > 1-st.prefs.max_zerofrac:
significance_kalman = -kalman_significance(spec, spec_std,
sig_ts=sig_ts,
coeffs=kalman_coeffs)
snrk = (2*significance_kalman)**0.5
else:
logger.warning("snrk set to 0, since {0}/{1} are zeroed".format(len(spec)-np.count_nonzero(spec), len(spec)))
snrk = 0.
snrtot = (snrk**2 + snr1**2)**0.5
if snrtot > (st.prefs.sigma_kalman**2 + st.prefs.sigma_image1**2)**0.5:
logger.info("Got one! SNR1 {0:.1f} and SNRk {1:.1f} candidate at {2} and (l,m) = ({3:.5f}, {4:.5f})"
.format(snr1, snrk, candloc, l1, m1))
candlocs.append(candloc)
l1s.append(l1)
m1s.append(m1)
snr1s.append(snr1)
immax1s.append(stats['max'])
snrks.append(snrk)
elif st.prefs.searchtype == 'armkimage':
raise NotImplementedError
elif st.prefs.searchtype == 'armk':
raise NotImplementedError
elif st.prefs.searchtype is not None:
logger.warning("searchtype {0} not recognized"
.format(st.prefs.searchtype))
if zeros:
logger.warning("rfgpu rms is 0 in ints {0}."
.format(zeros))
return candlocs, l1s, m1s, snr1s, immax1s, snrks
def dedisperse_search_fftw(st, segment, data, wisdom=None):
""" Run dedispersion, resample for all dm and dt.
Returns candcollection with optional clustering.
Integrations can define subset of all available in data to search.
Default will take integrations not searched in neighboring segments.
** only supports threshold > image max (no min)
** dmind, dtind, beamnum assumed to represent current state of data
"""
from rfpipe import candidates, util
anydata = np.any(data)
if not anydata or st.prefs.searchtype is None:
if not anydata:
logger.info("Data is all zeros. Skipping search.")
return candidates.CandCollection(prefs=st.prefs,
metadata=st.metadata)
# some prep if kalman significance is needed
if st.prefs.searchtype in ['imagek', 'armkimage', 'armk']:
# TODO: check that this is ok if pointing at bright source
spec_std, sig_ts, kalman_coeffs = util.kalman_prep(data)
if not np.all(sig_ts):
logger.info("sig_ts all zeros. Skipping search.")
return candidates.CandCollection(prefs=st.prefs,
metadata=st.metadata)
else:
spec_std, sig_ts, kalman_coeffs = None, [], []
beamnum = 0
pc = st.get_pc(segment)
uvw = util.get_uvw_segment(st, segment, pc_mjd=pc, pc_radec=pc)
# place to hold intermediate result lists
canddict = {}
canddict['candloc'] = []
for feat in st.searchfeatures:
canddict[feat] = []
for dtind in range(len(st.dtarr)):
for dmind in range(len(st.dmarr)):
# set search integrations
integrations = st.get_search_ints(segment, dmind, dtind)
if len(integrations) == 0:
continue
minint = min(integrations)
maxint = max(integrations)
logger.info('{0} search of {1} ints ({2}-{3}) in seg {4} at DM/dt '
'{5:.1f}/{6} with image {7}x{8} (uvres {9}) with fftw'
.format(st.prefs.searchtype, len(integrations), minint,
maxint, segment, st.dmarr[dmind],
st.dtarr[dtind], st.npixx,
st.npixy, st.uvres))
# correct data
delay = util.calc_delay(st.freq, st.freq.max(), st.dmarr[dmind],
st.inttime)
data_corr = dedisperseresample(data, delay, st.dtarr[dtind],
parallel=st.prefs.nthread > 1,
resamplefirst=False)
# run search
if st.prefs.searchtype in ['image', 'imagek']:
images = grid_image(data_corr, uvw, st.npixx, st.npixy, st.uvres,
'fftw', st.prefs.nthread, wisdom=wisdom,
integrations=integrations)
for i, image in enumerate(images):
immax1 = image.max()
snr1 = immax1/image.std()
if snr1 > st.prefs.sigma_image1:
candloc = (segment, integrations[i], dmind, dtind, beamnum)
l1, m1 = st.pixtolm(np.where(image == immax1))
# if set, use sigma_kalman as second stage filter
if st.prefs.searchtype == 'imagek':
spec = data_corr.take([integrations[i]], axis=0)
util.phase_shift(spec, uvw=uvw, dl=l1, dm=m1)
spec = spec[0].real.mean(axis=2).mean(axis=0)
# TODO: this significance can be biased low if averaging in long baselines that are not phased well
# TODO: spec should be calculated from baselines used to measure l,m?
if np.count_nonzero(spec)/len(spec) > 1-st.prefs.max_zerofrac:
significance_kalman = -kalman_significance(spec, spec_std,
sig_ts=sig_ts,
coeffs=kalman_coeffs)
snrk = (2*significance_kalman)**0.5
else:
logger.warning("snrk set to 0, since {0}/{1} are zeroed".format(len(spec)-np.count_nonzero(spec), len(spec)))
snrk = 0.
snrtot = (snrk**2 + snr1**2)**0.5
if snrtot > (st.prefs.sigma_kalman**2 + st.prefs.sigma_image1**2)**0.5:
logger.info("Got one! SNR1 {0:.1f} and SNRk {1:.1f} candidate at {2} and (l,m) = ({3:.5f}, {4:.5f})"
.format(snr1, snrk, candloc, l1, m1))
canddict['candloc'].append(candloc)
canddict['l1'].append(l1)
canddict['m1'].append(m1)
canddict['snr1'].append(snr1)
canddict['immax1'].append(immax1)
canddict['snrk'].append(snrk)
elif st.prefs.searchtype == 'image':
logger.info("Got one! SNR1 {0:.1f} candidate at {1} and (l, m) = ({2:.5f}, {3:.5f})"
.format(snr1, candloc, l1, m1))
canddict['candloc'].append(candloc)
canddict['l1'].append(l1)
canddict['m1'].append(m1)
canddict['snr1'].append(snr1)
canddict['immax1'].append(immax1)
elif st.prefs.searchtype in ['armkimage', 'armk']:
armk_candidates = search_thresh_armk(st, data_corr, uvw,
integrations=integrations,
spec_std=spec_std,
sig_ts=sig_ts,
coeffs=kalman_coeffs)
for candind, snrarms, snrk, armloc, peakxy, lm in armk_candidates:
candloc = (segment, candind, dmind, dtind, beamnum)
# if set, use sigma_kalman as second stage filter
if st.prefs.searchtype == 'armkimage':
image = grid_image(data_corr, uvw, st.npixx_full,
st.npixy_full, st.uvres, 'fftw',
st.prefs.nthread,
wisdom=wisdom, integrations=candind)
peakx, peaky = np.where(image[0] == image[0].max())
l1, m1 = st.calclm(st.npixx_full, st.npixy_full,
st.uvres, peakx[0], peaky[0])
immax1 = image.max()
snr1 = immax1/image.std()
if snr1 > st.prefs.sigma_image1:
logger.info("Got one! SNRarms {0:.1f} and SNRk "
"{1:.1f} and SNR1 {2:.1f} candidate at"
" {3} and (l,m) = ({4:.5f}, {5:.5f})"
.format(snrarms, snrk, snr1,
candloc, l1, m1))
canddict['candloc'].append(candloc)
canddict['l1'].append(l1)
canddict['m1'].append(m1)
canddict['snrarms'].append(snrarms)
canddict['snrk'].append(snrk)
canddict['snr1'].append(snr1)
canddict['immax1'].append(immax1)
elif st.prefs.searchtype == 'armk':
l1, m1 = lm
logger.info("Got one! SNRarms {0:.1f} and SNRk {1:.1f} "
"candidate at {2} and (l,m) = ({3:.5f}, {4:.5f})"
.format(snrarms, snrk, candloc, l1, m1))
canddict['candloc'].append(candloc)
canddict['l1'].append(l1)
canddict['m1'].append(m1)
canddict['snrarms'].append(snrarms)
canddict['snrk'].append(snrk)
elif st.prefs.searchtype is not None:
                raise NotImplementedError("only searchtype=image, imagek, armk, armkimage implemented")
# save search results and its features
cc = candidates.make_candcollection(st, **canddict)
logger.info("First pass found {0} candidates in seg {1}."
.format(len(cc), segment))
# check whether too many candidates
if st.prefs.max_candfrac:
total_integrations = 0
for dtind in range(len(st.dtarr)):
for dmind in range(len(st.dmarr)):
total_integrations += len(st.get_search_ints(segment, dmind,
dtind))
if len(cc)/total_integrations > st.prefs.max_candfrac:
logger.warning("Too many candidates ({0} in {1} images). Flagging."
.format(len(cc), total_integrations))
cc = candidates.make_candcollection(st,
candloc=[(0, -1, 0, 0, 0)],
ncands=[len(cc)])
# add cluster labels to candidates
if st.prefs.clustercands:
cc = candidates.cluster_candidates(cc)
# TODO: find a way to return values as systematic data quality test
return cc
def grid_image(data, uvw, npixx, npixy, uvres, fftmode, nthread, wisdom=None,
integrations=None):
""" Grid and image data.
Optionally image integrations in list i.
fftmode can be fftw or cuda.
nthread is number of threads to use
"""
if integrations is None:
integrations = list(range(len(data)))
elif not isinstance(integrations, list):
integrations = [integrations]
if fftmode == 'fftw':
logger.debug("Imaging with fftw on {0} threads".format(nthread))
grids = grid_visibilities(data.take(integrations, axis=0), uvw, npixx,
npixy, uvres, parallel=nthread > 1)
images = image_fftw(grids, nthread=nthread, wisdom=wisdom)
elif fftmode == 'cuda':
logger.warning("Imaging with cuda not yet supported.")
images = image_cuda()
else:
logger.warning("Imaging fftmode {0} not supported.".format(fftmode))
return images
def image_cuda():
""" Run grid and image with rfgpu
TODO: update to use rfgpu
"""
pass
def image_fftw(grids, nthread=1, wisdom=None, axes=(1, 2)):
""" Plan pyfftw inverse fft and run it on input grids.
Allows fft on 1d (time, npix) or 2d (time, npixx, npixy) grids.
axes refers to dimensions of fft, so (1, 2) will do 2d fft on
last two axes of (time, npixx, nipxy) data, while (1) will do
1d fft on last axis of (time, npix) data.
Returns recentered fftoutput for each integration.
"""
if wisdom is not None:
logger.debug('Importing wisdom...')
pyfftw.import_wisdom(wisdom)
logger.debug("Starting pyfftw ifft2")
images = np.zeros_like(grids)
# images = pyfftw.interfaces.numpy_fft.ifft2(grids, auto_align_input=True,
# auto_contiguous=True,
# planner_effort='FFTW_MEASURE',
# overwrite_input=True,
# threads=nthread)
# nints, npixx, npixy = images.shape
#
# return np.fft.fftshift(images.real, (npixx//2, npixy//2))
fft_obj = pyfftw.FFTW(grids, images, axes=axes, direction="FFTW_BACKWARD")
fft_obj.execute()
logger.debug('Recentering fft output...')
return np.fft.fftshift(images.real, axes=axes)
def grid_visibilities(data, uvw, npixx, npixy, uvres, parallel=False):
""" Grid visibilities into rounded uv coordinates """
logger.debug('Gridding {0} ints at ({1}, {2}) pix and {3} '
'resolution in {4} mode.'.format(len(data), npixx, npixy,
uvres,
['single', 'parallel'][parallel]))
u, v, w = uvw
grids = np.zeros(shape=(data.shape[0], npixx, npixy),
dtype=np.complex64)
if parallel:
_ = _grid_visibilities_gu(data, u, v, w, npixx, npixy, uvres, grids)
else:
_grid_visibilities_jit(data, u, v, w, npixx, npixy, uvres, grids)
return grids
@jit(nogil=True, nopython=True, cache=True)
def _grid_visibilities_jit(data, u, v, w, npixx, npixy, uvres, grids):
b""" Grid visibilities into rounded uv coordinates using jit on single core.
Rounding not working here, so minor differences with original and
guvectorized versions.
"""
nint, nbl, nchan, npol = data.shape
for j in range(nbl):
for k in range(nchan):
ubl = int64(np.round(u[j, k]/uvres, 0))
vbl = int64(np.round(v[j, k]/uvres, 0))
            if (np.abs(ubl) < npixx//2) and (np.abs(vbl) < npixy//2):
umod = int64(np.mod(ubl, npixx))
vmod = int64(np.mod(vbl, npixy))
for i in range(nint):
for l in range(npol):
grids[i, umod, vmod] += data[i, j, k, l]
return grids
@guvectorize([str("void(complex64[:,:,:], float32[:,:], float32[:,:], float32[:,:], int64, int64, int64, complex64[:,:])")],
str("(n,m,l),(n,m),(n,m),(n,m),(),(),(),(o,p)"),
target='parallel', nopython=True)
def _grid_visibilities_gu(data, us, vs, ws, npixx, npixy, uvres, grid):
b""" Grid visibilities into rounded uv coordinates for multiple cores"""
ubl = np.zeros(us.shape, dtype=int64)
vbl = np.zeros(vs.shape, dtype=int64)
for j in range(data.shape[0]):
for k in range(data.shape[1]):
ubl[j, k] = int64(np.round(us[j, k]/uvres, 0))
vbl[j, k] = int64(np.round(vs[j, k]/uvres, 0))
if (np.abs(ubl[j, k]) < npixx//2) and \
(np.abs(vbl[j, k]) < npixy//2):
u = np.mod(ubl[j, k], npixx)
v = np.mod(vbl[j, k], npixy)
for l in range(data.shape[2]):
grid[u, v] += data[j, k, l]
###
# dedispersion and resampling
###
def dedisperse(data, delay, parallel=False):
""" Shift data in time (axis=0) by channel-dependent value given in
delay. Returns new array with time length shortened by max delay in
    integrations. Wraps _dedisperse_jit/_dedisperse_gu to add logging.
    Set parallel=True to use the multicore version.
"""
if not np.any(data):
return np.array([])
logger.info('Dedispersing up to delay shift of {0} integrations'
.format(delay.max()))
nint, nbl, nchan, npol = data.shape
newsh = (nint-delay.max(), nbl, nchan, npol)
assert nchan == len(delay), "Number of channels in delay must be same as in data"
if parallel:
data = data.copy()
_ = _dedisperse_gu(np.swapaxes(data, 0, 1), delay)
return data[0:len(data)-delay.max()]
else:
result = np.zeros(shape=newsh, dtype=data.dtype)
_dedisperse_jit(np.require(data, requirements='W'), delay, result)
return result
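# Illustrative example (values assumed): with delay = [0, 0, 1, 2] samples over
# four channels and nint = 100 input integrations, the output has
# 100 - delay.max() = 98 integrations and output[i, :, k] = input[i + delay[k], :, k],
# i.e. each channel is shifted so the sweep aligns with the highest frequency
# (delay is computed relative to freq.max() by util.calc_delay in the callers).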
@jit(nogil=True, nopython=True, cache=True)
def _dedisperse_jit(data, delay, result):
nint, nbl, nchan, npol = data.shape
for k in range(nchan):
for i in range(nint-delay.max()):
iprime = i + delay[k]
for l in range(npol):
for j in range(nbl):
result[i, j, k, l] = data[iprime, j, k, l]
@guvectorize([str("void(complex64[:,:,:], int64[:])")], str("(n,m,l),(m)"),
target='parallel', nopython=True)
def _dedisperse_gu(data, delay):
b""" Multicore dedispersion via numpy broadcasting.
Requires that data be in axis order (nbl, nint, nchan, npol), so typical
    input visibility array must have view from "np.swapaxes(data, 0, 1)".
"""
if delay.max() > 0:
for i in range(data.shape[0]-delay.max()):
for j in range(data.shape[1]):
iprime = i + delay[j]
for k in range(data.shape[2]):
data[i, j, k] = data[iprime, j, k]
def dedisperse_roll(data, delay):
""" Using numpy roll to dedisperse.
This avoids trimming data to area with valid delays,
which is appropriate for dm-time data generation.
TODO: check that -delay is correct way
"""
nf, nt = data.shape
assert nf == len(delay), "Delay must be same length as data freq axis"
dataout = np.vstack([np.roll(arr, -delay[i]) for i, arr in enumerate(data)])
return dataout
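# Illustrative behaviour of dedisperse_roll: each frequency row i is rolled left
# by delay[i] samples with wrap-around, so no integrations are trimmed; wrapped
# samples alias in from the end of the row, which is tolerable here because the
# result is only summed over frequency for the dm-time plot.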
def make_dmt(data, dmi, dmf, dmsteps, freqs, inttime, mode='GPU', devicenum=0):
""" Disperse data to a range of dms.
Good transients have characteristic shape in dm-time space.
"""
if mode == 'GPU':
dmt = gpu_dmtime(data, dmi, dmf, dmsteps, freqs, inttime,
devicenum=devicenum)
else:
dmt = cpu_dmtime(data, dmi, dmf, dmsteps, freqs, inttime)
return dmt
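# Illustrative call (argument values assumed):
#   dmt = make_dmt(ft, dmi=0., dmf=1000., dmsteps=256, freqs=st.freq,
#                  inttime=st.inttime, mode='CPU')
# where ft has shape (nchan, ntime); the result has shape (dmsteps, ntime) and a
# real dispersed pulse shows the characteristic bow-tie shape peaking at its true DM.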
def cpu_dmtime(data, dmi, dmf, dmsteps, freqs, inttime):
""" Make dm-time plot. Called by make_dmt
"""
from rfpipe import util
dmt = np.zeros((dmsteps, data.shape[1]), dtype=np.float32)
dm_list = np.linspace(dmi, dmf, dmsteps)
for ii, dm in enumerate(dm_list):
delay = util.calc_delay(freqs, freqs.max(), dm, inttime)
dmt[ii, :] = dedisperse_roll(data, delay).sum(axis=0)
return dmt
def gpu_dmtime(ft, dm_i, dm_f, dmsteps, freqs, inttime, devicenum=0):
""" Make dm-time plot. Called by make_dmt
"""
from numba import cuda
import math
from rfpipe import util
import os
os.environ['NUMBA_CUDA_MAX_PENDING_DEALLOCS_COUNT'] = '1'
dm_list = np.linspace(dm_i, dm_f, dmsteps)
delays = np.zeros((dmsteps, ft.shape[0]), dtype=np.int32)
for ii, dm in enumerate(dm_list):
delays[ii,:] = util.calc_delay(freqs, freqs.max(), dm, inttime).astype('int32')
cuda.select_device(devicenum)
stream = cuda.stream()
dm_time = np.zeros((delays.shape[0], int(ft.shape[1])), dtype=np.float32)
@cuda.jit(fastmath=True)
def gpu_dmt(cand_data_in, all_delays, cand_data_out):
ii, jj, kk = cuda.grid(3)
if ii < cand_data_in.shape[0] and jj < cand_data_out.shape[1] and kk < all_delays.shape[1]:
cuda.atomic.add(cand_data_out, (kk, jj), cand_data_in[ii,
(jj + all_delays[ii,kk])%cand_data_in.shape[1]])
with cuda.defer_cleanup():
all_delays = cuda.to_device(delays.T, stream=stream)
dmt_return = cuda.device_array(dm_time.shape, dtype=np.float32, stream=stream)
cand_data_in = cuda.to_device(np.array(ft, dtype=np.float32), stream=stream)
threadsperblock = (16, 4, 16)
blockspergrid_x = math.ceil(cand_data_in.shape[0] / threadsperblock[0])
blockspergrid_y = math.ceil(cand_data_in.shape[1] / threadsperblock[1])
blockspergrid_z = math.ceil(dm_time.shape[0] / threadsperblock[2])
blockspergrid = (blockspergrid_x, blockspergrid_y, blockspergrid_z)
gpu_dmt[blockspergrid, threadsperblock, stream](cand_data_in, all_delays, dmt_return)
dm_time = dmt_return.copy_to_host(stream=stream)
# cuda.close()
return dm_time
def resample(data, dt, parallel=False):
    """ Resample (integrate) by factor dt and return new data structure.
    Wraps _resample_jit/_resample_gu to add logging.
    Set parallel=True to use the multicore version.
"""
if not np.any(data):
return np.array([])
len0 = data.shape[0]
logger.info('Resampling data of length {0} by a factor of {1}'
.format(len0, dt))
nint, nbl, nchan, npol = data.shape
newsh = (int64(nint//dt), nbl, nchan, npol)
if parallel:
data = data.copy()
_ = _resample_gu(np.swapaxes(data, 0, 3), dt)
return data[:len0//dt]
else:
result = np.zeros(shape=newsh, dtype=data.dtype)
_resample_jit(np.require(data, requirements='W'), dt, result)
return result
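# Illustrative example: with nint = 10 and dt = 2, integrations (0,1), (2,3), ...
# are averaged pairwise and the result has nint//dt = 5 integrations; any
# trailing partial window is dropped by the integer division.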
@jit(nogil=True, nopython=True, cache=True)
def _resample_jit(data, dt, result):
nint, nbl, nchan, npol = data.shape
for j in range(nbl):
for k in range(nchan):
for l in range(npol):
for i in range(int64(nint//dt)):
iprime = int64(i*dt)
result[i, j, k, l] = data[iprime, j, k, l]
for r in range(1, dt):
result[i, j, k, l] += data[iprime+r, j, k, l]
result[i, j, k, l] = result[i, j, k, l]/dt
@guvectorize([str("void(complex64[:], int64)")], str("(n),()"),
target="parallel", nopython=True)
def _resample_gu(data, dt):
b""" Multicore resampling via numpy broadcasting.
    Requires the nint axis to be last, so the input
    visibility array must have view from "np.swapaxes(data, 0, 3)".
*modifies original memory space* (unlike _resample_jit)
"""
if dt > 1:
for i in range(data.shape[0]//dt):
iprime = int64(i*dt)
data[i] = data[iprime]
for r in range(1, dt):
data[i] += data[iprime+r]
data[i] = data[i]/dt
def dedisperseresample(data, delay, dt, parallel=False, resamplefirst=True):
""" Dedisperse and resample in single function.
parallel controls use of multicore versions of algorithms.
resamplefirst is parameter that reproduces rfgpu order.
"""
if not np.any(data):
return np.array([])
logger.info('Correcting by delay/resampling {0}/{1} ints in {2} mode'
.format(delay.max(), dt, ['single', 'parallel'][parallel]))
nint, nbl, nchan, npol = data.shape
newsh = (int64(nint-delay.max())//dt, nbl, nchan, npol)
if resamplefirst:
result = resample(data, dt, parallel=parallel)
result = dedisperse(result, delay//dt, parallel=parallel)
return result
else:
if parallel:
data = data.copy()
_ = _dedisperseresample_gu(np.swapaxes(data, 0, 1),
delay, dt)
return data[0:(len(data)-delay.max())//dt]
else:
result = np.zeros(shape=newsh, dtype=data.dtype)
_dedisperseresample_jit(data, delay, dt, result)
return result
@jit(nogil=True, nopython=True, cache=True)
def _dedisperseresample_jit(data, delay, dt, result):
nint, nbl, nchan, npol = data.shape
nintout = int64(len(result))
for j in range(nbl):
for l in range(npol):
for k in range(nchan):
for i in range(nintout):
weight = int64(0)
for r in range(dt):
iprime = int64(i*dt + delay[k] + r)
val = data[iprime, j, k, l]
result[i, j, k, l] += val
if val != 0j:
weight += 1
if weight > 0:
result[i, j, k, l] = result[i, j, k, l]/weight
else:
result[i, j, k, l] = weight
return result
@guvectorize([str("void(complex64[:,:,:], int64[:], int64)")],
str("(n,m,l),(m),()"), target="parallel", nopython=True)
def _dedisperseresample_gu(data, delay, dt):
if delay.max() > 0 or dt > 1:
nint, nchan, npol = data.shape
for l in range(npol):
for k in range(nchan):
for i in range((nint-delay.max())//dt):
weight = int64(0)
for r in range(dt):
iprime = int64(i*dt + delay[k] + r)
val = data[iprime, k, l]
if r == 0:
data[i, k, l] = val
else:
data[i, k, l] += val
if val != 0j:
weight += 1
if weight > 0:
data[i, k, l] = data[i, k, l]/weight
else:
data[i, k, l] = weight
###
# cascading 3arm imaging with kalman filter
###
def search_thresh_armk(st, data, uvw, integrations=None, spec_std=None,
sig_ts=[], coeffs=[]):
"""
"""
from rfpipe import util
if integrations is None:
integrations = list(range(len(data)))
elif isinstance(integrations, int):
integrations = [integrations]
if spec_std is None:
if data.shape[0] > 1:
spec_std = data.real.mean(axis=3).mean(axis=1).std(axis=0)
else:
spec_std = data[0].real.mean(axis=2).std(axis=0)
if not len(sig_ts):
sig_ts = [x*np.median(spec_std) for x in [0.3, 0.1, 0.03, 0.01]]
if not len(coeffs):
if not np.any(spec_std):
logger.warning("spectrum std all zeros. Not estimating coeffs.")
kalman_coeffs = []
else:
sig_ts, kalman_coeffs = kalman_prepare_coeffs(spec_std)
if not np.all(np.nan_to_num(sig_ts)):
kalman_coeffs = []
n_max_cands = 10 # TODO set with function of sigma_arms
u, v, w = uvw
ch0 = 0
u0 = u[:, ch0]
v0 = v[:, ch0]
w0 = w[:, ch0]
order = ['N', 'E', 'W']
T012 = maparms(st=st, u0=u0, v0=v0, order=order)
arm0, arm1, arm2 = image_arms(st, data.take(integrations, axis=0), uvw,
order=order)
# TODO: This is not returning bright simulated transients. Why?
candinds, armlocs, snrarms = thresh_arms(arm0, arm1, arm2, T012,
st.prefs.sigma_arm,
st.prefs.sigma_arms,
n_max_cands)
# kalman filter integrated for now
T01U = maparms(st=st, u0=u0, v0=v0, order=[order[0], order[1]],
e2=(1., 0.))
T01V = maparms(st=st, u0=u0, v0=v0, order=[order[0], order[1]],
e2=(0., 1.))
T12U = maparms(st=st, u0=u0, v0=v0, order=[order[1], order[2]],
e2=(1., 0.))
T12V = maparms(st=st, u0=u0, v0=v0, order=[order[1], order[2]],
e2=(0., 1.))
T20U = maparms(st=st, u0=u0, v0=v0, order=[order[2], order[0]],
e2=(1., 0.))
T20V = maparms(st=st, u0=u0, v0=v0, order=[order[2], order[0]],
e2=(0., 1.))
npix = max(st.npixx_full, st.npixy_full)
kpeaks = []
for i in range(len(candinds)):
kpeak = ()
snrlast = 0. # initialize snr to find max per i
for j in range(n_max_cands):
if snrarms[i, j] > 0.:
spec = data.take([integrations[candinds[i, j]]], axis=0).copy()
armloc0, armloc1, armloc2 = armlocs[i, j]
# find x,y loc from common loc inferred from each arm pair
peakx01 = projectarms(armloc0-npix//2, armloc1-npix//2, T01U,
st.npixx_full)
peaky01 = projectarms(armloc0-npix//2, armloc1-npix//2, T01V,
st.npixy_full)
peakx12 = projectarms(armloc1-npix//2, armloc2-npix//2, T12U,
st.npixx_full)
peaky12 = projectarms(armloc1-npix//2, armloc2-npix//2, T12V,
st.npixy_full)
peakx20 = projectarms(armloc2-npix//2, armloc0-npix//2, T20U,
st.npixx_full)
peaky20 = projectarms(armloc2-npix//2, armloc0-npix//2, T20V,
st.npixy_full)
peakx = np.sort([peakx01, peakx12, peakx20])[1]
peaky = np.sort([peaky01, peaky12, peaky20])[1]
l, m = st.calclm(st.npixx_full, st.npixy_full, st.uvres, peakx,
peaky)
util.phase_shift(spec, uvw=uvw, dl=l, dm=m)
spec = spec[0].real.mean(axis=2).mean(axis=0)
if np.count_nonzero(spec)/len(spec) > 1-st.prefs.max_zerofrac:
significance_kalman = -kalman_significance(spec, spec_std,
sig_ts=sig_ts,
coeffs=kalman_coeffs)
snrk = (2*significance_kalman)**0.5
else:
logger.warning("snrk set to 0, since {0}/{1} are zeroed".format(len(spec)-np.count_nonzero(spec), len(spec)))
snrk = 0.
snrtot = (snrk**2 + snrarms[i, j]**2)**0.5
if (snrtot > (st.prefs.sigma_kalman**2 + st.prefs.sigma_arms**2)**0.5) and (snrtot > snrlast):
kpeak = (integrations[candinds[i, j]], snrarms[i, j],
snrk, (armloc0, armloc1, armloc2), (peakx, peaky),
(l, m))
snrlast = snrtot
if len(kpeak):
kpeaks.append(kpeak)
return kpeaks
def image_arms(st, data, uvw, wisdom=None, order=['N', 'E', 'W']):
""" Calculate grids for all three arms of VLA.
Uses maximum of ideal number of pixels on side of image.
"""
npix = max(st.npixx_full, st.npixy_full)
grids_arm0 = grid_arm(data, uvw, st.blind_arm(order[0]), npix, st.uvres)
arm0 = image_fftw(grids_arm0, axes=(1,), wisdom=wisdom)
grids_arm1 = grid_arm(data, uvw, st.blind_arm(order[1]), npix, st.uvres)
arm1 = image_fftw(grids_arm1, axes=(1,), wisdom=wisdom)
grids_arm2 = grid_arm(data, uvw, st.blind_arm(order[2]), npix, st.uvres)
arm2 = image_fftw(grids_arm2, axes=(1,), wisdom=wisdom)
return arm0, arm1, arm2
def grid_arm(data, uvw, arminds, npix, uvres):
""" Grids visibilities along 1d arms of array.
    arminds defines a subset of baselines that form a linear array.
grids as radius with sign of the u coordinate.
defines a convention of uv distance as positive in u direction.
Returns FFT output (time vs pixel) from gridded 1d visibilities.
"""
u, v, w = uvw
# TODO: check colinearity and "w"
# TODO: integrate with unit vector approach in mapper function?
sign = np.sign(u.take(arminds, axis=0))
uvd = sign*(u.take(arminds, axis=0)**2 + v.take(arminds, axis=0)**2)**0.5
grids = np.zeros(shape=(data.shape[0], npix), dtype=np.complex64)
grid_visibilities_arm_jit(data.take(arminds, axis=1), uvd, npix,
uvres, grids)
return grids
def maparms(st=None, u0=None, v0=None, e0=None, e1=None, e2=None,
order=['N', 'E', 'W']):
""" Generates a function for geometric mapping between three unit vectors.
    The 0,1,2 indices mark the order of the vectors.
They can be measured with (st, u0, v0) or given with e0, e1, e2.
dot(T012,(A0,A1)) = A2, where A0,A1 are locations on arms 0,1
and A2 is the location on arm 2.
Convention defined in gridding for vectors to be positive in u direction.
u,v are 1d of length nbl chosen at channel 0
order can be arm names N, E, W
"""
assert all([o in ['N', 'E', 'W'] for o in order])
if e0 is None:
e0 = get_uvunit(st.blind_arm(order[0]), u0, v0)
if e1 is None:
e1 = get_uvunit(st.blind_arm(order[1]), u0, v0)
if e2 is None:
e2 = get_uvunit(st.blind_arm(order[2]), u0, v0)
# they should be unit vectors (within rounding errors)
    assert (np.linalg.norm(e0) > 0.99) and (np.linalg.norm(e0) < 1.01), "Problem with unit vector e0: {0}".format(e0)
    assert (np.linalg.norm(e1) > 0.99) and (np.linalg.norm(e1) < 1.01), "Problem with unit vector e1: {0}".format(e1)
    assert (np.linalg.norm(e2) > 0.99) and (np.linalg.norm(e2) < 1.01), "Problem with unit vector e2: {0}".format(e2)
T012 = np.dot(e2, np.linalg.inv(np.array((e0, e1))))
return T012
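# Illustrative use of the returned coefficients (names as defined in this module):
#   T012 = maparms(st=st, u0=u0, v0=v0, order=['N', 'E', 'W'])
#   ind2 = projectarms(ind0 - npix//2, ind1 - npix//2, T012, npix)
# i.e. pixel offsets measured on the first two arms predict the pixel on the
# third arm, which is how thresh_arms checks three-arm coincidence.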
def get_uvunit(blind, u, v):
""" Calculate uv unit vector for indices blind of u/v.
"""
# positive u convention
ind = blind[np.argmax(u.take(blind, axis=0)**2 + v.take(blind, axis=0)**2)]
l = (u[ind]**2 + v[ind]**2)**0.5
e = (u[ind]/l * np.sign(u[ind]), v[ind]/l * np.sign(u[ind]))
return e
@jit(nopython=True, cache=True)
def projectarms(dpix0, dpix1, T012, npix2):
""" Take any two locations relative to center and project in a new direction.
npix2 is size of direction2.
"""
newpix = int(round(np.dot(np.array([float(dpix0), float(dpix1)]),
T012) + npix2//2))
return newpix
@jit(nopython=True, cache=True)
def thresh_arms(arm0, arm1, arm2, T012, sigma_arm, sigma_trigger, n_max_cands):
""" Run 3-arm search with sigma_arm per arm and sigma_trigger overall.
    arm0/1/2 are the 1d arm "images" and T012 holds the coefficients to map arm0/1
    positions to arm2.
    The number of candidates is limited to n_max_cands per integration.
    The highest-snrarm candidates are returned, up to n_max_cands per integration.
"""
assert len(arm0[0]) == len(arm1[0])
assert len(arm2[0]) == len(arm1[0])
# TODO: assure stds is calculated over larger sample than 1 int
std_arm0 = arm0.std() # over all ints and pixels
std_arm1 = arm1.std()
std_arm2 = arm2.std()
nint = len(arm0)
npix = len(arm0[0])
effective_3arm_sigma = (std_arm0**2 + std_arm1**2 + std_arm2**2)**0.5
effective_eta_trigger = sigma_trigger * effective_3arm_sigma
candinds = np.zeros(shape=(nint, n_max_cands), dtype=np.int64)
armlocs = np.zeros(shape=(nint, n_max_cands, 3), dtype=np.int64)
snrarms = np.zeros(shape=(nint, n_max_cands), dtype=np.float64)
for i in range(len(arm0)):
success_counter = 0
indices_arr0 = np.nonzero(arm0[i] > sigma_arm*std_arm0)[0]
indices_arr1 = np.nonzero(arm1[i] > sigma_arm*std_arm1)[0]
for ind0 in indices_arr0:
for ind1 in indices_arr1:
ind2 = projectarms(ind0-npix//2, ind1-npix//2, T012, npix)
# check score if intersections are all on grid
if ind2 < npix:
score = arm0[i, ind0] + arm1[i, ind1] + arm2[i, ind2]
else:
score = 0.
if score > effective_eta_trigger:
snr_3arm = score/effective_3arm_sigma
# TODO find better logic (heap?)
success_counter0 = success_counter
while snrarms[i, success_counter] > snr_3arm:
success_counter += 1
if success_counter >= n_max_cands:
success_counter = 0
if success_counter == success_counter0:
break
if snrarms[i, success_counter] < snr_3arm:
snrarms[i, success_counter] = snr_3arm
armlocs[i, success_counter] = (ind0, ind1, ind2)
candinds[i, success_counter] = i
success_counter += 1
if success_counter >= n_max_cands:
success_counter = 0
return candinds, armlocs, snrarms
@jit(nogil=True, nopython=True, cache=True)
def grid_visibilities_arm_jit(data, uvd, npix, uvres, grids):
b""" Grid visibilities into rounded uvd coordinates using jit on single core.
data/uvd are selected for a single arm
"""
nint, nbl, nchan, npol = data.shape
# rounding not available in numba
# ubl = np.round(us/uvres, 0).astype(np.int32)
# vbl = np.round(vs/uvres, 0).astype(np.int32)
for j in range(nbl):
for k in range(nchan):
uvbl = int64(uvd[j, k]/uvres)
            if np.abs(uvbl) < npix//2:
uvmod = int64(np.mod(uvbl, npix))
for i in range(nint):
for l in range(npol):
grids[i, uvmod] += data[i, j, k, l]
return grids
def set_wisdom(npixx, npixy=None):
""" Run single ifft to prep fftw wisdom in worker cache
Supports 1d and 2d ifft.
"""
logger.info('Calculating FFT wisdom...')
if npixy is not None:
arr = pyfftw.empty_aligned((npixx, npixy), dtype='complex64', n=16)
fft_arr = pyfftw.interfaces.numpy_fft.ifft2(arr, auto_align_input=True,
auto_contiguous=True,
planner_effort='FFTW_MEASURE')
else:
arr = pyfftw.empty_aligned((npixx), dtype='complex64', n=16)
fft_arr = pyfftw.interfaces.numpy_fft.ifft(arr, auto_align_input=True,
auto_contiguous=True,
planner_effort='FFTW_MEASURE')
return pyfftw.export_wisdom()
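# Illustrative sketch (editorial, not part of the original module): the exported
# wisdom can be restored in another process with pyfftw.import_wisdom before
# planning transforms of the same shape, e.g.
# wisdom = set_wisdom(1024) # 1d plan
# pyfftw.import_wisdom(wisdom)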
| bsd-3-clause |
pylessard/python-udsoncan | test/client/test_control_dtc_setting.py | 1 | 5730 | from udsoncan.client import Client
from udsoncan import services
from udsoncan.exceptions import *
from test.ClientServerTest import ClientServerTest
class TestControlDTCSettings(ClientServerTest):
def __init__(self, *args, **kwargs):
ClientServerTest.__init__(self, *args, **kwargs)
def test_set_on(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x85\x01")
self.conn.fromuserqueue.put(b"\xC5\x01") # Positive response
def _test_set_on(self):
response = self.udsclient.control_dtc_setting(services.ControlDTCSetting.SettingType.on)
self.assertEqual(response.service_data.setting_type_echo, services.ControlDTCSetting.SettingType.on)
def test_set_on_spr(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x85\x81")
self.conn.fromuserqueue.put("wait") # Synchronize
def _test_set_on_spr(self):
with self.udsclient.suppress_positive_response:
response = self.udsclient.control_dtc_setting(services.ControlDTCSetting.SettingType.on)
self.assertEqual(response, None)
self.conn.fromuserqueue.get(timeout=0.2) #Avoid closing connection prematurely
def test_set_on_with_extra_data(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x85\x01\x11\x22\x33")
self.conn.fromuserqueue.put(b"\xC5\x01") # Positive response
def _test_set_on_with_extra_data(self):
response = self.udsclient.control_dtc_setting(setting_type=services.ControlDTCSetting.SettingType.on, data=b'\x11\x22\x33')
self.assertEqual(response.service_data.setting_type_echo, services.ControlDTCSetting.SettingType.on)
def test_set_on_harmless_extra_bytes_in_response(self):
self.wait_request_and_respond(b"\xC5\x01\x77\x88\x99") # Positive response
def _test_set_on_harmless_extra_bytes_in_response(self):
response = self.udsclient.control_dtc_setting(setting_type=services.ControlDTCSetting.SettingType.on)
self.assertEqual(response.service_data.setting_type_echo, services.ControlDTCSetting.SettingType.on)
def test_set_params_denied_exception(self):
self.wait_request_and_respond(b"\x7F\x85\x45") #Request Out Of Range
def _test_set_params_denied_exception(self):
with self.assertRaises(NegativeResponseException) as handle:
self.udsclient.control_dtc_setting(setting_type=0x45)
def test_set_params_denied_no_exception(self):
self.wait_request_and_respond(b"\x7F\x85\x45") #Request Out Of Range
def _test_set_params_denied_no_exception(self):
self.udsclient.config['exception_on_negative_response'] = False
response = self.udsclient.control_dtc_setting(setting_type=0x45)
self.assertTrue(response.valid)
self.assertFalse(response.positive)
def test_set_params_invalid_service_exception(self):
self.wait_request_and_respond(b"\x00\x45") #Inexistent Service
def _test_set_params_invalid_service_exception(self):
with self.assertRaises(InvalidResponseException) as handle:
self.udsclient.control_dtc_setting(setting_type=0x45)
def test_set_params_invalid_service_no_exception(self):
self.wait_request_and_respond(b"\x00\x45") #Inexistent Service
def _test_set_params_invalid_service_no_exception(self):
self.udsclient.config['exception_on_invalid_response'] = False
response = self.udsclient.control_dtc_setting(setting_type=0x45)
self.assertFalse(response.valid)
def test_wrong_service_exception(self):
self.wait_request_and_respond(b"\x7E\x22") # Valid but wrong service (Tester Present)
def _test_wrong_service_exception(self):
with self.assertRaises(UnexpectedResponseException) as handle:
self.udsclient.control_dtc_setting(setting_type=0x22)
def test_wrong_service_no_exception(self):
self.wait_request_and_respond(b"\x7E\x22") # Valid but wrong service (Tester Present)
def _test_wrong_service_no_exception(self):
self.udsclient.config['exception_on_unexpected_response'] = False
response = self.udsclient.control_dtc_setting(setting_type=0x22)
self.assertTrue(response.valid)
self.assertTrue(response.unexpected)
def test_bad_setting_type_response_exception(self):
self.wait_request_and_respond(b"\xC5\x23") # Valid but access type
def _test_bad_setting_type_response_exception(self):
with self.assertRaises(UnexpectedResponseException) as handle:
self.udsclient.control_dtc_setting(setting_type=0x22)
def test_bad_setting_type_response_no_exception(self):
self.wait_request_and_respond(b"\xC5\x23") # Valid but access type
def _test_bad_setting_type_response_no_exception(self):
self.udsclient.config['exception_on_unexpected_response'] = False
response = self.udsclient.control_dtc_setting(setting_type=0x22)
self.assertTrue(response.valid)
self.assertTrue(response.unexpected)
def test_bad_param(self):
pass
def _test_bad_param(self):
with self.assertRaises(ValueError):
self.udsclient.control_dtc_setting(setting_type=-1)
with self.assertRaises(ValueError):
self.udsclient.control_dtc_setting(setting_type=0x80)
with self.assertRaises(ValueError):
self.udsclient.control_dtc_setting(setting_type=services.ControlDTCSetting.SettingType.on, data=1)
with self.assertRaises(ValueError):
self.udsclient.control_dtc_setting(setting_type=services.ControlDTCSetting.SettingType.on, data='asdasdasd')
| mit |
turian/pitch-detection-echonest | pitch-detection-echonest.py | 1 | 3167 | #!/usr/bin/python
import os
import sys
import pyechonest.config as config
import echonest.remix.audio as audio
import numpy
import matplotlib.pyplot as plt
from midiutil.MidiFile import MIDIFile
config.ECHO_NEST_API_KEY = os.environ["ECHO_NEST_API_KEY"]
def normalize_loudness(audiofile):
loudness = numpy.array([p.mean_loudness() for p in audiofile.analysis.segments])
ld = numpy.max(loudness) - numpy.min(loudness)
return (loudness - numpy.min(loudness)) / ld
def generate_image(audiofile):
pitches = numpy.array([p.pitches for p in audiofile.analysis.segments])
aspect = 1. / (audiofile.analysis.duration / 60.)
normalized_loudness = normalize_loudness(audiofile)
# Multiply each pitch by its loudness
normalized_pitches = pitches.T * normalized_loudness
durations = numpy.array([p.duration for p in audiofile.analysis.segments])
segment_end_times = numpy.cumsum(durations)
# Now, create an interpolated array that respects the duration of each segment
BUCKETS = len(audiofile.analysis.segments) * 10
duration_step = audiofile.analysis.duration / BUCKETS
normalized_pitches_timestretched = numpy.zeros((12, BUCKETS))
for i in range(12):
pos = 0.
segment_idx = 0
for j in range(BUCKETS):
# Find the segment that corresponds to this position
# Guard the index first so we never read past the final segment
while segment_idx < len(audiofile.analysis.segments) - 1 and segment_end_times[segment_idx] < pos:
segment_idx += 1
normalized_pitches_timestretched[i,j] = normalized_pitches[i, segment_idx]
# Advance the position
pos += duration_step
plt.clf()
plt.imshow(normalized_pitches_timestretched, extent=[0,audiofile.analysis.duration,0,12])
plt.show()
plt.savefig('foo.pdf')
def generate_midi(audiofile):
# Create the MIDIFile Object with 1 track
MyMIDI = MIDIFile(1)
# Tracks are numbered from zero. Times are measured in beats.
track = 0
time = 0
print "Tempo:", audiofile.analysis.tempo
# Add track name and tempo.
MyMIDI.addTrackName(track,time,"Sample Track")
MyMIDI.addTempo(track,time,audiofile.analysis.tempo["value"])
durations = numpy.array([p.duration for p in audiofile.analysis.segments])
segment_end_times = numpy.cumsum(durations)
segment_start_times = segment_end_times - durations
normalized_loudness = normalize_loudness(audiofile)
track = 0
channel = 0
for i in range(len(audiofile.analysis.segments)):
for j in range(12):
pitch = j + 60
time = segment_start_times[i]
duration = durations[i]
volume = int(normalized_loudness[i] * audiofile.analysis.segments[i].pitches[j] * 100)
# Now add the note.
MyMIDI.addNote(track,channel,pitch,time,duration,volume)
# And write it to disk.
binfile = open("output.mid", 'wb')
MyMIDI.writeFile(binfile)
binfile.close()
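# Editorial note (not in the original): generate_midi writes, for every Echo
# Nest segment, twelve notes spanning one octave of chroma (MIDI pitches 60-71),
# each with velocity proportional to the segment loudness times its chroma value.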
if __name__ == "__main__":
assert len(sys.argv) == 2
filename = sys.argv[1]
audiofile = audio.LocalAudioFile(filename)
# generate_image(audiofile)
generate_midi(audiofile)
| bsd-3-clause |
iamutkarshtiwari/kivy | kivy/extras/highlight.py | 78 | 1880 | '''Pygments lexer for kv language
'''
from pygments.lexer import RegexLexer, bygroups, using
from pygments.lexers.agile import PythonLexer
from pygments import highlight
from pygments.token import *
from pygments.formatters import get_formatter_by_name
import sys
class KivyLexer(RegexLexer):
name = 'Kivy'
aliases = ['kivy', 'kv']
filenames = ['*.kv']
tokens = {
'root': [
(r'#:.*?$', Comment.Preproc),
(r'#.*?$', using(PythonLexer)),
(r'\s+', Text),
(r'<.+>', Name.Namespace),
(r'(\[)(\s*)(.*?)(\s*)(@)',
bygroups(Punctuation, Text, Name.Class, Text, Operator),
'classList'),
(r'[A-Za-z][A-Za-z0-9]*$', Name.Attribute),
(r'(.*?)(\s*)(:)(\s*)$',
bygroups(Name.Class, Text, Punctuation, Text)),
(r'(.*?)(\s*)(:)(\s*)(.*?)$',
bygroups(Name.Attribute, Text, Punctuation, Text,
using(PythonLexer)))],
'classList': [
(r'(,)(\s*)([A-Z][A-Za-z0-9]*)',
bygroups(Punctuation, Text, Name.Class)),
(r'(\+)(\s*)([A-Z][A-Za-z0-9]*)',
bygroups(Operator, Text, Name.Class)),
(r'\s+', Text),
(r'[A-Z][A-Za-z0-9]*', Name.Class),
(r'\]', Punctuation, '#pop')]}
if __name__ == '__main__':
''' This lexer will highlight .kv file. The first argument is the source
file, the second argument is the format of the destination and the third
argument is the output filename
'''
if len(sys.argv) != 4:
raise Exception('Three arguments expected, found %s' %
(len(sys.argv) - 1))
k = KivyLexer()
with open(sys.argv[1], 'r') as fd:
with open(sys.argv[3], 'w') as out:
highlight(fd.read(), k, get_formatter_by_name(sys.argv[2]), out)
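# Illustrative invocation sketch (file names are hypothetical):
# python highlight.py my_widget.kv html my_widget.html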
| mit |
ReachingOut/unisubs | libs/markdown/__init__.py | 17 | 21514 | """
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
md = Markdown()
html = md.convert(your_text_string)
## Basic use from the command line:
python markdown.py source.txt > destination.html
Run "python markdown.py --help" to see more options.
## Extensions
See <http://www.freewisdom.org/projects/python-markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
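A hedged, illustrative example of enabling an extension by name (the extension
name and its options below are hypothetical placeholders):
    import markdown
    html = markdown.markdown(some_text,
                             extensions=['extname(key1=value1,key2=value2)'])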
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: [email protected]
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see docs/LICENSE for details).
"""
version = "2.0"
version_info = (2,0,0, "Final")
import re
import codecs
import sys
import warnings
import logging
from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
"""
CONSTANTS
=============================================================================
"""
"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""
# default logging level for command-line use
COMMAND_LINE_LOGGING_LEVEL = CRITICAL
TAB_LENGTH = 4 # expand tabs to this many spaces
ENABLE_ATTRIBUTES = True # @id = xyz -> <... id="xyz">
SMART_EMPHASIS = True # this_or_that does not become this<i>or</i>that
DEFAULT_OUTPUT_FORMAT = 'xhtml1' # xhtml or html4 output
HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode
BLOCK_LEVEL_ELEMENTS = re.compile("p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
"|script|noscript|form|fieldset|iframe|math"
"|ins|del|hr|hr/|style|li|dt|dd|thead|tbody"
"|tr|th|td")
DOC_TAG = "div" # Element used to wrap document - later removed
# Placeholders
STX = u'\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = u'\u0003' # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
AMP_SUBSTITUTE = STX+"amp"+ETX
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
# Hebrew (0590-05FF), Arabic (0600-06FF),
# Syriac (0700-074F), Arabic supplement (0750-077F),
# Thaana (0780-07BF), Nko (07C0-07FF).
(u'\u2D30', u'\u2D7F'), # Tifinagh
)
"""
AUXILIARY GLOBAL FUNCTIONS
=============================================================================
"""
def message(level, text):
""" A wrapper method for logging debug messages. """
logger = logging.getLogger('MARKDOWN')
if logger.handlers:
# The logger is configured
logger.log(level, text)
if level > WARN:
sys.exit(0)
elif level > WARN:
raise MarkdownException, text
else:
warnings.warn(text, MarkdownWarning)
def isBlockLevel(tag):
"""Check if the tag is a block level HTML tag."""
return BLOCK_LEVEL_ELEMENTS.match(tag)
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
class AtomicString(unicode):
"""A string which should not be further processed."""
pass
class MarkdownException(Exception):
""" A Markdown Exception. """
pass
class MarkdownWarning(Warning):
""" A Markdown Warning. """
pass
"""
OVERALL DESIGN
=============================================================================
Markdown processing takes place in four steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One such
treeprocessor runs InlinePatterns against the ElementTree, detecting inline
markup.
4. Some post-processors are run against the text after the ElementTree has
been serialized into text.
5. The output is written to a string.
Those steps are put together by the Markdown() class.
"""
import preprocessors
import blockprocessors
import treeprocessors
import inlinepatterns
import postprocessors
import blockparser
import etree_loader
import odict
# Extensions should use "markdown.etree" instead of "etree" (or do `from
# markdown import etree`). Do not import it by yourself.
etree = etree_loader.importETree()
# Adds the ability to output html4
import html4
class Markdown:
"""Convert Markdown to HTML."""
def __init__(self,
extensions=[],
extension_configs={},
safe_mode = False,
output_format=DEFAULT_OUTPUT_FORMAT):
"""
Creates a new Markdown instance.
Keyword arguments:
* extensions: A list of extensions.
If they are of type string, the module mdx_name.py will be loaded.
If they are a subclass of markdown.Extension, they will be used
as-is.
* extension_configs: Configuration settings for extensions.
* safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
* output_format: Format of output. Supported formats are:
* "xhtml1": Outputs XHTML 1.x. Default.
* "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
* "html4": Outputs HTML 4
* "html": Outputs latest supported version of HTML (currently HTML 4).
Note that it is suggested that the more specific formats ("xhtml1"
and "html4") be used as "xhtml" or "html" may change in the future
if it makes sense at that time.
"""
self.safeMode = safe_mode
self.registeredExtensions = []
self.docType = ""
self.stripTopLevelTags = True
# Preprocessors
self.preprocessors = odict.OrderedDict()
self.preprocessors["html_block"] = \
preprocessors.HtmlBlockPreprocessor(self)
self.preprocessors["reference"] = \
preprocessors.ReferencePreprocessor(self)
# footnote preprocessor will be inserted with "<reference"
# Block processors - ran by the parser
self.parser = blockparser.BlockParser()
self.parser.blockprocessors['empty'] = \
blockprocessors.EmptyBlockProcessor(self.parser)
self.parser.blockprocessors['indent'] = \
blockprocessors.ListIndentProcessor(self.parser)
self.parser.blockprocessors['code'] = \
blockprocessors.CodeBlockProcessor(self.parser)
self.parser.blockprocessors['hashheader'] = \
blockprocessors.HashHeaderProcessor(self.parser)
self.parser.blockprocessors['setextheader'] = \
blockprocessors.SetextHeaderProcessor(self.parser)
self.parser.blockprocessors['hr'] = \
blockprocessors.HRProcessor(self.parser)
self.parser.blockprocessors['olist'] = \
blockprocessors.OListProcessor(self.parser)
self.parser.blockprocessors['ulist'] = \
blockprocessors.UListProcessor(self.parser)
self.parser.blockprocessors['quote'] = \
blockprocessors.BlockQuoteProcessor(self.parser)
self.parser.blockprocessors['paragraph'] = \
blockprocessors.ParagraphProcessor(self.parser)
#self.prePatterns = []
# Inline patterns - Run on the tree
self.inlinePatterns = odict.OrderedDict()
self.inlinePatterns["backtick"] = \
inlinepatterns.BacktickPattern(inlinepatterns.BACKTICK_RE)
self.inlinePatterns["escape"] = \
inlinepatterns.SimpleTextPattern(inlinepatterns.ESCAPE_RE)
self.inlinePatterns["reference"] = \
inlinepatterns.ReferencePattern(inlinepatterns.REFERENCE_RE, self)
self.inlinePatterns["link"] = \
inlinepatterns.LinkPattern(inlinepatterns.LINK_RE, self)
self.inlinePatterns["image_link"] = \
inlinepatterns.ImagePattern(inlinepatterns.IMAGE_LINK_RE, self)
self.inlinePatterns["image_reference"] = \
inlinepatterns.ImageReferencePattern(inlinepatterns.IMAGE_REFERENCE_RE, self)
self.inlinePatterns["autolink"] = \
inlinepatterns.AutolinkPattern(inlinepatterns.AUTOLINK_RE, self)
self.inlinePatterns["automail"] = \
inlinepatterns.AutomailPattern(inlinepatterns.AUTOMAIL_RE, self)
self.inlinePatterns["linebreak2"] = \
inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_2_RE, 'br')
self.inlinePatterns["linebreak"] = \
inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_RE, 'br')
self.inlinePatterns["html"] = \
inlinepatterns.HtmlPattern(inlinepatterns.HTML_RE, self)
self.inlinePatterns["entity"] = \
inlinepatterns.HtmlPattern(inlinepatterns.ENTITY_RE, self)
self.inlinePatterns["not_strong"] = \
inlinepatterns.SimpleTextPattern(inlinepatterns.NOT_STRONG_RE)
self.inlinePatterns["strong_em"] = \
inlinepatterns.DoubleTagPattern(inlinepatterns.STRONG_EM_RE, 'strong,em')
self.inlinePatterns["strong"] = \
inlinepatterns.SimpleTagPattern(inlinepatterns.STRONG_RE, 'strong')
self.inlinePatterns["emphasis"] = \
inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_RE, 'em')
self.inlinePatterns["emphasis2"] = \
inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_2_RE, 'em')
# The order of the handlers matters!!!
# Tree processors - run once we have a basic parse.
self.treeprocessors = odict.OrderedDict()
self.treeprocessors["inline"] = treeprocessors.InlineProcessor(self)
self.treeprocessors["prettify"] = \
treeprocessors.PrettifyTreeprocessor(self)
# Postprocessors - finishing touches.
self.postprocessors = odict.OrderedDict()
self.postprocessors["raw_html"] = \
postprocessors.RawHtmlPostprocessor(self)
self.postprocessors["amp_substitute"] = \
postprocessors.AndSubstitutePostprocessor()
# footnote postprocessor will be inserted with ">amp_substitute"
# Map format keys to serializers
self.output_formats = {
'html' : html4.to_html_string,
'html4' : html4.to_html_string,
'xhtml' : etree.tostring,
'xhtml1': etree.tostring,
}
self.references = {}
self.htmlStash = preprocessors.HtmlStash()
self.registerExtensions(extensions = extensions,
configs = extension_configs)
self.set_output_format(output_format)
self.reset()
def registerExtensions(self, extensions, configs):
"""
Register extensions with this instance of Markdown.
Keyword arguments:
* extensions: A list of extensions, which can either
be strings or objects. See the docstring on Markdown.
* configs: A dictionary mapping module names to config options.
"""
for ext in extensions:
if isinstance(ext, basestring):
ext = load_extension(ext, configs.get(ext, []))
try:
ext.extendMarkdown(self, globals())
except AttributeError:
message(ERROR, "Incorrect type! Extension '%s' is "
"neither a string or an Extension." %(repr(ext)))
def registerExtension(self, extension):
""" This gets called by the extension """
self.registeredExtensions.append(extension)
def reset(self):
"""
Resets all state variables so that we can start with a new text.
"""
self.htmlStash.reset()
self.references.clear()
for extension in self.registeredExtensions:
extension.reset()
def set_output_format(self, format):
""" Set the output format for the class instance. """
try:
self.serializer = self.output_formats[format.lower()]
except KeyError:
message(CRITICAL, 'Invalid Output Format: "%s". Use one of %s.' \
% (format, self.output_formats.keys()))
def convert(self, source):
"""
Convert markdown to serialized XHTML or HTML.
Keyword arguments:
* source: Source text as a Unicode string.
"""
# Fixup the source text
if not source.strip():
return u"" # a blank unicode string
try:
source = unicode(source)
except UnicodeDecodeError:
message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')
return u""
source = source.replace(STX, "").replace(ETX, "")
source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
source = re.sub(r'\n\s+\n', '\n\n', source)
source = source.expandtabs(TAB_LENGTH)
# Split into lines and run the line preprocessors.
self.lines = source.split("\n")
for prep in self.preprocessors.values():
self.lines = prep.run(self.lines)
# Parse the high-level elements.
root = self.parser.parseDocument(self.lines).getroot()
# Run the tree-processors
for treeprocessor in self.treeprocessors.values():
newRoot = treeprocessor.run(root)
if newRoot:
root = newRoot
# Serialize _properly_. Strip top-level tags.
output, length = codecs.utf_8_decode(self.serializer(root, encoding="utf8"))
if self.stripTopLevelTags:
start = output.index('<%s>'%DOC_TAG)+len(DOC_TAG)+2
end = output.rindex('</%s>'%DOC_TAG)
output = output[start:end].strip()
# Run the text post-processors
for pp in self.postprocessors.values():
output = pp.run(output)
return output.strip()
def convertFile(self, input=None, output=None, encoding=None):
"""Converts a markdown file and returns the HTML as a unicode string.
Decodes the file using the provided encoding (defaults to utf-8),
passes the file content to markdown, and outputs the html to either
the provided stream or the file with provided name, using the same
encoding as the source file.
**Note:** This is the only place that decoding and encoding of unicode
takes place in Python-Markdown. (All other code is unicode-in /
unicode-out.)
Keyword arguments:
* input: Name of source text file.
* output: Name of output file. Writes to stdout if `None`.
* encoding: Encoding of input and output files. Defaults to utf-8.
"""
encoding = encoding or "utf-8"
# Read the source
input_file = codecs.open(input, mode="r", encoding=encoding)
text = input_file.read()
input_file.close()
text = text.lstrip(u'\ufeff') # remove the byte-order mark
# Convert
html = self.convert(text)
# Write to file or stdout
if isinstance(output, (str, unicode)):
output_file = codecs.open(output, "w", encoding=encoding)
output_file.write(html)
output_file.close()
else:
output.write(html.encode(encoding))
"""
Extensions
-----------------------------------------------------------------------------
"""
class Extension:
""" Base class for extensions to subclass. """
def __init__(self, configs = {}):
"""Create an instance of an Extention.
Keyword arguments:
* configs: A dict of configuration settings used by an Extension.
"""
self.config = configs
def getConfig(self, key):
""" Return a setting for the given key or an empty string. """
if key in self.config:
return self.config[key][0]
else:
return ""
def getConfigInfo(self):
""" Return all config settings as a list of tuples. """
return [(key, self.config[key][1]) for key in self.config.keys()]
def setConfig(self, key, value):
""" Set a config setting for `key` with the given `value`. """
self.config[key][0] = value
def extendMarkdown(self, md, md_globals):
"""
Add the various processors and patterns to the Markdown instance.
This method must be overridden by every extension.
Keyword arguments:
* md: The Markdown instance.
* md_globals: Global variables in the markdown module namespace.
"""
pass
def load_extension(ext_name, configs = []):
"""Load extension by name, then return the module.
The extension name may contain arguments as part of the string in the
following format: "extname(key1=value1,key2=value2)"
"""
# Parse extensions config params (ignore the order)
configs = dict(configs)
pos = ext_name.find("(") # find the first "("
if pos > 0:
ext_args = ext_name[pos+1:-1]
ext_name = ext_name[:pos]
pairs = [x.split("=") for x in ext_args.split(",")]
configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
# Setup the module names
ext_module = 'markdown.extensions'
module_name_new_style = '.'.join([ext_module, ext_name])
module_name_old_style = '_'.join(['mdx', ext_name])
# Try loading the extension first from one place, then another
try: # New style (markdown.extensons.<extension>)
module = __import__(module_name_new_style, {}, {}, [ext_module])
except ImportError:
try: # Old style (mdx.<extension>)
module = __import__(module_name_old_style)
except ImportError:
message(WARN, "Failed loading extension '%s' from '%s' or '%s'"
% (ext_name, module_name_new_style, module_name_old_style))
# Return None so we don't try to initiate a non-existent extension
return None
# If the module is loaded successfully, we expect it to define a
# function called makeExtension()
try:
return module.makeExtension(configs.items())
except AttributeError:
message(CRITICAL, "Failed to initiate extension '%s'" % ext_name)
def load_extensions(ext_names):
"""Loads multiple extensions"""
extensions = []
for ext_name in ext_names:
extension = load_extension(ext_name)
if extension:
extensions.append(extension)
return extensions
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text,
extensions = [],
safe_mode = False,
output_format = DEFAULT_OUTPUT_FORMAT):
"""Convert a markdown string to HTML and return HTML as a unicode string.
This is a shortcut function for `Markdown` class to cover the most
basic use case. It initializes an instance of Markdown, loads the
necessary extensions and runs the parser on the given text.
Keyword arguments:
* text: Markdown formatted text as Unicode or ASCII string.
* extensions: A list of extensions or extension names (may contain config args).
* safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
* output_format: Format of output. Supported formats are:
* "xhtml1": Outputs XHTML 1.x. Default.
* "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
* "html4": Outputs HTML 4
* "html": Outputs latest supported version of HTML (currently HTML 4).
Note that it is suggested that the more specific formats ("xhtml1"
and "html4") be used as "xhtml" or "html" may change in the future
if it makes sense at that time.
Returns: An HTML document as a string.
"""
md = Markdown(extensions=load_extensions(extensions),
safe_mode=safe_mode,
output_format=output_format)
return md.convert(text)
def markdownFromFile(input = None,
output = None,
extensions = [],
encoding = None,
safe_mode = False,
output_format = DEFAULT_OUTPUT_FORMAT):
"""Read markdown code from a file and write it to a file or a stream."""
md = Markdown(extensions=load_extensions(extensions),
safe_mode=safe_mode,
output_format=output_format)
md.convertFile(input, output, encoding)
| agpl-3.0 |
lgarren/spack | var/spack/repos/builtin/packages/perl-bio-perl/package.py | 1 | 3156 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import inspect
class PerlBioPerl(PerlPackage):
"""Functional access to BioPerl for people who don't know objects"""
homepage = "http://search.cpan.org/~cjfields/BioPerl-1.007002/Bio/Perl.pm"
url = "http://search.cpan.org/CPAN/authors/id/C/CJ/CJFIELDS/BioPerl-1.007002.tar.gz"
version('1.007002', 'a912c92b56d009198f1786b4cf560d5c')
depends_on('perl-module-build', type='build')
depends_on('perl-uri-escape', type=('build', 'run'))
depends_on('perl-io-string', type=('build', 'run'))
depends_on('perl-data-stag', type=('build', 'run'))
depends_on('perl-test-most', type=('build', 'run'))
def configure(self, spec, prefix):
# Overriding default configure method in order to cater to interactive
# Build.pl
self.build_method = 'Build.PL'
self.build_executable = Executable(
join_path(self.stage.source_path, 'Build'))
# Config questions consist of:
# Do you want to run the Bio::DB::GFF or Bio::DB::SeqFeature::Store
# live database tests? y/n [n]
#
# Install [a]ll BioPerl scripts, [n]one, or choose groups
# [i]nteractively? [a]
#
# Do you want to run tests that require connection to servers across
# the internet (likely to cause some failures)? y/n [n]
#
# Eventually, someone can add capability for the other options, but
# the current answers are the most practical for a spack install.
config_answers = ['n\n', 'a\n', 'n\n']
config_answers_filename = 'spack-config.in'
with open(config_answers_filename, 'w') as f:
f.writelines(config_answers)
with open(config_answers_filename, 'r') as f:
inspect.getmodule(self).perl('Build.PL', '--install_base=%s' %
self.prefix, input=f)
| lgpl-2.1 |
danielvdende/incubator-airflow | tests/sensors/test_timedelta_sensor.py | 15 | 1750 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import timedelta
from airflow import configuration
from airflow import models, DAG
from airflow.sensors.time_delta_sensor import TimeDeltaSensor
from airflow.utils.timezone import datetime
configuration.load_test_config()
DEFAULT_DATE = datetime(2015, 1, 1)
DEV_NULL = '/dev/null'
TEST_DAG_ID = 'unit_tests'
class TimedeltaSensorTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
def test_timedelta_sensor(self):
t = TimeDeltaSensor(
task_id='timedelta_sensor_check',
delta=timedelta(seconds=2),
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/251_test_scriptpackages.py | 43 | 1366 | # Copyright (C) 2003 Python Software Foundation
import unittest
import os
import sys
import tempfile
from test import test_support
import aetools
class TestScriptpackages(unittest.TestCase):
def _test_scriptpackage(self, package, testobject=1):
# Check that we can import the package
mod = __import__(package)
# Test that we can get the main event class
klass = getattr(mod, package)
# Test that we can instantiate that class
talker = klass()
if testobject:
# Test that we can get an application object
obj = mod.application(0)
def test__builtinSuites(self):
self._test_scriptpackage('_builtinSuites', testobject=0)
def test_StdSuites(self):
self._test_scriptpackage('StdSuites')
def test_SystemEvents(self):
self._test_scriptpackage('SystemEvents')
def test_Finder(self):
self._test_scriptpackage('Finder')
def test_Terminal(self):
self._test_scriptpackage('Terminal')
def test_Netscape(self):
self._test_scriptpackage('Netscape')
def test_Explorer(self):
self._test_scriptpackage('Explorer')
def test_CodeWarrior(self):
self._test_scriptpackage('CodeWarrior')
def test_main():
test_support.run_unittest(TestScriptpackages)
if __name__ == '__main__':
test_main()
| gpl-3.0 |
chao787/python-libs | rope/refactor/rename.py | 59 | 9359 | import warnings
from rope.base import exceptions, pyobjects, pynames, taskhandle, evaluate, worder, codeanalyze
from rope.base.change import ChangeSet, ChangeContents, MoveResource
from rope.refactor import occurrences, sourceutils
class Rename(object):
"""A class for performing rename refactoring
It can rename everything: classes, functions, modules, packages,
methods, variables and keyword arguments.
"""
def __init__(self, project, resource, offset=None):
"""If `offset` is None, the `resource` itself will be renamed"""
self.project = project
self.pycore = project.pycore
self.resource = resource
if offset is not None:
self.old_name = worder.get_name_at(self.resource, offset)
this_pymodule = self.pycore.resource_to_pyobject(self.resource)
self.old_instance, self.old_pyname = \
evaluate.eval_location2(this_pymodule, offset)
if self.old_pyname is None:
raise exceptions.RefactoringError(
'Rename refactoring should be performed'
' on resolvable python identifiers.')
else:
if not resource.is_folder() and resource.name == '__init__.py':
resource = resource.parent
dummy_pymodule = self.pycore.get_string_module('')
self.old_instance = None
self.old_pyname = pynames.ImportedModule(dummy_pymodule,
resource=resource)
if resource.is_folder():
self.old_name = resource.name
else:
self.old_name = resource.name[:-3]
def get_old_name(self):
return self.old_name
def get_changes(self, new_name, in_file=None, in_hierarchy=False,
unsure=None, docs=False, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get the changes needed for this refactoring
Parameters:
- `in_hierarchy`: when renaming a method this keyword forces
to rename all matching methods in the hierarchy
- `docs`: when `True` rename refactoring will rename
occurrences in comments and strings where the name is
visible. Setting it will make renames faster, too.
- `unsure`: decides what to do about unsure occurrences.
If `None`, they are ignored. Otherwise `unsure` is
called with an instance of `occurrence.Occurrence` as
parameter. If it returns `True`, the occurrence is
considered to be a match.
- `resources` can be a list of `rope.base.resources.File`\s to
apply this refactoring on. If `None`, the restructuring
will be applied to all python files.
- `in_file`: this argument has been deprecated; use
`resources` instead.
"""
if unsure in (True, False):
warnings.warn(
'unsure parameter should be a function that returns '
'True or False', DeprecationWarning, stacklevel=2)
def unsure_func(value=unsure):
return value
unsure = unsure_func
if in_file is not None:
warnings.warn(
'`in_file` argument has been deprecated; use `resources` '
'instead. ', DeprecationWarning, stacklevel=2)
if in_file:
resources = [self.resource]
if _is_local(self.old_pyname):
resources = [self.resource]
if resources is None:
resources = self.pycore.get_python_files()
changes = ChangeSet('Renaming <%s> to <%s>' %
(self.old_name, new_name))
finder = occurrences.create_finder(
self.pycore, self.old_name, self.old_pyname, unsure=unsure,
docs=docs, instance=self.old_instance,
in_hierarchy=in_hierarchy and self.is_method())
job_set = task_handle.create_jobset('Collecting Changes', len(resources))
for file_ in resources:
job_set.started_job(file_.path)
new_content = rename_in_module(finder, new_name, resource=file_)
if new_content is not None:
changes.add_change(ChangeContents(file_, new_content))
job_set.finished_job()
if self._is_renaming_a_module():
resource = self.old_pyname.get_object().get_resource()
if self._is_allowed_to_move(resources, resource):
self._rename_module(resource, new_name, changes)
return changes
def _is_allowed_to_move(self, resources, resource):
if resource.is_folder():
try:
return resource.get_child('__init__.py') in resources
except exceptions.ResourceNotFoundError:
return False
else:
return resource in resources
def _is_renaming_a_module(self):
if isinstance(self.old_pyname.get_object(), pyobjects.AbstractModule):
return True
return False
def is_method(self):
pyname = self.old_pyname
return isinstance(pyname, pynames.DefinedName) and \
isinstance(pyname.get_object(), pyobjects.PyFunction) and \
isinstance(pyname.get_object().parent, pyobjects.PyClass)
def _rename_module(self, resource, new_name, changes):
if not resource.is_folder():
new_name = new_name + '.py'
parent_path = resource.parent.path
if parent_path == '':
new_location = new_name
else:
new_location = parent_path + '/' + new_name
changes.add_change(MoveResource(resource, new_location))
class ChangeOccurrences(object):
"""A class for changing the occurrences of a name in a scope
This class replaces the occurrences of a name. Note that it only
changes the scope containing the offset passed to the constructor.
What's more it does not have any side-effects. That is for
example changing occurrences of a module does not rename the
module; it merely replaces the occurrences of that module in a
scope with the given expression. This class is useful for
performing many custom refactorings.
"""
def __init__(self, project, resource, offset):
self.pycore = project.pycore
self.resource = resource
self.offset = offset
self.old_name = worder.get_name_at(resource, offset)
self.pymodule = self.pycore.resource_to_pyobject(self.resource)
self.old_pyname = evaluate.eval_location(self.pymodule, offset)
def get_old_name(self):
word_finder = worder.Worder(self.resource.read())
return word_finder.get_primary_at(self.offset)
def _get_scope_offset(self):
lines = self.pymodule.lines
scope = self.pymodule.get_scope().\
get_inner_scope_for_line(lines.get_line_number(self.offset))
start = lines.get_line_start(scope.get_start())
end = lines.get_line_end(scope.get_end())
return start, end
def get_changes(self, new_name, only_calls=False, reads=True, writes=True):
changes = ChangeSet('Changing <%s> occurrences to <%s>' %
(self.old_name, new_name))
scope_start, scope_end = self._get_scope_offset()
finder = occurrences.create_finder(
self.pycore, self.old_name, self.old_pyname,
imports=False, only_calls=only_calls)
new_contents = rename_in_module(
finder, new_name, pymodule=self.pymodule, replace_primary=True,
region=(scope_start, scope_end), reads=reads, writes=writes)
if new_contents is not None:
changes.add_change(ChangeContents(self.resource, new_contents))
return changes
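# Illustrative usage sketch (the resource and offset below are hypothetical):
# changer = ChangeOccurrences(project, resource, offset=120)
# changes = changer.get_changes('new_expression', only_calls=False)
# The returned ChangeSet can then be applied through the project,
# e.g. project.do(changes).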
def rename_in_module(occurrences_finder, new_name, resource=None, pymodule=None,
replace_primary=False, region=None, reads=True, writes=True):
"""Returns the changed source or `None` if there is no changes"""
if resource is not None:
source_code = resource.read()
else:
source_code = pymodule.source_code
change_collector = codeanalyze.ChangeCollector(source_code)
for occurrence in occurrences_finder.find_occurrences(resource, pymodule):
if replace_primary and occurrence.is_a_fixed_primary():
continue
if replace_primary:
start, end = occurrence.get_primary_range()
else:
start, end = occurrence.get_word_range()
if (not reads and not occurrence.is_written()) or \
(not writes and occurrence.is_written()):
continue
if region is None or region[0] <= start < region[1]:
change_collector.add_change(start, end, new_name)
return change_collector.get_changed()
def _is_local(pyname):
module, lineno = pyname.get_definition_location()
if lineno is None:
return False
scope = module.get_scope().get_inner_scope_for_line(lineno)
if isinstance(pyname, pynames.DefinedName) and \
scope.get_kind() in ('Function', 'Class'):
scope = scope.parent
return scope.get_kind() == 'Function' and \
pyname in scope.get_names().values() and \
isinstance(pyname, pynames.AssignedName)
| gpl-2.0 |
openlabs/trytond-nereid | routing.py | 1 | 15749 | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from ast import literal_eval
import pytz
from werkzeug import abort, redirect
from wtforms import Form, TextField, PasswordField, validators
from nereid import jsonify, flash, render_template, url_for, cache
from nereid.globals import session, request
from nereid.helpers import login_required, key_from_list, get_flashed_messages
from nereid.signals import login, failed_login, logout
from trytond.model import ModelView, ModelSQL, fields
from trytond.backend import TableHandler
from trytond.transaction import Transaction
from trytond.pool import Pool
from .i18n import _
__all__ = ['URLMap', 'WebSite', 'URLRule', 'URLRuleDefaults',
'WebsiteCountry', 'WebsiteCurrency']
class URLMap(ModelSQL, ModelView):
"""
URL Map
~~~~~~~
A collection of URLs for a website. This is analogous to werkzeug's
URL Map.
:param name: Name of the URL Map
:param default_subdomain: Default subdomain for URLs in this Map
:param active: Whether the URL Map is active or not.
Rules:
~~~~~~
:param rules: O2M URLRules
Advanced:
~~~~~~~~~
:param charset: default value - utf-8
:param strict_slashes: Boolean field if / in url map is taken seriously
:param unique_urls: Enable `redirect_defaults` in the URL Map and
redirects the defaults to the URL
"""
__name__ = "nereid.url_map"
name = fields.Char(
'Name', required=True, select=True,
)
default_subdomain = fields.Char(
'Default Subdomain',
)
rules = fields.One2Many(
'nereid.url_rule',
'url_map',
'Rules'
)
charset = fields.Char('Char Set')
strict_slashes = fields.Boolean('Strict Slashes')
unique_urls = fields.Boolean('Unique URLs')
active = fields.Boolean('Active')
@staticmethod
def default_active():
"By default URL is active"
return True
@staticmethod
def default_charset():
"By default characterset is utf-8"
return 'utf-8'
def get_rules_arguments(self):
"""
Constructs a list of dictionary of arguments needed
for URL Rule construction. A wrapper around the
URL RULE get_rule_arguments
"""
rule_args = [ ]
for rule in self.rules:
rule_args.append(rule.get_rule_arguments())
return rule_args
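# Illustrative sketch of the structure built above (values are hypothetical);
# each entry mirrors URLRule.get_rule_arguments():
# {'rule': '/products', 'endpoint': 'product.product.render_list',
# 'methods': ['GET'], 'build_only': False, 'defaults': {}, 'redirect_to': None}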
class LoginForm(Form):
"Default Login Form"
email = TextField(_('e-mail'), [validators.Required(), validators.Email()])
password = PasswordField(_('Password'), [validators.Required()])
class WebSite(ModelSQL, ModelView):
"""
One of the most powerful features of Nereid is the ability to
manage multiple websites from one back-end. A web site in nereid
represents a collection of URLs and settings.
:param name: Name of the web site
:param base_url: The unique URL of the website, You cannot have two
websites, with the same base_url
:param url_map: The active URL Map for the website (M2O URLMap)
:param company: The company linked with the website.
:param active: Whether the website is active or not.
"""
__name__ = "nereid.website"
#: The name field is used for both information and also as
#: the site identifier for nereid. The WSGI application requires
#: SITE argument. The SITE argument is then used to load URLs and
#: other settings for the website. Needs to be unique
name = fields.Char('Name', required=True, select=True)
#: The URLMap is made as a different object which functions as a
#: collection of Rules. This will allow easy replication of sites
#: which perform with same URL structures but different templates
url_map = fields.Many2One('nereid.url_map', 'URL Map', required=True)
#: The company to which the website belongs. Useful when creating
#: records like sale order which require a company to be present
company = fields.Many2One('company.company', 'Company', required=True)
active = fields.Boolean('Active')
#: The list of countries this website operates in. Used for generating
#: Countries list in the registration form etc.
countries = fields.Many2Many(
'nereid.website-country.country', 'website', 'country',
'Countries Available')
#: Allowed currencies in the website
currencies = fields.Many2Many(
'nereid.website-currency.currency',
'website', 'currency', 'Currencies Available')
#: Default language
default_language = fields.Many2One('ir.lang', 'Default Language',
required=True)
#: The res.user with which the nereid application will be loaded
#: .. versionadded: 0.3
application_user = fields.Many2One(
'res.user', 'Application User', required=True
)
guest_user = fields.Many2One(
'nereid.user', 'Guest user', required=True
)
timezone = fields.Selection(
[(x, x) for x in pytz.common_timezones], 'Timezone', translate=False
)
@staticmethod
def default_timezone():
return 'UTC'
@staticmethod
def default_active():
return True
@classmethod
def __setup__(cls):
super(WebSite, cls).__setup__()
cls._sql_constraints = [
('name_uniq', 'UNIQUE(name)',
'Another site with the same name already exists!')
]
@classmethod
def country_list(cls):
"""
Return the list of countries in JSON
"""
return jsonify(result = [
{'key': c.id, 'value': c.name} \
for c in request.nereid_website.countries
])
@staticmethod
def subdivision_list():
"""
Return the list of states for given country
"""
country = int(request.args.get('country', 0))
if country not in [c.id for c in request.nereid_website.countries]:
abort(404)
Subdivision = Pool().get('country.subdivision')
subdivisions = Subdivision.search([('country', '=', country)])
return jsonify(
result = [{
'id': s.id,
'name': s.name,
'code': s.code,
} for s in subdivisions
]
)
def get_urls(self, name):
"""
Return complete list of URLs
"""
URLMap = Pool().get('nereid.url_map')
websites = self.search([('name', '=', name)])
if not websites:
raise RuntimeError("Website with Name %s not found" % name)
return URLMap.get_rules_arguments(websites[0].url_map.id)
def stats(self, **arguments):
"""
Test method.
"""
return u'Request: %s\nArguments: %s\nEnviron: %s\n' \
% (request, arguments, request.environ)
@classmethod
def home(cls):
"A dummy home method which just renders home.jinja"
return render_template('home.jinja')
@classmethod
def login(cls):
"""
Simple login based on the email and password
Required post data see :class:LoginForm
"""
login_form = LoginForm(request.form)
if not request.is_guest_user and request.args.get('next'):
return redirect(request.args['next'])
if request.method == 'POST' and login_form.validate():
NereidUser = Pool().get('nereid.user')
result = NereidUser.authenticate(
login_form.email.data, login_form.password.data
)
# Result can be the following:
# 1 - Browse record of User (successful login)
# 2 - None - Login failure without message
# 3 - Any other false value (no message is shown. useful if you
# want to handle the message shown to user)
if result:
# NOTE: Translators leave %s as such
flash(_("You are now logged in. Welcome %(name)s",
name=result.display_name))
session['user'] = result.id
login.send()
if request.is_xhr:
return 'OK'
else:
return redirect(
request.values.get(
'next', url_for('nereid.website.home')
)
)
elif result is None:
flash(_("Invalid login credentials"))
failed_login.send(form=login_form)
if request.is_xhr:
return 'NOK'
return render_template('login.jinja', login_form=login_form)
@classmethod
def logout(cls):
"Log the user out"
session.pop('user', None)
logout.send()
flash(
_('You have been logged out successfully. Thanks for visiting us')
)
return redirect(
request.args.get('next', url_for('nereid.website.home'))
)
@staticmethod
def account_context():
"""This fills the account context for the template
rendering my account. Additional modules might want to fill extra
data into the context
"""
return dict(
user = request.nereid_user,
party = request.nereid_user.party,
)
@classmethod
@login_required
def account(cls):
return render_template('account.jinja', **cls.account_context())
def get_currencies(self):
"""Returns available currencies for current site
.. note::
A special method is required so that the fetch can be speeded up,
by pushing the categories to the central cache which cannot be
done directly on a browse node.
"""
cache_key = key_from_list([
Transaction().cursor.dbname,
Transaction().user,
'nereid.website.get_currencies',
])
# The website is automatically appended to the cache prefix
rv = cache.get(cache_key)
if rv is None:
rv = [{
'id': c.id,
'name': c.name,
'symbol': c.symbol,
} for c in self.currencies]
cache.set(cache_key, rv, 60*60)
return rv
@staticmethod
def _user_status():
"""Returns the commonly required status parameters of the user
This method could be inherited and components could be added
"""
rv = {
'messages': get_flashed_messages()
}
if request.is_guest_user:
rv.update({
'logged_in': False
})
else:
rv.update({
'logged_in': True,
'name': request.nereid_user.display_name
})
return rv
@classmethod
def user_status(cls):
"""
Returns a JSON of the user_status
"""
return jsonify(status=cls._user_status())
class URLRule(ModelSQL, ModelView):
"""
URL Rule
~~~~~~~~
A rule that represents a single URL pattern
:param path: Path of the URL
:param name: Name of the URL. This is used for reverse mapping, hence
needs to be unique
:param handler: The handler of this URL or the target model.method
which is called. The representation is::
<model>.<method>
For example: To call list_parties method in party.party use:
party.party.list_parties
The signature of the method being called should be:
def method(self, **arguments):
return "Hello World"
where request is the request object and arguments is the dictionary
of the values generated from the match of the URL
:param active: Whether the URL rule is active or not.
Advanced
~~~~~~~~~
:param defaults: Defaults of the URL (O2M - URLRuleDefaults)
:param method: POST, GET,
:param only_for_generation: URL will not be mapped, but can be used
for URL generation. Example for static pages, where content
delivery is managed by apache, but URL generation is necessary
:param redirect_to: (M2O self) Another URL to which the redirect has to
be done
:param sequence: Numeric sequence of the URL Map.
:param url_map: Relation field for url_rule o2m
"""
__name__ = "nereid.url_rule"
_rec_name = 'rule'
rule = fields.Char('Rule', required=True, select=True,)
endpoint = fields.Char('Endpoint', select=True,)
active = fields.Boolean('Active')
defaults = fields.One2Many('nereid.url_rule_defaults', 'rule', 'Defaults')
# Supported HTTP methods
http_method_get = fields.Boolean('GET')
http_method_post = fields.Boolean('POST')
http_method_patch = fields.Boolean('PATCH')
http_method_put = fields.Boolean('PUT')
http_method_delete = fields.Boolean('DELETE')
only_for_genaration = fields.Boolean('Only for Generation')
redirect_to = fields.Char('Redirect To')
sequence = fields.Integer('Sequence', required=True,)
url_map = fields.Many2One('nereid.url_map', 'URL Map')
@classmethod
def __setup__(cls):
super(URLRule, cls).__setup__()
cls._order.insert(0, ('sequence', 'ASC'))
@staticmethod
def default_active():
return True
@staticmethod
def default_http_method_get():
return True
def get_http_methods(self):
"""
Returns an iterable of HTTP methods that the URL has to support.
.. versionadded: 2.4.0.6
"""
methods = []
if self.http_method_get:
methods.append('GET')
if self.http_method_post:
methods.append('POST')
if self.http_method_put:
methods.append('PUT')
if self.http_method_delete:
methods.append('DELETE')
if self.http_method_patch:
methods.append('PATCH')
return methods
def get_rule_arguments(self):
"""
Return the arguments of a Rule in the corresponding format
"""
defaults = dict(
[(i.key, i.value) for i in self.defaults]
)
return {
'rule': self.rule,
'endpoint': self.endpoint,
'methods': self.get_http_methods(),
'build_only': self.only_for_genaration,
'defaults': defaults,
'redirect_to': self.redirect_to or None,
}
class URLRuleDefaults(ModelSQL, ModelView):
"""
Defaults for the URL
:param key: The char for the default's key
:param value: The Value for the default's Value
:param Rule: M2O Rule
"""
__name__ = "nereid.url_rule_defaults"
_rec_name = 'key'
key = fields.Char('Key', required=True, select=True)
value = fields.Char('Value', required=True, select=True)
rule = fields.Many2One('nereid.url_rule', 'Rule', required=True,
select=True)
class WebsiteCountry(ModelSQL):
"Website Country Relations"
__name__ = 'nereid.website-country.country'
website = fields.Many2One('nereid.website', 'Website')
country = fields.Many2One('country.country', 'Country')
class WebsiteCurrency(ModelSQL):
"Currencies to be made available on website"
__name__ = 'nereid.website-currency.currency'
_table = 'website_currency_rel'
website = fields.Many2One(
'nereid.website', 'Website',
ondelete='CASCADE', select=1, required=True)
currency = fields.Many2One(
'currency.currency', 'Currency',
ondelete='CASCADE', select=1, required=True)
| gpl-3.0 |
mrgloom/convnet-1 | py/conv_cpu.py | 3 | 31841 | """Convolution methods on CPU."""
# These are extremely slow.
# Their main purpose is testing fast GPU implementations.
import numpy as np
def DivUp(a, b):
return (a + b - 1) / b
def ConvUp(images, filters, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels), dtype=np.float32)
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
input_data = np.zeros((num_images, kernel_size_x * kernel_size_y * num_input_channels), dtype=np.float32)
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
offset = y_pos * num_modules_x + x_pos
for c in xrange(num_input_channels):
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
input_data_x = x - start_x
input_data_y = y - start_y
input_data_index = (c * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
images_index = (c * image_size_y + y) * image_size_x + x
input_data[:, input_data_index] = images[:, images_index]
output_data = np.dot(input_data, filters.T)
for c in xrange(num_output_channels):
output[:, offset + c * num_modules_x * num_modules_y] = output_data[:, c]
return output
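# Worked example (editorial, hypothetical sizes): for image_size_x = image_size_y = 32,
# kernel_size = 5, stride = 1 and padding = 2, num_modules_x = num_modules_y =
# (32 + 2*2 - 5)/1 + 1 = 32, so the output has 32*32*num_output_channels columns.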
def ConvDown(derivs, filters, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels), dtype=np.float32)
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
deriv = np.zeros((num_images, num_output_channels), dtype=np.float32)
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
offset = y_pos * num_modules_x + x_pos
for c in xrange(num_output_channels):
deriv[:, c] = derivs[:, offset + c * num_modules_x * num_modules_y]
d_input = np.dot(deriv, filters)
for c in xrange(num_input_channels):
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
input_data_x = x - start_x
input_data_y = y - start_y
input_data_index = (c * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
images_index = (c * image_size_y + y) * image_size_x + x
output[:, images_index] += d_input[:, input_data_index]
return output
def ConvOutp(images, derivs, image_shape, conv_spec, partial_sum_y=0, partial_sum_x=0):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
if partial_sum_x == 0:
partial_sum_x = num_modules_x
if partial_sum_y == 0:
partial_sum_y = num_modules_y
partial_sum_locs_x = DivUp(num_modules_x, partial_sum_x)
partial_sum_locs_y = DivUp(num_modules_y, partial_sum_y)
input_size = kernel_size_y * kernel_size_x * num_input_channels
output = np.zeros((num_output_channels, input_size), dtype=np.float32)
output2 = np.zeros((num_output_channels, input_size), dtype=np.float32)
output_psums = np.zeros((num_output_channels, input_size * partial_sum_locs_x * partial_sum_locs_y), dtype=np.float32)
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
input_data = np.zeros((num_images, input_size), dtype=np.float32)
deriv_data = np.zeros((num_images, num_output_channels), dtype=np.float32)
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
offset = y_pos * num_modules_x + x_pos
for c in xrange(num_input_channels):
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
input_data_x = x - start_x
input_data_y = y - start_y
input_data_index = (c * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
images_index = (c * image_size_y + y) * image_size_x + x
input_data[:, input_data_index] = images[:, images_index]
for c in xrange(num_output_channels):
deriv_data[:, c] = derivs[:, offset + c * num_modules_x * num_modules_y]
partial_sum_id_y = y_pos / partial_sum_y
partial_sum_id_x = x_pos / partial_sum_x
partial_sum_id = partial_sum_id_y * partial_sum_locs_x + partial_sum_id_x
outp = np.dot(deriv_data.T, input_data)
output_psums[:, partial_sum_id * input_size : (partial_sum_id + 1) * input_size] += outp
output += outp
for partial_sum_id_y in xrange(partial_sum_locs_y):
for partial_sum_id_x in xrange(partial_sum_locs_x):
partial_sum_id = partial_sum_id_y * partial_sum_locs_x + partial_sum_id_x
output2 += output_psums[:, partial_sum_id * input_size : (partial_sum_id + 1) * input_size]
if not np.allclose(output2, output):
print 'Error', np.abs(output - output2).max()
print output
print output2
return output, output_psums
def MaxPool(images, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
assert (num_output_channels == num_input_channels)
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels), dtype=np.float32)
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
offset = y_pos * num_modules_x + x_pos
for c in xrange(num_input_channels):
input_data = np.zeros(num_images, dtype=np.float32) - np.inf
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
images_index = (c * image_size_y + y) * image_size_x + x
input_data = np.maximum(input_data, images[:, images_index])
output[:, offset + c * num_modules_x * num_modules_y] = input_data
return output
def MaxPool3D(images, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
assert (num_output_channels == num_input_channels)
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels * num_modules_t), dtype=np.float32)
for t_pos in xrange(num_modules_t):
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
start_t = t_pos * stride_t - padding_t
start_y = y_pos * stride_y - padding_y
start_x = x_pos * stride_x - padding_x
offset = (t_pos * num_output_channels * num_modules_y + y_pos) * num_modules_x + x_pos
for c in xrange(num_input_channels):
input_data = np.zeros(num_images, dtype=np.float32) - np.inf
for t in xrange(start_t, start_t + kernel_size_t):
if t < 0 or t >= image_size_t:
continue
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
input_data = np.maximum(input_data, images[:, images_index])
output[:, offset + c * num_modules_x * num_modules_y] = input_data
return output
def AvgPool3D(images, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
assert (num_output_channels == num_input_channels)
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels * num_modules_t), dtype=np.float32)
for t_pos in xrange(num_modules_t):
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
start_t = t_pos * stride_t - padding_t
start_y = y_pos * stride_y - padding_y
start_x = x_pos * stride_x - padding_x
offset = (t_pos * num_output_channels * num_modules_y + y_pos) * num_modules_x + x_pos
for c in xrange(num_input_channels):
input_data = np.zeros(num_images, dtype=np.float32)
region_size = 0
for t in xrange(start_t, start_t + kernel_size_t):
if t < 0 or t >= image_size_t:
continue
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
input_data += images[:, images_index]
region_size += 1
output[:, offset + c * num_modules_x * num_modules_y] = input_data / region_size
return output
def AvgPool(images, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
assert (num_output_channels == num_input_channels)
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels), dtype=np.float32)
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
offset = y_pos * num_modules_x + x_pos
for c in xrange(num_input_channels):
input_data = np.zeros(num_images, dtype=np.float32)
region_size = 0
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
images_index = (c * image_size_y + y) * image_size_x + x
input_data += images[:, images_index]
region_size += 1
output[:, offset + c * num_modules_x * num_modules_y] = input_data / region_size
return output
def MaxPoolUndo(images, maxes, derivs, image_shape, deriv_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
assert (num_output_channels == num_input_channels)
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
output = np.zeros(images.shape, dtype=np.float32)
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
for c in xrange(num_input_channels):
offset = x_pos + num_modules_x * (y_pos + num_modules_y * c)
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
images_index = (c * image_size_y + y) * image_size_x + x
for i in xrange(num_images):
if images[i, images_index] == maxes[i, offset]:
output[i, images_index] += derivs[i, offset]
return output
def MaxPoolRprop(images, R_images, maxes, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
assert (num_output_channels == num_input_channels)
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
output = np.zeros(maxes.shape, dtype=np.float32)
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
for c in xrange(num_input_channels):
offset = x_pos + num_modules_x * (y_pos + num_modules_y * c)
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
images_index = (c * image_size_y + y) * image_size_x + x
for i in xrange(num_images):
if images[i, images_index] == maxes[i, offset]:
output[i, offset] += R_images[i, images_index]
return output
def MaxPool3DUndo(images, maxes, derivs, image_shape, deriv_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
assert (num_output_channels == num_input_channels)
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
output = np.zeros(images.shape, dtype=np.float32)
for t_pos in xrange(num_modules_t):
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
start_t = t_pos * stride_t - padding_t
for c in xrange(num_input_channels):
offset = ((t_pos * num_output_channels + c) * num_modules_y + y_pos) * num_modules_x + x_pos
for t in xrange(start_t, start_t + kernel_size_t):
if t < 0 or t >= image_size_t:
continue
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
for i in xrange(num_images):
if images[i, images_index] == maxes[i, offset]:
output[i, images_index] += derivs[i, offset]
return output
def AvgPool3DUndo(derivs, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
assert (num_output_channels == num_input_channels)
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels * image_size_t), dtype=np.float32)
for t_pos in xrange(num_modules_t):
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
start_t = t_pos * stride_t - padding_t
end_y = start_y + kernel_size_y
end_x = start_x + kernel_size_x
end_t = start_t + kernel_size_t
start2_y = min(max(start_y, 0), image_size_y)
start2_x = min(max(start_x, 0), image_size_x)
start2_t = min(max(start_t, 0), image_size_t)
end_y = min(max(end_y, 0), image_size_y)
end_x = min(max(end_x, 0), image_size_x)
end_t = min(max(end_t, 0), image_size_t)
region_size = (end_y - start2_y) * (end_x - start2_x) * (end_t - start2_t)
for c in xrange(num_input_channels):
offset = ((t_pos * num_output_channels + c) * num_modules_y + y_pos) * num_modules_x + x_pos
for t in xrange(start2_t, end_t):
for y in xrange(start2_y, end_y):
for x in xrange(start2_x, end_x):
images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
output[:, images_index] += derivs[:, offset] / region_size
return output
def AvgPoolUndo(derivs, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
assert (num_input_channels == num_output_channels)
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels), dtype=np.float32)
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
end_y = start_y + kernel_size_y
end_x = start_x + kernel_size_x
start2_y = min(max(start_y, 0), image_size_y)
start2_x = min(max(start_x, 0), image_size_x)
end_y = min(max(end_y, 0), image_size_y)
end_x = min(max(end_x, 0), image_size_x)
region_size = (end_y - start2_y) * (end_x - start2_x)
for c in xrange(num_input_channels):
offset = (c * num_modules_y + y_pos) * num_modules_x + x_pos
for y in xrange(start2_y, end_y):
for x in xrange(start2_x, end_x):
images_index = (c * image_size_y + y) * image_size_x + x
output[:, images_index] += derivs[:, offset] / region_size
return output
def GetBounds(i, numF, num_channels, blocked):
if blocked:
startPos = (i / numF) * numF
else:
startPos = i - numF/2
endPos = min(startPos + numF, num_channels)
startPos = max(0, startPos)
return startPos, endPos
def GetBoundsInv(i, numF, num_channels, blocked):
"""Return the set of filters such that i appears in their normalization group."""
if blocked:
startPos = (i / numF) * numF
else:
startPos = i - numF + numF/2 + 1
endPos = min(startPos + numF, num_channels)
startPos = max(0, startPos)
return startPos, endPos
def ComputeDenoms(data, numF, blocked, addScale):
denoms = np.zeros(data.shape, dtype=data.dtype)
num_images, num_channels = data.shape
for i in xrange(num_channels):
startPos, endPos = GetBounds(i, numF, num_channels, blocked)
for j in xrange(startPos, endPos):
denoms[:, i] += data[:, j]**2
denoms = 1 + addScale * denoms
return denoms
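# Hypothetical usage sketch (not from the original repository): defines, but does
# not call, a small worked example of ComputeDenoms so the channel grouping done
# by GetBounds is visible on concrete numbers. All values are illustrative.
def _example_compute_denoms():
    """Checks ComputeDenoms on one image with 4 channels and group size 2."""
    data = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
    # With blocked=True the groups are channels {0, 1} and {2, 3}; the summed
    # squares are 1 + 4 = 5 and 9 + 16 = 25, and addScale=0.5 gives
    # denom = 1 + 0.5 * sum for every channel in the group.
    denoms = ComputeDenoms(data, 2, True, 0.5)
    assert np.allclose(denoms, [[3.5, 3.5, 13.5, 13.5]])
    return denoms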
def ResponseNormCrossMap(images, image_shape, numF, add_scale, pow_scale, blocked):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels), dtype=np.float32)
for y_pos in xrange(image_size_y):
for x_pos in xrange(image_size_x):
this_loc_all_channels = np.zeros((num_images, num_input_channels), dtype=np.float32)
for c in xrange(num_input_channels):
loc_id = x_pos + image_size_x * (y_pos + image_size_y * c)
this_loc_all_channels[:, c] = images[:, loc_id]
denoms = ComputeDenoms(this_loc_all_channels, numF, blocked, add_scale)
this_loc_all_channels *= np.power(denoms, -pow_scale)
for c in xrange(num_input_channels):
loc_id = x_pos + image_size_x * (y_pos + image_size_y * c)
output[:, loc_id] = this_loc_all_channels[:, c]
return output
def ResponseNormCrossMapUndo(derivs, images, image_shape, numF, add_scale, pow_scale, blocked):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels), dtype=np.float32)
for y_pos in xrange(image_size_y):
for x_pos in xrange(image_size_x):
this_loc_all_channels_data = np.zeros((num_images, num_input_channels), dtype=np.float32)
this_loc_all_channels_deriv = np.zeros((num_images, num_input_channels), dtype=np.float32)
for c in xrange(num_input_channels):
loc_id = x_pos + image_size_x * (y_pos + image_size_y * c)
this_loc_all_channels_data[:, c] = images[:, loc_id]
this_loc_all_channels_deriv[:, c] = derivs[:, loc_id]
denoms = ComputeDenoms(this_loc_all_channels_data, numF, blocked, add_scale)
for c in xrange(num_input_channels):
loc_id = x_pos + image_size_x * (y_pos + image_size_y * c)
startPos, endPos = GetBoundsInv(c, numF, num_input_channels, blocked)
output[:, loc_id] = this_loc_all_channels_deriv[:, c] * np.power(denoms[:, c], -pow_scale) \
- 2 * add_scale * pow_scale * this_loc_all_channels_data[:, c] * \
(this_loc_all_channels_deriv[:, startPos:endPos] \
* this_loc_all_channels_data[:, startPos:endPos] \
* np.power(denoms[:, startPos:endPos], -pow_scale-1)).sum(axis=1)
return output
def ResponseNormCrossMapRprop(images, derivs, image_shape, numF, add_scale, pow_scale, blocked):
num_images, image_size_x, image_size_y, num_input_channels = image_shape
output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels), dtype=np.float32)
for y_pos in xrange(image_size_y):
for x_pos in xrange(image_size_x):
this_loc_all_channels_data = np.zeros((num_images, num_input_channels), dtype=np.float32)
this_loc_all_channels_deriv = np.zeros((num_images, num_input_channels), dtype=np.float32)
for c in xrange(num_input_channels):
loc_id = x_pos + image_size_x * (y_pos + image_size_y * c)
this_loc_all_channels_data[:, c] = images[:, loc_id]
this_loc_all_channels_deriv[:, c] = derivs[:, loc_id]
denoms = ComputeDenoms(this_loc_all_channels_data, numF, blocked, add_scale)
for c in xrange(num_input_channels):
loc_id = x_pos + image_size_x * (y_pos + image_size_y * c)
startPos, endPos = GetBounds(c, numF, num_input_channels, blocked)
output[:, loc_id] = this_loc_all_channels_deriv[:, c] * np.power(denoms[:, c], -pow_scale) \
- 2 * add_scale * pow_scale * this_loc_all_channels_data[:, c] * \
np.power(denoms[:, c], -pow_scale-1) * \
(this_loc_all_channels_deriv[:, startPos:endPos] \
* this_loc_all_channels_data[:, startPos:endPos]).sum(axis=1)
return output
def ConvUp3D(images, filters, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels * num_modules_t), dtype=np.float32)
for t_pos in xrange(num_modules_t):
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
input_data = np.zeros((num_images, kernel_size_x * kernel_size_y * num_input_channels * kernel_size_t), dtype=np.float32)
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
start_t = t_pos * stride_t - padding_t
for c in xrange(num_input_channels):
for t in xrange(start_t, start_t + kernel_size_t):
if t < 0 or t >= image_size_t:
continue
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
input_data_x = x - start_x
input_data_y = y - start_y
input_data_t = t - start_t
input_data_index = ((input_data_t * num_input_channels + c) * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
input_data[:, input_data_index] = images[:, images_index]
output_data = np.dot(input_data, filters.T)
for c in xrange(num_output_channels):
offset = ((t_pos * num_output_channels + c) * num_modules_y + y_pos) * num_modules_x + x_pos
output[:, offset] = output_data[:, c]
return output
def ConvDown3D(derivs, filters, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels * image_size_t), dtype=np.float32)
for t_pos in xrange(num_modules_t):
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
deriv = np.zeros((num_images, num_output_channels), dtype=np.float32)
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
start_t = t_pos * stride_t - padding_t
for c in xrange(num_output_channels):
offset = ((t_pos * num_output_channels + c) * num_modules_y + y_pos) * num_modules_x + x_pos
deriv[:, c] = derivs[:, offset]
d_input = np.dot(deriv, filters)
for c in xrange(num_input_channels):
for t in xrange(start_t, start_t + kernel_size_t):
if t < 0 or t >= image_size_t:
continue
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
input_data_x = x - start_x
input_data_y = y - start_y
input_data_t = t - start_t
input_data_index = ((input_data_t * num_input_channels + c) * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
output[:, images_index] += d_input[:, input_data_index]
return output
def ConvOutp3D(images, derivs, image_shape, conv_spec):
num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
input_size = kernel_size_y * kernel_size_x * num_input_channels * kernel_size_t
output = np.zeros((num_output_channels, input_size), dtype=np.float32)
for t_pos in xrange(num_modules_t):
for y_pos in xrange(num_modules_y):
for x_pos in xrange(num_modules_x):
input_data = np.zeros((num_images, input_size), dtype=np.float32)
deriv = np.zeros((num_images, num_output_channels), dtype=np.float32)
start_x = x_pos * stride_x - padding_x
start_y = y_pos * stride_y - padding_y
start_t = t_pos * stride_t - padding_t
for c in xrange(num_input_channels):
for t in xrange(start_t, start_t + kernel_size_t):
if t < 0 or t >= image_size_t:
continue
for y in xrange(start_y, start_y + kernel_size_y):
if y < 0 or y >= image_size_y:
continue
for x in xrange(start_x, start_x + kernel_size_x):
if x < 0 or x >= image_size_x:
continue
input_data_x = x - start_x
input_data_y = y - start_y
input_data_t = t - start_t
input_data_index = ((input_data_t * num_input_channels + c) * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
input_data[:, input_data_index] = images[:, images_index]
for c in xrange(num_output_channels):
offset = ((t_pos * num_output_channels + c) * num_modules_y + y_pos) * num_modules_x + x_pos
deriv[:, c] = derivs[:, offset]
output += np.dot(deriv.T, input_data)
return output
| bsd-2-clause |
jmimu/lander1 | map/map.py | 1 | 28865 |
ts_palette =[16744703, 0, 16777215, 16384, 32768, 65280, 127, 255, 8421631, 10485760, 12105912, 16744448, 65535, 4342338, 16512, 8388672]
tile_base_index = 0
ts_tiles = []
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,4,4],[0,0,0,0,4,4,3,3],[0,0,4,4,3,3,3,3],[4,4,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,4,4,4],[0,0,4,4,4,3,3,3],[4,4,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,0,0,4],[0,0,0,0,0,0,0,4],[0,0,0,0,0,0,4,3],[0,0,0,0,0,0,4,3],[0,0,0,0,0,0,4,3],[0,0,0,0,0,4,3,3],[0,0,0,0,0,4,3,3],[0,0,0,0,0,4,3,3]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,4],[0,0,0,4,4,4,4,3],[4,4,4,3,3,14,14,3]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,4,4,4,4],[4,4,4,4,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[4,4,0,0,0,0,0,0],[3,3,4,4,0,0,0,0],[3,3,3,3,4,4,0,0],[3,3,3,3,3,3,4,4]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[4,4,4,0,0,0,0,0],[3,3,3,4,4,4,0,0],[3,3,3,3,3,3,4,4]])
ts_tiles.append([[4,0,0,0,0,0,0,0],[4,0,0,0,0,0,0,0],[3,4,0,0,0,0,0,0],[3,4,0,0,0,0,0,0],[3,4,0,0,0,0,0,0],[3,3,4,0,0,0,0,0],[3,3,4,0,0,0,0,0],[3,3,4,0,0,0,0,0]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[4,0,0,0,0,0,0,0],[3,4,4,4,4,0,0,0],[3,14,14,3,3,4,4,4]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[4,4,4,4,0,0,0,0],[3,3,3,3,4,4,4,4],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,0,0,4],[0,0,0,0,0,0,0,4],[0,0,0,0,0,0,4,3],[0,0,0,0,0,0,4,3],[0,0,0,0,0,4,3,3],[0,0,0,0,0,4,3,3],[0,0,0,0,4,3,3,3],[0,0,0,0,4,3,3,3]])
ts_tiles.append([[4,0,0,0,0,0,0,0],[4,0,0,0,0,0,0,0],[3,4,0,0,0,0,0,0],[3,4,0,0,0,0,0,0],[3,3,4,0,0,0,0,0],[3,3,4,0,0,0,0,0],[3,3,3,4,0,0,0,0],[3,3,3,4,0,0,0,0]])
ts_tiles.append([[0,0,0,0,0,0,0,4],[0,0,0,0,0,0,4,3],[0,0,0,0,0,4,3,3],[0,0,0,0,4,3,3,3],[0,0,0,4,3,3,3,3],[0,0,4,3,3,3,3,3],[0,4,3,3,3,3,3,3],[4,3,3,3,3,14,14,3]])
ts_tiles.append([[0,0,0,0,0,0,4,4],[0,0,0,0,4,4,3,3],[0,0,4,4,3,3,3,3],[4,4,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,14,3,14,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,4],[0,0,0,0,4,4,4,3],[0,4,4,4,3,3,3,3],[4,3,3,3,3,3,3,3],[3,3,3,3,3,3,14,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,0,4,4],[0,0,0,4,4,4,3,3],[4,4,4,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,14,14,14,14,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,4,3,3,3,3],[0,0,0,4,3,3,3,3],[0,0,4,3,3,3,3,3],[0,0,4,3,3,3,14,3],[0,4,3,3,3,3,3,3],[0,4,3,3,3,3,3,3],[4,3,3,3,3,3,14,3],[4,3,3,3,3,3,3,3]])
ts_tiles.append([[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,4,3,3,3,3,3],[0,0,4,3,3,3,3,3],[0,0,4,3,3,3,3,3],[0,4,3,3,3,3,3,3],[0,4,3,3,3,3,3,3],[0,4,3,3,3,3,3,3],[4,3,3,3,3,3,3,3],[4,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,4,3,3],[0,0,0,0,4,3,3,3],[0,0,0,0,4,3,3,3],[0,0,0,0,4,3,3,3],[0,0,0,4,3,3,3,3],[0,0,0,4,3,3,3,3],[0,0,0,4,3,3,3,3],[0,0,4,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,4,4,4,4],[4,4,4,4,3,3,3,3],[3,3,3,3,3,3,3,14],[3,3,3,3,3,3,3,14],[3,3,3,3,14,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,4,4,4],[0,4,4,4,4,3,3,3],[4,3,3,3,3,3,3,3],[3,3,3,14,3,3,3,3],[3,3,3,14,14,3,3,3],[14,3,3,3,14,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[3,3,3,3,3,3,3,3],[3,3,3,3,14,3,3,3],[3,3,14,14,3,3,3,3],[3,3,14,3,3,3,3,3],[3,3,14,3,3,3,3,3],[3,3,14,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[3,3,3,3,3,3,14,3],[3,3,3,3,3,3,14,14],[3,3,3,3,3,3,14,14],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,14,3,14,3,3,3],[3,3,14,14,14,3,3,3],[3,14,14,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[4,0,0,0,0,0,0,0],[3,4,0,0,0,0,0,0],[3,3,4,0,0,0,0,0],[3,3,3,4,0,0,0,0],[3,3,3,3,4,0,0,0],[3,3,3,3,3,4,0,0],[3,3,3,3,3,3,4,0],[3,14,14,3,3,3,3,4]])
ts_tiles.append([[4,4,0,0,0,0,0,0],[3,3,4,4,0,0,0,0],[3,3,3,3,4,4,0,0],[3,3,3,3,3,3,4,4],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,14,3,14,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[4,0,0,0,0,0,0,0],[3,4,4,4,0,0,0,0],[3,3,3,3,4,4,4,0],[3,3,3,3,3,3,3,4],[3,14,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[4,4,0,0,0,0,0,0],[3,3,4,4,4,0,0,0],[3,3,3,3,3,4,4,4],[3,3,3,3,3,3,3,3],[3,3,3,14,14,14,14,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[3,3,3,3,4,0,0,0],[3,3,3,3,4,0,0,0],[3,3,3,3,3,4,0,0],[3,14,3,3,3,4,0,0],[3,3,3,3,3,3,4,0],[3,3,3,3,3,3,4,0],[3,14,3,3,3,3,3,4],[3,3,3,3,3,3,3,4]])
ts_tiles.append([[3,3,3,3,3,4,0,0],[3,3,3,3,3,4,0,0],[3,3,3,3,3,4,0,0],[3,3,3,3,3,3,4,0],[3,3,3,3,3,3,4,0],[3,3,3,3,3,3,4,0],[3,3,3,3,3,3,3,4],[3,3,3,3,3,3,3,4]])
ts_tiles.append([[3,3,4,0,0,0,0,0],[3,3,3,4,0,0,0,0],[3,3,3,4,0,0,0,0],[3,3,3,4,0,0,0,0],[3,3,3,3,4,0,0,0],[3,3,3,3,4,0,0,0],[3,3,3,3,4,0,0,0],[3,3,3,3,3,4,0,0]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[4,4,4,4,0,0,0,0],[3,3,3,3,4,4,4,4],[14,3,3,3,3,3,3,3],[14,3,3,3,3,3,3,3],[3,3,3,14,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[4,4,4,0,0,0,0,0],[3,3,3,4,4,4,4,0],[3,3,3,3,3,3,3,4],[3,3,3,3,14,3,3,3],[3,3,3,14,14,3,3,3],[3,3,3,14,3,3,3,14],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[0,0,0,0,0,12,12,12],[0,0,12,12,12,12,12,12],[12,12,12,12,7,7,6,6],[0,12,12,12,12,7,7,6],[0,0,0,12,12,12,12,12],[0,0,0,0,0,0,12,12],[0,0,0,0,0,0,0,0]])
ts_tiles.append([[12,12,7,6,6,7,12,12],[12,12,7,7,7,7,12,12],[0,12,12,7,7,12,12,0],[0,12,12,7,7,12,12,0],[0,0,12,12,12,12,0,0],[0,0,12,12,12,12,0,0],[0,0,0,12,12,0,0,0],[0,0,0,12,12,0,0,0]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[12,12,12,0,0,0,0,0],[12,12,12,12,12,12,0,0],[6,6,7,7,12,12,12,12],[6,7,7,12,12,12,12,0],[12,12,12,12,12,0,0,0],[12,12,0,0,0,0,0,0],[0,0,0,0,0,0,0,0]])
ts_tiles.append([[0,0,0,0,2,0,0,0],[0,0,0,2,1,2,0,0],[0,0,2,1,1,1,2,0],[0,2,1,1,2,1,1,2],[0,2,1,1,1,2,1,2],[0,2,1,1,1,1,1,2],[0,0,2,1,1,1,2,0],[0,0,0,2,2,2,0,0]])
ts_tiles.append([[0,0,0,0,0,0,0,0],[3,6,3,6,3,6,3,6],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3],[3,3,3,3,3,3,3,3]])
ts_tiles.append([[0,0,0,0,0,10,0,10],[0,0,0,0,10,0,0,0],[0,0,0,0,10,0,10,0],[0,0,0,0,0,0,0,10],[0,0,0,10,0,10,0,10],[0,0,0,0,0,10,10,12],[0,0,10,0,10,0,10,12],[0,0,10,10,0,10,12,12]])
ts_tiles.append([[10,0,10,0,0,0,0,0],[0,0,0,10,0,0,0,0],[0,10,0,10,0,0,0,0],[10,0,0,0,0,0,0,0],[10,0,10,0,10,0,0,0],[12,10,10,0,0,0,0,0],[12,10,0,10,0,10,0,0],[12,12,10,0,10,10,0,0]])
ts_tiles.append([[0,10,0,10,12,10,12,12],[0,0,12,0,12,12,12,12],[10,0,10,12,12,12,12,12],[0,12,10,12,12,12,12,12],[10,0,12,12,12,12,12,12],[0,10,10,12,12,12,12,12],[0,12,12,12,12,12,12,7],[0,0,12,12,12,12,12,7]])
ts_tiles.append([[12,12,10,12,10,0,10,0],[12,12,12,12,0,12,0,0],[12,12,12,12,12,10,0,10],[12,12,12,12,12,10,12,0],[12,12,12,12,12,12,0,10],[12,12,12,12,12,10,10,0],[7,12,12,12,12,12,12,0],[7,12,12,12,12,12,0,0]])
ts_tiles.append([[12,0,12,12,12,12,12,7],[0,12,12,12,12,12,7,7],[0,12,12,12,12,12,7,6],[12,12,12,12,12,12,7,6],[0,12,12,12,12,7,7,6],[0,0,12,12,12,7,7,6],[0,0,12,12,12,7,6,6],[0,0,0,12,12,12,6,6]])
ts_tiles.append([[7,12,12,12,12,12,0,12],[7,7,12,12,12,12,12,0],[6,7,12,12,12,12,12,0],[6,7,12,12,12,12,12,12],[6,7,7,12,12,12,12,0],[6,7,7,12,12,12,0,0],[6,6,7,12,12,12,0,0],[6,6,12,12,12,0,0,0]])
ts_tiles.append([[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,1,7],[0,0,0,0,0,1,6,7],[0,0,0,0,1,6,7,7],[0,0,0,0,1,10,2,7],[0,0,0,0,1,10,2,7],[0,0,0,0,1,10,2,7]])
ts_tiles.append([[1,0,0,0,0,0,0,0],[1,0,0,0,0,0,0,0],[7,1,0,0,0,0,0,0],[7,6,1,0,0,0,0,0],[7,7,6,1,0,0,0,0],[7,2,10,1,0,0,0,0],[7,2,10,1,0,0,0,0],[7,2,10,1,0,0,0,0]])
ts_tiles.append([[0,0,0,0,1,6,7,2],[0,0,0,0,1,6,7,2],[0,0,0,0,1,6,7,2],[0,0,0,0,1,10,2,7],[0,0,0,0,1,10,2,7],[0,0,0,0,1,10,2,7],[0,0,0,1,6,7,7,7],[0,0,1,6,7,7,7,7]])
ts_tiles.append([[2,7,6,1,0,0,0,0],[2,7,6,1,0,0,0,0],[2,7,6,1,0,0,0,0],[7,2,10,1,0,0,0,0],[7,2,10,1,0,0,0,0],[7,2,10,1,0,0,0,0],[7,7,7,6,1,0,0,0],[7,7,7,7,6,1,0,0]])
ts_tiles.append([[0,0,1,6,7,7,7,7],[0,0,1,6,7,7,7,7],[0,1,6,7,7,7,1,1],[0,1,6,7,7,1,1,6],[0,1,6,7,1,0,1,6],[0,1,6,7,1,0,1,6],[0,1,6,6,1,0,1,6],[0,0,1,1,0,0,0,1]])
ts_tiles.append([[7,7,7,7,6,1,0,0],[7,7,7,7,6,1,0,0],[1,1,7,7,7,6,1,0],[6,1,1,7,7,6,1,0],[6,1,0,1,7,6,1,0],[6,1,0,1,7,6,1,0],[6,1,0,1,6,6,1,0],[1,0,0,0,1,1,0,0]])
ts_tiles.append([[0,0,0,0,0,0,1,1],[0,1,1,0,0,1,9,9],[0,1,13,1,0,1,9,11],[0,1,13,13,1,1,9,11],[0,0,1,13,2,1,9,11],[0,0,0,1,10,2,13,13],[0,0,0,0,1,10,2,2],[0,0,0,0,0,1,10,2]])
ts_tiles.append([[1,1,0,0,0,0,0,0],[9,9,1,0,0,1,1,0],[11,9,1,0,1,13,1,0],[11,9,1,1,13,13,1,0],[11,9,1,2,13,1,0,0],[13,13,2,10,1,0,0,0],[2,2,10,1,0,0,0,0],[2,10,1,0,0,0,0,0]])
ts_tiles.append([[0,0,0,0,0,1,10,2],[0,0,0,0,0,1,13,2],[0,0,0,0,0,1,10,13],[0,0,0,0,0,1,10,2],[0,0,0,0,0,1,10,1],[0,0,0,0,1,13,13,1],[0,0,0,1,13,13,13,1],[0,0,0,1,1,1,1,1]])
ts_tiles.append([[2,10,1,0,0,0,0,0],[2,13,1,0,0,0,0,0],[13,10,1,0,0,0,0,0],[2,10,1,0,0,0,0,0],[1,10,1,0,0,0,0,0],[1,13,13,1,0,0,0,0],[1,13,13,13,1,0,0,0],[1,1,1,1,1,0,0,0]])
ts_tiles.append([[-1,-1,-1,-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1,-1,-1,-1]])
ts_mapwidth = 32
ts_mapheight = 28
ts_map =[
[ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 ,
18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 32 ],
[ 33 , 34 , 35 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 36 , 37 , 38 , 39 , 40 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 41 , 42 , 43 ,
44 , 45 , 46 , 47 , 48 , 49 , 50 , 51 , 52 , 53 , 54 , 55 , 56 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 5 , 6 , 22 , 23 , 28 , 7 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 4 ],
[ 1 , 1 , 1 , 12 , 19 , 19 , 19 , 19 , 19 , 19 , 40 , 40 , 34 , 11 , 10 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 21 ],
[ 1 , 1 , 1 , 18 , 19 , 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 30 ,
29 , 8 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 20 ],
[ 1 , 2 , 15 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 13 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 12 , 24 ],
[ 14 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 31 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 18 , 19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 25 , 19 ,
19 , 19 , 19 , 19 , 27 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 2 , 15 , 19 , 19 ],
[ 19 , 19 , 19 , 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 26 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 13 , 1 , 1 , 1 , 1 , 1 , 1 , 14 , 19 , 19 , 19 , 19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 31 , 1 , 1 , 1 , 1 , 1 , 12 , 19 , 19 , 19 , 19 , 19 ],
[ 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 27 , 1 , 1 , 1 , 1 , 18 , 19 , 19 , 19 , 19 ,
26 ],
[ 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 40 , 40 , 40 , 40 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 25 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 26 , 19 ,
19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 25 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 ,
25 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ]
]
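# Note: ts_mapwidth, ts_mapheight and ts_map are rebound by each of the blocks
# below, so only the last ts_map definition is the one Main() writes out.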
ts_mapwidth = 32
ts_mapheight = 28
ts_map =[
[ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 ,
18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 32 ],
[ 33 , 34 , 35 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 36 , 37 , 38 , 39 , 40 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 41 , 42 , 43 ,
44 , 45 , 46 , 47 , 48 , 49 , 50 , 51 , 52 , 53 , 54 , 55 , 56 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 14 , 27 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 14 , 19 , 24 ,
9 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 2 , 15 , 19 , 19 , 19 ,
33 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 12 , 19 , 19 , 19 , 19 , 19 ,
32 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 18 , 19 , 19 , 19 , 19 , 19 ,
24 , 13 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 14 , 1 , 1 , 1 , 1 , 1 , 1 , 14 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 32 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 14 , 19 , 9 , 1 , 1 , 1 , 1 , 4 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 13 , 1 , 1 , 1 , 1 , 1 , 14 , 27 , 5 , 6 , 22 , 23 ],
[ 3 , 16 , 17 , 19 , 19 , 33 , 1 , 1 , 1 , 1 , 21 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 31 , 1 , 1 , 1 , 2 , 15 , 19 , 19 , 19 , 19 , 19 , 24 ],
[ 19 , 19 , 19 , 19 , 19 , 32 , 1 , 1 , 1 , 1 , 20 , 19 , 19 , 24 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 40 , 40 , 40 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ],
[ 19 , 19 , 19 , 24 , 19 , 19 , 9 , 1 , 1 , 4 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 33 , 1 , 1 , 21 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 32 , 1 , 1 , 20 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ],
[ 19 , 19 , 24 , 19 , 19 , 19 , 19 , 40 , 40 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
26 ],
[ 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 25 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 25 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 26 , 19 ,
19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 25 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 ,
25 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ]
]
ts_mapwidth = 32
ts_mapheight = 28
ts_map =[
[ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 ,
18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 32 ],
[ 33 , 34 , 35 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 36 , 37 , 38 , 39 , 40 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 41 , 42 , 43 ,
44 , 45 , 46 , 47 , 48 , 49 , 50 , 51 , 52 , 53 , 54 , 55 , 56 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 14 , 35 , 34 ,
11 , 10 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 3 , 16 , 17 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 19 , 19 , 19 ,
19 , 19 , 35 , 34 , 11 , 10 , 5 , 6 , 22 , 23 , 19 , 19 , 19 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ],
[ 5 , 6 , 22 , 23 , 9 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ],
[ 19 , 19 , 19 , 19 , 33 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ],
[ 19 , 19 , 19 , 19 , 32 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 19 , 19 , 19 , 19 ],
[ 19 , 19 , 19 , 19 , 19 , 9 , 1 , 1 , 1 , 2 , 15 , 35 , 34 , 11 , 10 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 19 , 19 ],
[ 19 , 19 , 19 , 19 , 19 , 33 , 1 , 1 , 14 , 19 , 19 , 19 , 19 , 19 , 19 , 28 ,
7 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 19 ],
[ 19 , 25 , 19 , 19 , 19 , 32 , 2 , 15 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 28 , 7 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 ,
19 , 19 , 19 , 19 , 28 , 7 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 19 ],
[ 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 30 , 29 , 8 , 1 , 1 , 1 , 1 , 1 , 1 , 12 , 19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 35 , 34 , 11 , 10 , 1 , 1 , 18 ,
26 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 40 , 40 , 19 ,
19 ],
[ 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
26 ],
[ 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 25 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 25 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 26 , 19 ,
19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 25 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 ,
25 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ]
]
ts_mapwidth = 32
ts_mapheight = 28
ts_map =[
[ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 ,
18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 32 ],
[ 33 , 34 , 35 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 36 , 37 , 38 , 39 , 40 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 41 , 42 , 43 ,
44 , 45 , 46 , 47 , 48 , 49 , 50 , 51 , 52 , 53 , 54 , 55 , 56 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 35 , 34 , 11 , 10 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 19 , 19 , 19 , 19 , 35 , 34 , 11 , 10 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 35 , 34 , 11 , 10 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 35 , 34 , 11 ,
10 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 35 ,
34 , 11 , 10 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 35 , 34 , 11 , 10 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 27 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 12 , 19 , 19 , 19 , 26 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 27 , 1 , 1 , 1 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 18 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 24 , 19 , 19 , 19 , 19 , 19 , 19 , 25 , 30 , 29 , 8 , 1 , 1 , 1 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 12 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 30 , 29 , 8 ],
[ 35 , 34 , 11 , 10 , 1 , 1 , 1 , 18 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ],
[ 19 , 19 , 19 , 26 , 40 , 40 , 40 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 25 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
26 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 25 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ],
[ 19 , 19 , 19 , 26 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 ,
25 ],
[ 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 , 19 , 19 , 19 , 24 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ,
19 ]
]
"""
Converts a single row(8 pixels) of a tile into sms format.
Inputs: pixels - a list of 8 4 bit palette indexes where the first list item contains palette index for pixel 7, the second for pixel 6 etc.
Outputs: List of four bytes where the first byte contains bit 0 of each pixel, the second contains bit 1 of each pixel and so on.
"""
def ConvertRow(pixels):
out = []
for i in range(4):
out_byte = 0
for j in range(8):
out_byte += ((pixels[7-j] >> i) & 0x1) << j
out.append( out_byte )
return out
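# Illustrative example (not from the original file): a row whose leftmost pixel
# uses palette index 3 and whose rightmost pixel uses index 1 converts to the
# four bitplane bytes [0x81, 0x80, 0x00, 0x00] -- pixels[0] lands in bit 7 of
# each plane and pixels[7] in bit 0:
#   ConvertRow([3, 0, 0, 0, 0, 0, 0, 1]) == [129, 128, 0, 0]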
"""
Converts a 24bit per pixel color to 6 bit per pixel.
Inputs: rgb_val - A 24 bit color in format bbbbbbbbggggggggrrrrrrrr
Outputs: 6 bit per color representation of input in format xxbbggrr
"""
def Convert24bppTo6bpp(rgb_val):
red = (rgb_val & 0xc0)
green = ((rgb_val >> 0x08) & 0xc0)
blue = ((rgb_val >> 0x10) & 0xc0)
return (red >> 0x06) | (green >> 0x04) | (blue >> 0x02)
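# Illustrative example (not from the original file): the first palette entry,
# 16744703 = 0xFF80FF (blue 0xFF, green 0x80, red 0xFF), maps to 0x3B, i.e.
# xx=00, bb=11, gg=10, rr=11 in the xxbbggrr layout:
#   '%x' % Convert24bppTo6bpp(16744703) == '3b'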
def Main():
f = open("Map.txt","w")
f.write("\nPaletteStart:\n")
f.write(".db " + ", ".join([ ("$%x" % Convert24bppTo6bpp(color)) for color in ts_palette]))
f.write("\nPaletteEnd:\n\n")
f.write("\nTilesStart:\n")
for i in range(len(ts_tiles) -1):
f.write("\n;Tile %i\n" % (i + tile_base_index) )
for tile_row in ts_tiles[i]:
converted_row = ConvertRow(tile_row)
f.write("\n.db " + " ".join(["$%x" % pixel for pixel in converted_row]))
f.write("\nTilesEnd:\n\n")
f.write("TilemapStart:\n")
for row in ts_map:
f.write("\n.dw " + " ".join(["$" + ("%x" % (max(tile_index -1, 0) + tile_base_index)).zfill(4) for tile_index in row]))
f.write("\nTilemapEnd:\n\n")
f.close()
Main() | gpl-3.0 |