repo_name stringlengths 5-100 | path stringlengths 4-375 | copies stringclasses 991 values | size stringlengths 4-7 | content stringlengths 666-1M | license stringclasses 15 values |
---|---|---|---|---|---|
freedesktop-unofficial-mirror/gstreamer__sdk__cerbero | cerbero/ide/xcode/fwlib.py | 13 | 8433 | #!/usr/bin/env python
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import tempfile
import shutil
from collections import defaultdict
from cerbero.config import Architecture
from cerbero.ide.pkgconfig import PkgConfig
from cerbero.utils import shell
from cerbero.utils import messages as m
class FrameworkLibrary(object):
'''
Combine several shared libraries into a single shared library to be used
as a Framework.
The complete list of shared libraries needed is guessed with pkg-config,
but full paths can be used too with use_pkgconfig=False.
'''
def __init__(self, libname, install_name, libraries, arch):
self.libname = libname
self.install_name = install_name
self.libraries = libraries
self.arch = arch
self.use_pkgconfig = True
self.universal_archs = None
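# Illustrative usage (not part of the original module; names and paths below are
# hypothetical): a subclass is given the output library path, its install name,
# the pkg-config names (or full paths, with use_pkgconfig=False) of the libraries
# to merge, and the target arch; create() then builds the combined library:
#   fwlib = DynamicFrameworkLibrary('GStreamer', 'GStreamer.framework/GStreamer',
#                                   ['gstreamer-1.0'], Architecture.X86)
#   fwlib.create()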
def create(self):
if self.arch == Architecture.X86:
self.arch = 'i386'
if self.use_pkgconfig:
libraries = self._libraries_paths(self.libraries)
else:
libraries = self.libraries
self._create_framework_library(libraries)
def _libraries_paths(self, libraries):
pkgconfig = PkgConfig(libraries)
libdirs = pkgconfig.libraries_dirs()
libs = pkgconfig.libraries()
libspaths = []
for lib in libs:
for libdir in libdirs:
libpath = os.path.join(libdir, self._get_lib_file_name (lib))
if not os.path.exists(libpath):
continue
libspaths.append(os.path.realpath(libpath))
break
return libspaths
def _create_framework_library(self, libraries):
raise NotImplementedError
def _get_lib_file_name(self, lib):
return lib
class DynamicFrameworkLibrary(FrameworkLibrary):
def _create_framework_library(self, libraries):
libraries = ' '.join(['-Wl,-reexport_library %s' % x for x in libraries])
shell.call('clang -dynamiclib -o %s -arch %s -install_name %s %s' %
(self.libname, self.arch, self.install_name, libraries))
def _get_lib_file_name(self, lib):
return 'lib%s.dylib' % lib
class StaticFrameworkLibrary(FrameworkLibrary):
def _get_lib_file_name(self, lib):
return 'lib%s.a' % lib
def _split_static_lib(self, lib, thin_arch=None):
'''Splits the static lib @lib into its object files
Splits the static lib @lib into its object files and returns
a new temporary directory where the .o files should be found.
If @thin_arch is provided, @lib is considered to be a fat
binary and its thin slice for the specified @thin_arch is used
before retrieving the object files.
'''
lib_tmpdir = tempfile.mkdtemp()
shutil.copy(lib, lib_tmpdir)
tmplib = os.path.join(lib_tmpdir, os.path.basename(lib))
if thin_arch: #should be a fat file, split only to the arch we want
newname = '%s_%s' % (thin_arch, os.path.basename(lib))
shell.call('lipo %s -thin %s -output %s' % (tmplib,
thin_arch, newname), lib_tmpdir)
tmplib = os.path.join (lib_tmpdir, newname)
shell.call('ar -x %s' % tmplib, lib_tmpdir)
# object files with the same name in an archive are overwritten
# when they are extracted. osx's ar does not support the N count
# modifier so after extracting all the files we remove them from
# the archive to extract those with duplicated names.
# eg:
# ar t libavcodec.a -> mlpdsp.o mlpdsp.o (2 objects with the same name)
# ar d libavcodec.a mlpdsp.o (we remove the first one)
# ar t libavcodec.a -> mlpdsp.o (only the second one is left now)
files = shell.check_call('ar -t %s' % tmplib, lib_tmpdir).split('\n')
# FIXME: We should use collections.Counter, but it's only available in
# python 2.7+
dups = defaultdict(int)
for f in files:
dups[f] += 1
for f in dups:
if dups[f] <= 1:
continue
for x in range(dups[f]):
path = os.path.join(lib_tmpdir, f)
new_path = os.path.join(lib_tmpdir, 'dup%d_' % x + f)
# The duplicated overwrote the first one, so extract it again
shell.call('ar -x %s %s' % (tmplib, f), lib_tmpdir)
shutil.move (path, new_path)
shell.call('ar -d %s %s' % (tmplib, f), lib_tmpdir)
return lib_tmpdir
def _check_duplicated_symbols(self, files, tmpdir):
for f in files:
syms = defaultdict(list)
symbols = shell.check_call('nm -UA %s' % f, tmpdir).split('\n')
# nm output is: test.o: 00000000 T _gzwrite
# (filename, address, symbol type, symbols_name)
for s in symbols:
s = s.split(' ')
if len(s) == 4 and s[2] == 'T':
syms[s[3]].append(s)
dups = {}
for k,v in syms.iteritems():
if len(v) > 1:
dups[k] = v
if dups:
m.warning ("The static library contains duplicated symbols")
for k, v in dups.iteritems():
m.message (k) # symbol name
for l in v:
m.message (" %s" % l[0]) # file
def _create_framework_library(self, libraries):
tmpdir = tempfile.mkdtemp()
libname = os.path.basename (self.libname) # just to make sure
if self.arch == Architecture.UNIVERSAL:
archs = self.universal_archs
else:
archs = [self.arch]
archs = [a if a != Architecture.X86 else 'i386' for a in archs]
for thin_arch in archs:
object_files_md5 = []
shell.call ('mkdir -p %s' % thin_arch, tmpdir)
tmpdir_thinarch = os.path.join(tmpdir, thin_arch)
for lib in libraries:
libprefix = os.path.split(lib)[-1].replace('.', '_')
if len(archs) > 1: #should be a fat file, split only to the arch we want
libprefix += '_%s_' % thin_arch
lib_tmpdir = self._split_static_lib(lib, thin_arch)
else:
lib_tmpdir = self._split_static_lib(lib)
obj_files = shell.ls_files(['*.o'], lib_tmpdir)
for obj_f in obj_files:
obj_path = os.path.join(lib_tmpdir, obj_f)
md5 = shell.check_call('md5 -q %s' % obj_path).split('\n')[0]
md5 = '%s-%s' % (md5, os.path.getsize(obj_path))
if md5 not in object_files_md5:
shell.call('cp %s %s' % (obj_path, '%s-%s' % (libprefix, obj_f)), tmpdir_thinarch)
shell.call('ar -cqS %s %s-%s' % (libname, libprefix, obj_f), tmpdir_thinarch)
object_files_md5.append(md5)
shutil.rmtree(lib_tmpdir)
shell.call('ar -s %s' % (libname), tmpdir_thinarch)
files = [os.path.join(tmpdir, arch, libname) for arch in archs]
self._check_duplicated_symbols(files, tmpdir)
if len(archs) > 1:
#merge the final libs into a fat file again
shell.call('lipo %s -create -output %s' % (' '.join(files), self.install_name), tmpdir)
else:
shell.call('cp %s %s' % (os.path.join(tmpdir, self.arch, libname), self.install_name), tmpdir)
shutil.rmtree(tmpdir)
| lgpl-2.1 |
robertostling/hnmt | hnmt/bleu.py | 1 | 3963 | #!/usr/bin/env python3
"""calculate BLEU scores
script taken from https://github.com/vikasnar/Bleu
and adjusted by Jörg Tiedemann
"""
import sys
import codecs
import os
import math
import operator
import json
import functools
def fetch_data(cand, ref):
""" Store each reference and candidate sentences as a list """
references = []
if os.path.isdir(ref):
for root, dirs, files in os.walk(ref):
for f in files:
reference_file = codecs.open(os.path.join(root, f), 'r', 'utf-8')
references.append(reference_file.readlines())
else:
reference_file = codecs.open(ref, 'r', 'utf-8')
references.append(reference_file.readlines())
candidate_file = codecs.open(cand, 'r', 'utf-8')
candidate = candidate_file.readlines()
return candidate, references
def count_ngram(candidate, references, n, lowercase):
clipped_count = 0
count = 0
r = 0
c = 0
for si in range(len(candidate)):
# Calculate precision for each sentence
ref_counts = []
ref_lengths = []
# Build dictionary of ngram counts
for reference in references:
ref_sentence = reference[si]
ngram_d = {}
words = ref_sentence.strip().split()
ref_lengths.append(len(words))
limits = len(words) - n + 1
# loop through the sentence considering the ngram length
for i in range(limits):
ngram = ' '.join(words[i:i+n])
if lowercase:
ngram = ngram.lower()
if ngram in ngram_d.keys():
ngram_d[ngram] += 1
else:
ngram_d[ngram] = 1
ref_counts.append(ngram_d)
# candidate
cand_sentence = candidate[si]
cand_dict = {}
words = cand_sentence.strip().split()
limits = len(words) - n + 1
for i in range(0, limits):
ngram = ' '.join(words[i:i + n])
if lowercase:
ngram = ngram.lower()
if ngram in cand_dict:
cand_dict[ngram] += 1
else:
cand_dict[ngram] = 1
clipped_count += clip_count(cand_dict, ref_counts)
count += limits
r += best_length_match(ref_lengths, len(words))
c += len(words)
if clipped_count == 0:
pr = 0
else:
pr = float(clipped_count) / count
bp = brevity_penalty(c, r)
return pr, bp
def clip_count(cand_d, ref_ds):
"""Count the clip count for each ngram considering all references"""
count = 0
for m in cand_d.keys():
m_w = cand_d[m]
m_max = 0
for ref in ref_ds:
if m in ref:
m_max = max(m_max, ref[m])
m_w = min(m_w, m_max)
count += m_w
return count
def best_length_match(ref_l, cand_l):
"""Find the closest length of reference to that of candidate"""
least_diff = abs(cand_l-ref_l[0])
best = ref_l[0]
for ref in ref_l:
if abs(cand_l-ref) < least_diff:
least_diff = abs(cand_l-ref)
best = ref
return best
def brevity_penalty(c, r):
if c > r:
bp = 1
elif c == 0:
bp = 0
else:
bp = math.exp(1-(float(r)/c))
return bp
def geometric_mean(precisions):
return (functools.reduce(operator.mul, precisions)) ** (1.0 / len(precisions))
def BLEU(candidate, references, lowercase=False):
precisions = []
for i in range(4):
pr, bp = count_ngram(candidate, references, i+1, lowercase)
precisions.append(pr)
bleu = geometric_mean(precisions) * bp
return bleu, precisions[0], precisions[1], precisions[2], precisions[3], bp
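# For reference: the score above is the standard BLEU composition,
#   BLEU = BP * (p1 * p2 * p3 * p4) ** (1/4)
# with p1..p4 the modified n-gram precisions from count_ngram() and BP the
# brevity penalty.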
if __name__ == "__main__":
candidate, references = fetch_data(sys.argv[1], sys.argv[2])
bleu = BLEU(candidate, references)
print('BLEU = %.4f (%.3f, %.3f, %.3f, %.3f, BP = %.3f)' % (bleu))
| gpl-3.0 |
philsch/ansible | lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py | 11 | 10209 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Darren Worrall <[email protected]>
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_loadbalancer_rule_member
short_description: Manages load balancer rule members on Apache CloudStack based clouds.
description:
- Add and remove load balancer rule members.
version_added: '2.0'
author:
- "Darren Worrall (@dazworrall)"
- "René Moser (@resmo)"
options:
name:
description:
- The name of the load balancer rule.
required: true
ip_address:
description:
- Public IP address from where the network traffic will be load balanced from.
- Only needed to find the rule if C(name) is not unique.
required: false
default: null
aliases: [ 'public_ip' ]
vms:
description:
- List of VMs to assign to or remove from the rule.
required: true
aliases: [ 'vm' ]
state:
description:
- Should the VMs be present or absent from the rule.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
project:
description:
- Name of the project the firewall rule is related to.
required: false
default: null
domain:
description:
- Domain the rule is related to.
required: false
default: null
account:
description:
- Account the rule is related to.
required: false
default: null
zone:
description:
- Name of the zone in which the rule should be located.
- If not set, default zone is used.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Add VMs to an existing load balancer
- local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vms:
- web01
- web02
# Remove a VM from an existing load balancer
- local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vms:
- web01
- web02
state: absent
# Rolling upgrade of hosts
- hosts: webservers
serial: 1
pre_tasks:
- name: Remove from load balancer
local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vm: "{{ ansible_hostname }}"
state: absent
tasks:
# Perform update
post_tasks:
- name: Add to load balancer
local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vm: "{{ ansible_hostname }}"
state: present
'''
RETURN = '''
---
id:
description: UUID of the rule.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
zone:
description: Name of zone the rule is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the rule is related to.
returned: success
type: string
sample: Production
account:
description: Account the rule is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the rule is related to.
returned: success
type: string
sample: example domain
algorithm:
description: Load balancer algorithm used.
returned: success
type: string
sample: "source"
cidr:
description: CIDR to forward traffic from.
returned: success
type: string
sample: ""
name:
description: Name of the rule.
returned: success
type: string
sample: "http-lb"
description:
description: Description of the rule.
returned: success
type: string
sample: "http load balancer rule"
protocol:
description: Protocol of the rule.
returned: success
type: string
sample: "tcp"
public_port:
description: Public port.
returned: success
type: string
sample: 80
private_port:
description: Private port.
returned: success
type: string
sample: 80
public_ip:
description: Public IP address.
returned: success
type: string
sample: "1.2.3.4"
vms:
description: Rule members.
returned: success
type: list
sample: '[ "web01", "web02" ]'
tags:
description: List of resource tags associated with the rule.
returned: success
type: list
sample: '[ { "key": "foo", "value": "bar" } ]'
state:
description: State of the rule.
returned: success
type: string
sample: "Add"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
CloudStackException,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackLBRuleMember(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackLBRuleMember, self).__init__(module)
self.returns = {
'publicip': 'public_ip',
'algorithm': 'algorithm',
'cidrlist': 'cidr',
'protocol': 'protocol',
}
# these values will be casted to int
self.returns_to_int = {
'publicport': 'public_port',
'privateport': 'private_port',
}
def get_rule(self):
args = self._get_common_args()
args.update({
'name': self.module.params.get('name'),
'zoneid': self.get_zone(key='id') if self.module.params.get('zone') else None,
})
if self.module.params.get('ip_address'):
args['publicipid'] = self.get_ip_address(key='id')
rules = self.cs.listLoadBalancerRules(**args)
if rules:
if len(rules['loadbalancerrule']) > 1:
self.module.fail_json(msg="More than one rule having name %s. Please pass 'ip_address' as well." % args['name'])
return rules['loadbalancerrule'][0]
return None
def _get_common_args(self):
return {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
def _get_members_of_rule(self, rule):
res = self.cs.listLoadBalancerRuleInstances(id=rule['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
return res.get('loadbalancerruleinstance', [])
def _ensure_members(self, operation):
if operation not in ['add', 'remove']:
self.module.fail_json(msg="Bad operation: %s" % operation)
rule = self.get_rule()
if not rule:
self.module.fail_json(msg="Unknown rule: %s" % self.module.params.get('name'))
existing = {}
for vm in self._get_members_of_rule(rule=rule):
existing[vm['name']] = vm['id']
wanted_names = self.module.params.get('vms')
if operation == 'add':
cs_func = self.cs.assignToLoadBalancerRule
to_change = set(wanted_names) - set(existing.keys())
else:
cs_func = self.cs.removeFromLoadBalancerRule
to_change = set(wanted_names) & set(existing.keys())
if not to_change:
return rule
args = self._get_common_args()
vms = self.cs.listVirtualMachines(**args)
to_change_ids = []
for name in to_change:
for vm in vms.get('virtualmachine', []):
if vm['name'] == name:
to_change_ids.append(vm['id'])
break
else:
self.module.fail_json(msg="Unknown VM: %s" % name)
if to_change_ids:
self.result['changed'] = True
if to_change_ids and not self.module.check_mode:
res = cs_func(
id=rule['id'],
virtualmachineids=to_change_ids,
)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res)
rule = self.get_rule()
return rule
def add_members(self):
return self._ensure_members('add')
def remove_members(self):
return self._ensure_members('remove')
def get_result(self, rule):
super(AnsibleCloudStackLBRuleMember, self).get_result(rule)
if rule:
self.result['vms'] = []
for vm in self._get_members_of_rule(rule=rule):
self.result['vms'].append(vm['name'])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
ip_address=dict(aliases=['public_ip']),
vms=dict(required=True, aliases=['vm'], type='list'),
state=dict(choices=['present', 'absent'], default='present'),
zone=dict(),
domain=dict(),
project=dict(),
account=dict(),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_lb_rule_member = AnsibleCloudStackLBRuleMember(module)
state = module.params.get('state')
if state in ['absent']:
rule = acs_lb_rule_member.remove_members()
else:
rule = acs_lb_rule_member.add_members()
result = acs_lb_rule_member.get_result(rule)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
GeyerA/android_external_chromium_org | tools/gdb/gdb_chrome.py | 30 | 10090 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GDB support for Chrome types.
Add this to your gdb by amending your ~/.gdbinit as follows:
python
import sys
sys.path.insert(0, "/path/to/tools/gdb/")
import gdb_chrome
end
This module relies on the WebKit gdb module already existing in
your Python path.
Use
(gdb) p /r any_variable
to print |any_variable| without using any printers.
"""
import datetime
import gdb
import webkit
# When debugging this module, set the below variable to True, and then use
# (gdb) python del sys.modules['gdb_chrome']
# (gdb) python import gdb_chrome
# to reload.
_DEBUGGING = False
pp_set = gdb.printing.RegexpCollectionPrettyPrinter("chromium")
def typed_ptr(ptr):
"""Prints a pointer along with its exact type.
By default, gdb would print just the address, which takes more
steps to interpret.
"""
# Returning this as a cast expression surrounded by parentheses
# makes it easier to cut+paste inside of gdb.
return '((%s)%s)' % (ptr.dynamic_type, ptr)
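# For example, printing a pointer to a derived object yields something like
# ((content::RenderProcessHostImpl *) 0x7f00dead) -- an expression that can be
# pasted back into gdb as-is (the class name and address here are illustrative only).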
class Printer(object):
def __init__(self, val):
self.val = val
class StringPrinter(Printer):
def display_hint(self):
return 'string'
class String16Printer(StringPrinter):
def to_string(self):
return webkit.ustring_to_string(self.val['_M_dataplus']['_M_p'])
pp_set.add_printer(
'string16',
'^string16|std::basic_string<(unsigned short|char16|base::char16).*>$',
String16Printer);
class GURLPrinter(StringPrinter):
def to_string(self):
return self.val['spec_']
pp_set.add_printer('GURL', '^GURL$', GURLPrinter)
class FilePathPrinter(StringPrinter):
def to_string(self):
return self.val['path_']['_M_dataplus']['_M_p']
pp_set.add_printer('FilePath', '^FilePath$', FilePathPrinter)
class SizePrinter(Printer):
def to_string(self):
return '%sx%s' % (self.val['width_'], self.val['height_'])
pp_set.add_printer('gfx::Size', '^gfx::(Size|SizeF|SizeBase<.*>)$', SizePrinter)
class PointPrinter(Printer):
def to_string(self):
return '%s,%s' % (self.val['x_'], self.val['y_'])
pp_set.add_printer('gfx::Point', '^gfx::(Point|PointF|PointBase<.*>)$',
PointPrinter)
class RectPrinter(Printer):
def to_string(self):
return '%s %s' % (self.val['origin_'], self.val['size_'])
pp_set.add_printer('gfx::Rect', '^gfx::(Rect|RectF|RectBase<.*>)$',
RectPrinter)
class SmartPtrPrinter(Printer):
def to_string(self):
return '%s%s' % (self.typename, typed_ptr(self.ptr()))
class ScopedRefPtrPrinter(SmartPtrPrinter):
typename = 'scoped_refptr'
def ptr(self):
return self.val['ptr_']
pp_set.add_printer('scoped_refptr', '^scoped_refptr<.*>$', ScopedRefPtrPrinter)
class LinkedPtrPrinter(SmartPtrPrinter):
typename = 'linked_ptr'
def ptr(self):
return self.val['value_']
pp_set.add_printer('linked_ptr', '^linked_ptr<.*>$', LinkedPtrPrinter)
class WeakPtrPrinter(SmartPtrPrinter):
typename = 'base::WeakPtr'
def ptr(self):
flag = ScopedRefPtrPrinter(self.val['ref_']['flag_']).ptr()
if flag and flag['is_valid_']:
return self.val['ptr_']
return gdb.Value(0).cast(self.val['ptr_'].type)
pp_set.add_printer('base::WeakPtr', '^base::WeakPtr<.*>$', WeakPtrPrinter)
class CallbackPrinter(Printer):
"""Callbacks provide no usable information so reduce the space they take."""
def to_string(self):
return '...'
pp_set.add_printer('base::Callback', '^base::Callback<.*>$', CallbackPrinter)
class LocationPrinter(Printer):
def to_string(self):
return '%s()@%s:%s' % (self.val['function_name_'].string(),
self.val['file_name_'].string(),
self.val['line_number_'])
pp_set.add_printer('tracked_objects::Location', '^tracked_objects::Location$',
LocationPrinter)
class LockPrinter(Printer):
def to_string(self):
try:
if self.val['owned_by_thread_']:
return 'Locked by thread %s' % self.val['owning_thread_id_']
else:
return 'Unlocked'
except gdb.error:
return 'Unknown state'
pp_set.add_printer('base::Lock', '^base::Lock$', LockPrinter)
class TimeDeltaPrinter(object):
def __init__(self, val):
self._timedelta = datetime.timedelta(microseconds=int(val['delta_']))
def timedelta(self):
return self._timedelta
def to_string(self):
return str(self._timedelta)
pp_set.add_printer('base::TimeDelta', '^base::TimeDelta$', TimeDeltaPrinter)
class TimeTicksPrinter(TimeDeltaPrinter):
def __init__(self, val):
self._timedelta = datetime.timedelta(microseconds=int(val['ticks_']))
pp_set.add_printer('base::TimeTicks', '^base::TimeTicks$', TimeTicksPrinter)
class TimePrinter(object):
def __init__(self, val):
timet_offset = gdb.parse_and_eval(
'base::Time::kTimeTToMicrosecondsOffset')
self._datetime = (datetime.datetime.fromtimestamp(0) +
datetime.timedelta(microseconds=
int(val['us_'] - timet_offset)))
def datetime(self):
return self._datetime
def to_string(self):
return str(self._datetime)
pp_set.add_printer('base::Time', '^base::Time$', TimePrinter)
class IpcMessagePrinter(Printer):
def header(self):
return self.val['header_'].cast(
gdb.lookup_type('IPC::Message::Header').pointer())
def to_string(self):
message_type = self.header()['type']
return '%s of kind %s line %s' % (
self.val.dynamic_type,
(message_type >> 16).cast(gdb.lookup_type('IPCMessageStart')),
message_type & 0xffff)
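# Note: as decoded above, the high 16 bits of the message type hold the
# message class (an IPCMessageStart value) and the low 16 bits the line.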
def children(self):
yield ('header_', self.header().dereference())
yield ('capacity_', self.val['capacity_'])
yield ('variable_buffer_offset_', self.val['variable_buffer_offset_'])
for field in self.val.type.fields():
if field.is_base_class:
continue
yield (field.name, self.val[field.name])
pp_set.add_printer('IPC::Message', '^IPC::Message$', IpcMessagePrinter)
class NotificationRegistrarPrinter(Printer):
def to_string(self):
try:
registrations = self.val['registered_']
vector_finish = registrations['_M_impl']['_M_finish']
vector_start = registrations['_M_impl']['_M_start']
if vector_start == vector_finish:
return 'Not watching notifications'
if vector_start.dereference().type.sizeof == 0:
# Incomplete type: b/8242773
return 'Watching some notifications'
return ('Watching %s notifications; '
'print %s->registered_ for details') % (
int(vector_finish - vector_start),
typed_ptr(self.val.address))
except gdb.error:
return 'NotificationRegistrar'
pp_set.add_printer('content::NotificationRegistrar',
'^content::NotificationRegistrar$',
NotificationRegistrarPrinter)
class SiteInstanceImplPrinter(object):
def __init__(self, val):
self.val = val.cast(val.dynamic_type)
def to_string(self):
return 'SiteInstanceImpl@%s for %s' % (
self.val.address, self.val['site_'])
def children(self):
yield ('id_', self.val['id_'])
yield ('has_site_', self.val['has_site_'])
if self.val['browsing_instance_']['ptr_']:
yield ('browsing_instance_', self.val['browsing_instance_']['ptr_'])
if self.val['process_']:
yield ('process_', typed_ptr(self.val['process_']))
if self.val['render_process_host_factory_']:
yield ('render_process_host_factory_',
self.val['render_process_host_factory_'])
pp_set.add_printer('content::SiteInstanceImpl', '^content::SiteInstanceImpl$',
SiteInstanceImplPrinter)
class RenderProcessHostImplPrinter(object):
def __init__(self, val):
self.val = val.cast(val.dynamic_type)
def to_string(self):
pid = ''
try:
child_process_launcher_ptr = (
self.val['child_process_launcher_']['impl_']['data_']['ptr'])
if child_process_launcher_ptr:
context = (child_process_launcher_ptr['context_']['ptr_'])
if context:
pid = ' PID %s' % str(context['process_']['process_'])
except gdb.error:
# The definition of the Context type may not be available.
# b/8242773
pass
return 'RenderProcessHostImpl@%s%s' % (self.val.address, pid)
def children(self):
yield ('id_', self.val['id_'])
yield ('render_widget_hosts_',
self.val['render_widget_hosts_']['data_'])
yield ('fast_shutdown_started_', self.val['fast_shutdown_started_'])
yield ('deleting_soon_', self.val['deleting_soon_'])
yield ('pending_views_', self.val['pending_views_'])
yield ('visible_widgets_', self.val['visible_widgets_'])
yield ('backgrounded_', self.val['backgrounded_'])
yield ('widget_helper_', self.val['widget_helper_'])
yield ('is_initialized_', self.val['is_initialized_'])
yield ('browser_context_', typed_ptr(self.val['browser_context_']))
yield ('sudden_termination_allowed_',
self.val['sudden_termination_allowed_'])
yield ('ignore_input_events_', self.val['ignore_input_events_'])
yield ('is_guest_', self.val['is_guest_'])
pp_set.add_printer('content::RenderProcessHostImpl',
'^content::RenderProcessHostImpl$',
RenderProcessHostImplPrinter)
gdb.printing.register_pretty_printer(gdb, pp_set, replace=_DEBUGGING)
| bsd-3-clause |
bplancher/odoo | addons/l10n_be_invoice_bba/invoice.py | 8 | 11056 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
import re, time, random
from openerp import api
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
"""
account.invoice object:
- Add support for Belgian structured communication
- Rename 'reference' field labels to 'Communication'
"""
class account_invoice(osv.osv):
_inherit = 'account.invoice'
@api.cr_uid_context
def _get_reference_type(self, cursor, user, context=None):
"""Add BBA Structured Communication Type and change labels from 'reference' into 'communication' """
res = super(account_invoice, self)._get_reference_type(cursor, user,
context=context)
res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = \
('none', _('Free Communication'))
res.append(('bba', _('BBA Structured Communication')))
# _logger.warning('reference_type = %s' % res)
return res
def check_bbacomm(self, val):
supported_chars = '0-9+*/ '
pattern = re.compile('[^' + supported_chars + ']')
if pattern.findall(val or ''):
return False
bbacomm = re.sub('\D', '', val or '')
if len(bbacomm) == 12:
base = int(bbacomm[:10])
mod = base % 97 or 97
if mod == int(bbacomm[-2:]):
return True
return False
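# Worked example (illustration only): for the ten base digits '1234567890',
# 1234567890 % 97 == 2, so the communication '+++123/4567/89002+++' passes
# the check above.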
def _check_communication(self, cr, uid, ids):
for inv in self.browse(cr, uid, ids):
if inv.reference_type == 'bba':
return self.check_bbacomm(inv.reference)
return True
@api.onchange('partner_id')
def _onchange_partner_id(self):
result = super(account_invoice, self)._onchange_partner_id()
# reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type']
# _logger.warning('partner_id %s' % partner_id)
reference = False
reference_type = 'none'
if self.partner_id:
if (self.type == 'out_invoice'):
reference_type = self.partner_id.out_inv_comm_type
if reference_type:
reference = self.generate_bbacomm(self.type, reference_type, self.partner_id.id, '')['value']['reference']
self.reference_type = reference_type or 'none'
self.reference = reference
return result
def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None):
partner_obj = self.pool.get('res.partner')
reference = reference or ''
algorithm = False
if partner_id:
algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm
algorithm = algorithm or 'random'
if (type == 'out_invoice'):
if reference_type == 'bba':
if algorithm == 'date':
if not self.check_bbacomm(reference):
doy = time.strftime('%j')
year = time.strftime('%Y')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise UserError(_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = doy + year + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod)
elif algorithm == 'partner_ref':
if not self.check_bbacomm(reference):
partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref
partner_ref_nr = re.sub('\D', '', partner_ref or '')
if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7):
raise UserError(_('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \
'\nPlease correct the Partner record.'))
else:
partner_ref_nr = partner_ref_nr.ljust(7, '0')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise UserError(_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = partner_ref_nr + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod)
elif algorithm == 'random':
if not self.check_bbacomm(reference):
base = random.randint(1, 9999999999)
bbacomm = str(base).rjust(10, '0')
base = int(bbacomm)
mod = base % 97 or 97
mod = str(mod).rjust(2, '0')
reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
else:
raise UserError(_("Unsupported Structured Communication Type Algorithm '%s' !" \
"\nPlease contact your Odoo support channel.") % algorithm)
return {'value': {'reference': reference}}
def create(self, cr, uid, vals, context=None):
reference = vals.get('reference', False)
reference_type = vals.get('reference_type', False)
if vals.get('type') == 'out_invoice' and not reference_type:
# fallback on default communication type for partner
reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type
if reference_type == 'bba':
reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference']
vals.update({
'reference_type': reference_type or 'none',
'reference': reference,
})
if reference_type == 'bba':
if not reference:
raise UserError(_('Empty BBA Structured Communication!' \
'\nPlease fill in a unique BBA Structured Communication.'))
if self.check_bbacomm(reference):
reference = re.sub('\D', '', reference)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', '=', vals['reference'])])
if same_ids:
raise UserError(_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for inv in self.browse(cr, uid, ids, context):
if vals.has_key('reference_type'):
reference_type = vals['reference_type']
else:
reference_type = inv.reference_type or ''
if reference_type == 'bba' and 'reference' in vals:
if self.check_bbacomm(vals['reference']):
reference = re.sub('\D', '', vals['reference'])
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('id', '!=', inv.id), ('type', '=', 'out_invoice'),
('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])])
if same_ids:
raise UserError(_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).write(cr, uid, ids, vals, context)
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
invoice = self.browse(cr, uid, id, context=context)
if invoice.type in ['out_invoice']:
reference_type = invoice.reference_type or 'none'
default['reference_type'] = reference_type
if reference_type == 'bba':
partner = invoice.partner_id
default['reference'] = self.generate_bbacomm(cr, uid, id,
invoice.type, reference_type,
partner.id, '', context=context)['value']['reference']
return super(account_invoice, self).copy(cr, uid, id, default, context=context)
_columns = {
'reference_type': fields.selection(_get_reference_type, 'Payment Reference',
required=True, readonly=True),
}
_constraints = [
(_check_communication, 'Invalid BBA Structured Communication !', ['reference', 'reference_type']),
]
account_invoice()
| agpl-3.0 |
laurentgo/pants | src/python/pants/backend/jvm/repository.py | 17 | 1337 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
class Repository(object):
"""An artifact repository, such as a maven repo."""
def __init__(self, name=None, url=None, push_db_basedir=None, **kwargs):
"""
:param string url: Optional URL of the repository.
:param string push_db_basedir: Push history file base directory.
"""
self.name = name
self.url = url
self.push_db_basedir = push_db_basedir
def push_db(self, target):
return os.path.join(self.push_db_basedir,
target.provides.org,
target.provides.name,
'publish.properties')
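# For illustration (hypothetical values): with push_db_basedir='/path/to/pushdb'
# and a target providing org 'com.example' and name 'mylib', push_db() returns
# /path/to/pushdb/com.example/mylib/publish.properties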
def __eq__(self, other):
return (
isinstance(other, Repository) and
(self.name, self.url, self.push_db_basedir) == (other.name, other.url, other.push_db_basedir)
)
def __hash__(self):
return hash((self.name, self.url, self.push_db_basedir))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "{} -> {} ({})".format(self.name, self.url, self.push_db_basedir)
| apache-2.0 |
tanium/pytan | BUILD/doc/source/examples/ask_saved_question_by_name_sse_code.py | 1 | 3070 | # import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler() class
kwargs = {}
kwargs["sse"] = True
kwargs["qtype"] = u'saved'
kwargs["name"] = u'Installed Applications'
print "...CALLING: handler.ask with args: {}".format(kwargs)
response = handler.ask(**kwargs)
print "...OUTPUT: Type of response: ", type(response)
print "...OUTPUT: Pretty print of response:"
print pprint.pformat(response)
print "...OUTPUT: Equivalent Question if it were to be asked in the Tanium Console: "
print response['question_object'].query_text
if response['question_results']:
# call the export_obj() method to convert response to CSV and store it in out
export_kwargs = {}
export_kwargs['obj'] = response['question_results']
export_kwargs['export_format'] = 'csv'
print "...CALLING: handler.export_obj() with args {}".format(export_kwargs)
out = handler.export_obj(**export_kwargs)
# trim the output if it is more than 15 lines long
if len(out.splitlines()) > 15:
out = out.splitlines()[0:15]
out.append('..trimmed for brevity..')
out = '\n'.join(out)
print "...OUTPUT: CSV Results of response: "
print out
| mit |
Wojtechnology/Muzit | StreetMuse/lib/python3.4/site-packages/pip/_vendor/html5lib/treebuilders/etree.py | 915 | 12621 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import re
from . import _base
from .. import ihatexml
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class Element(_base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
# Delete existing attributes first
# XXX - there may be a better way to do this...
for key in list(self._element.attrib.keys()):
del self._element.attrib[key]
for key, value in attributes.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], key[1])
else:
name = key
self._element.set(name, value)
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._element.remove(node._element)
node.parent = None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
# Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
# Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index - 1].tail:
self._element[index - 1].tail = ""
self._element[index - 1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
def cloneNode(self):
element = type(self)(self.name, self.namespace)
for name, value in self.attributes.items():
element.attributes[name] = value
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
_base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
# Use the superclass constructor to set all properties on the
# wrapper element
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
def __init__(self, name, publicId, systemId):
Element.__init__(self, "<!DOCTYPE>")
self._element.text = name
self.publicId = publicId
self.systemId = systemId
def _getPublicId(self):
return self._element.get("publicId", "")
def _setPublicId(self, value):
if value is not None:
self._element.set("publicId", value)
publicId = property(_getPublicId, _setPublicId)
def _getSystemId(self):
return self._element.get("systemId", "")
def _setSystemId(self, value):
if value is not None:
self._element.set("systemId", value)
systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_ROOT")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_FRAGMENT")
def testSerializer(element):
rv = []
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
rv.append("#document")
if element.text is not None:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
elif element.tag == ElementTreeCommentType:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
else:
assert isinstance(element.tag, text_type), \
"Expected unicode, got %s, %s" % (type(element.tag), element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s" % (prefix, name)
rv.append("|%s<%s>" % (' ' * indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
filter = ihatexml.InfosetFilter()
def serializeElement(element):
if isinstance(element, ElementTree.ElementTree):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
if element.text is not None:
rv.append(element.text)
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
for child in element:
serializeElement(child)
elif element.tag == ElementTreeCommentType:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (filter.fromXmlName(element.tag),))
else:
attr = " ".join(["%s=\"%s\"" % (
filter.fromXmlName(name), value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
implementation = ElementTreeImplementation
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html" % self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return _base.TreeBuilder.getFragment(self)._element
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| apache-2.0 |
erjohnso/ansible | lib/ansible/modules/network/junos/junos_config.py | 8 | 13496 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage configuration on devices running Juniper JUNOS
description:
- This module provides an implementation for working with the active
configuration running on Juniper JUNOS devices. It provides a set
of arguments for loading configuration, performing rollback operations
and zeroing the active configuration on the device.
extends_documentation_fragment: junos
options:
lines:
description:
- This argument takes a list of C(set) or C(delete) configuration
lines to push into the remote device. Each line must start with
either C(set) or C(delete). This argument is mutually exclusive
with the I(src) argument.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) argument.
required: false
default: null
version_added: "2.2"
src_format:
description:
- The I(src_format) argument specifies the format of the configuration
found int I(src). If the I(src_format) argument is not provided,
the module will attempt to determine the format of the configuration
file specified in I(src).
required: false
default: null
choices: ['xml', 'set', 'text', 'json']
version_added: "2.2"
rollback:
description:
- The C(rollback) argument instructs the module to rollback the
current configuration to the identifier specified in the
argument. If the specified rollback identifier does not
exist on the remote device, the module will fail. To rollback
to the most recent commit, set the C(rollback) argument to 0.
required: false
default: null
zeroize:
description:
- The C(zeroize) argument is used to completely sanitize the
remote device configuration back to initial defaults. This
argument will effectively remove all current configuration
statements on the remote device.
required: false
default: null
confirm:
description:
- The C(confirm) argument will configure a time out value for
the commit to be confirmed before it is automatically
rolled back. If the C(confirm) argument is set to False, this
argument is silently ignored. If the value for this argument
is set to 0, the commit is confirmed immediately.
required: false
default: 0
comment:
description:
- The C(comment) argument specifies a text string to be used
when committing the configuration. If the C(confirm) argument
is set to False, this argument is silently ignored.
required: false
default: configured by junos_config
replace:
description:
- The C(replace) argument will instruct the remote device to
replace the current configuration hierarchy with the one specified
in the corresponding hierarchy of the source configuration loaded
from this module.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the I(update) argument to C(replace). This argument
will be removed in a future release. The C(replace) and C(update) argument
is mutually exclusive.
required: false
choices: ['yes', 'no']
default: false
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
update:
description:
- This argument will decide how to load the configuration
data, particularly when the candidate configuration and loaded
configuration contain conflicting statements. Following are
accepted values.
C(merge) combines the data in the loaded configuration with the
candidate configuration. If statements in the loaded configuration
conflict with statements in the candidate configuration, the loaded
statements replace the candidate ones.
C(override) discards the entire candidate configuration and replaces
it with the loaded configuration.
C(replace) substitutes each hierarchy level in the loaded configuration
for the corresponding level.
required: false
default: merge
choices: ['merge', 'override', 'replace']
version_added: "2.3"
confirm_commit:
description:
- This argument will execute a commit operation on the remote device.
It can be used to confirm a previous commit.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.4"
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Loading JSON-formatted configuration I(json) is supported
starting in Junos OS Release 16.1.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
"""
EXAMPLES = """
- name: load configure file into device
junos_config:
src: srx.cfg
comment: update config
provider: "{{ netconf }}"
- name: load configure lines into device
junos_config:
lines:
- set interfaces ge-0/0/1 unit 0 description "Test interface"
- set vlans vlan01 description "Test vlan"
comment: update config
provider: "{{ netconf }}"
- name: rollback the configuration to id 10
junos_config:
rollback: 10
provider: "{{ netconf }}"
- name: zero out the current configuration
junos_config:
zeroize: yes
provider: "{{ netconf }}"
- name: confirm a previous commit
junos_config:
confirm_commit: yes
provider: "{{ netconf }}"
"""
RETURN = """
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/config.2016-07-16@22:28:34
"""
import re
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.junos import get_diff, load_config, get_configuration
from ansible.module_utils.junos import commit_configuration, discard_changes, locked_config
from ansible.module_utils.junos import junos_argument_spec, load_configuration
from ansible.module_utils.junos import check_args as junos_check_args
from ansible.module_utils.netconf import send_request
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
try:
from lxml.etree import Element, fromstring
except ImportError:
from xml.etree.ElementTree import Element, fromstring
try:
from lxml.etree import ParseError
except ImportError:
try:
from xml.etree.ElementTree import ParseError
except ImportError:
# for Python < 2.7
from xml.parsers.expat import ExpatError
ParseError = ExpatError
USE_PERSISTENT_CONNECTION = True
DEFAULT_COMMENT = 'configured by junos_config'
def check_args(module, warnings):
junos_check_args(module, warnings)
if module.params['replace'] is not None:
module.fail_json(msg='argument replace is deprecated, use update')
def zeroize(ele):
return send_request(ele, Element('request-system-zeroize'))
def rollback(ele, id='0'):
return get_diff(ele, id)
def guess_format(config):
try:
json.loads(config)
return 'json'
except ValueError:
pass
try:
fromstring(config)
return 'xml'
except ParseError:
pass
if config.startswith('set') or config.startswith('delete'):
return 'set'
return 'text'
def filter_delete_statements(module, candidate):
reply = get_configuration(module, format='set')
match = reply.find('.//configuration-set')
if match is None:
# Could not find configuration-set in reply, perhaps device does not support it?
return candidate
config = to_native(match.text, encoding='latin-1')
modified_candidate = candidate[:]
for index, line in reversed(list(enumerate(candidate))):
if line.startswith('delete'):
newline = re.sub('^delete', 'set', line)
if newline not in config:
del modified_candidate[index]
return modified_candidate
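# Rough illustration of the filtering above (hypothetical data): if the
# device's running configuration in 'set' format does not contain
# "set system services telnet", a candidate line
# "delete system services telnet" is dropped, because loading a delete for
# non-existent configuration would raise an error on the device.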
def configure_device(module, warnings, candidate):
kwargs = {}
config_format = None
if module.params['src']:
config_format = module.params['src_format'] or guess_format(str(candidate))
if config_format == 'set':
kwargs.update({'format': 'text', 'action': 'set'})
else:
kwargs.update({'format': config_format, 'action': module.params['update']})
if isinstance(candidate, string_types):
candidate = candidate.split('\n')
    # this is done to filter out `delete ...` statements which map to
    # nothing in the config, as those would cause an exception to be raised
if any((module.params['lines'], config_format == 'set')):
candidate = filter_delete_statements(module, candidate)
kwargs['format'] = 'text'
kwargs['action'] = 'set'
return load_config(module, candidate, warnings, **kwargs)
def main():
""" main entry point for module execution
"""
argument_spec = dict(
lines=dict(type='list'),
src=dict(type='path'),
src_format=dict(choices=['xml', 'text', 'set', 'json']),
# update operations
update=dict(default='merge', choices=['merge', 'override', 'replace', 'update']),
# deprecated replace in Ansible 2.3
replace=dict(type='bool'),
confirm=dict(default=0, type='int'),
comment=dict(default=DEFAULT_COMMENT),
confirm_commit=dict(type='bool', default=False),
# config operations
backup=dict(type='bool', default=False),
rollback=dict(type='int'),
zeroize=dict(default=False, type='bool'),
)
argument_spec.update(junos_argument_spec)
mutually_exclusive = [('lines', 'src', 'rollback', 'zeroize')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
candidate = module.params['lines'] or module.params['src']
commit = not module.check_mode
result = {'changed': False, 'warnings': warnings}
if module.params['backup']:
for conf_format in ['set', 'text']:
reply = get_configuration(module, format=conf_format)
match = reply.find('.//configuration-%s' % conf_format)
if match is not None:
break
else:
module.fail_json(msg='unable to retrieve device configuration')
result['__backup__'] = match.text.strip()
rollback_id = module.params['rollback']
if rollback_id:
diff = rollback(module, rollback_id)
if commit:
kwargs = {
'comment': module.params['comment']
}
with locked_config(module):
load_configuration(module, rollback=rollback_id)
commit_configuration(module, **kwargs)
if module._diff:
result['diff'] = {'prepared': diff}
result['changed'] = True
elif module.params['zeroize']:
if commit:
zeroize(module)
result['changed'] = True
else:
if candidate:
with locked_config(module):
diff = configure_device(module, warnings, candidate)
if diff:
if commit:
kwargs = {
'comment': module.params['comment']
}
if module.params['confirm'] > 0:
kwargs.update({
'confirm': True,
'confirm_timeout': module.params['confirm']
})
commit_configuration(module, **kwargs)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
elif module.params['confirm_commit']:
with locked_config(module):
# confirm a previous commit
commit_configuration(module)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
vilorious/pyload | module/network/Browser.py | 40 | 4190 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from logging import getLogger
from HTTPRequest import HTTPRequest
from HTTPDownload import HTTPDownload
class Browser(object):
__slots__ = ("log", "options", "bucket", "cj", "_size", "http", "dl")
def __init__(self, bucket=None, options={}):
self.log = getLogger("log")
self.options = options #holds pycurl options
self.bucket = bucket
        self.cj = None # needs to be set later
self._size = 0
self.renewHTTPRequest()
self.dl = None
def renewHTTPRequest(self):
if hasattr(self, "http"): self.http.close()
self.http = HTTPRequest(self.cj, self.options)
def setLastURL(self, val):
self.http.lastURL = val
# tunnel some attributes from HTTP Request to Browser
lastEffectiveURL = property(lambda self: self.http.lastEffectiveURL)
lastURL = property(lambda self: self.http.lastURL, setLastURL)
code = property(lambda self: self.http.code)
cookieJar = property(lambda self: self.cj)
def setCookieJar(self, cj):
self.cj = cj
self.http.cj = cj
@property
def speed(self):
if self.dl:
return self.dl.speed
return 0
@property
def size(self):
if self._size:
return self._size
if self.dl:
return self.dl.size
return 0
@property
def arrived(self):
if self.dl:
return self.dl.arrived
return 0
@property
def percent(self):
if not self.size: return 0
return (self.arrived * 100) / self.size
def clearCookies(self):
if self.cj:
self.cj.clear()
self.http.clearCookies()
def clearReferer(self):
self.http.lastURL = None
def abortDownloads(self):
self.http.abort = True
if self.dl:
self._size = self.dl.size
self.dl.abort = True
def httpDownload(self, url, filename, get={}, post={}, ref=True, cookies=True, chunks=1, resume=False,
progressNotify=None, disposition=False):
""" this can also download ftp """
self._size = 0
self.dl = HTTPDownload(url, filename, get, post, self.lastEffectiveURL if ref else None,
self.cj if cookies else None, self.bucket, self.options, progressNotify, disposition)
name = self.dl.download(chunks, resume)
self._size = self.dl.size
self.dl = None
return name
def load(self, *args, **kwargs):
""" retrieves page """
return self.http.load(*args, **kwargs)
def putHeader(self, name, value):
""" add a header to the request """
self.http.putHeader(name, value)
def addAuth(self, pwd):
"""Adds user and pw for http auth
:param pwd: string, user:password
"""
self.options["auth"] = pwd
self.renewHTTPRequest() #we need a new request
def removeAuth(self):
if "auth" in self.options: del self.options["auth"]
self.renewHTTPRequest()
def setOption(self, name, value):
"""Adds an option to the request, see HTTPRequest for existing ones"""
self.options[name] = value
def deleteOption(self, name):
if name in self.options: del self.options[name]
def clearHeaders(self):
self.http.clearHeaders()
def close(self):
""" cleanup """
if hasattr(self, "http"):
self.http.close()
del self.http
if hasattr(self, "dl"):
del self.dl
if hasattr(self, "cj"):
del self.cj
if __name__ == "__main__":
browser = Browser()#proxies={"socks5": "localhost:5000"})
ip = "http://www.whatismyip.com/automation/n09230945.asp"
#browser.getPage("http://google.com/search?q=bar")
#browser.getPage("https://encrypted.google.com/")
#print browser.getPage(ip)
#print browser.getRedirectLocation("http://google.com/")
#browser.getPage("https://encrypted.google.com/")
#browser.getPage("http://google.com/search?q=bar")
browser.httpDownload("http://speedtest.netcologne.de/test_10mb.bin", "test_10mb.bin")
| gpl-3.0 |
gregdek/ansible | lib/ansible/modules/network/aci/aci_contract.py | 12 | 8772 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_contract
short_description: Manage contract resources (vz:BrCP)
description:
- Manage Contract resources on Cisco ACI fabrics.
notes:
- This module does not manage Contract Subjects; see M(aci_contract_subject) to do this.
  Contract Subjects can still be removed using this module.
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
seealso:
- module: aci_contract_subject
- module: aci_tenant
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(vz:BrCP).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
contract:
description:
- The name of the contract.
type: str
required: yes
aliases: [ contract_name, name ]
description:
description:
- Description for the contract.
type: str
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
type: str
required: yes
aliases: [ tenant_name ]
scope:
description:
- The scope of a service contract.
- The APIC defaults to C(context) when unset during creation.
type: str
choices: [ application-profile, context, global, tenant ]
priority:
description:
- The desired QoS class to be used.
- The APIC defaults to C(unspecified) when unset during creation.
type: str
choices: [ level1, level2, level3, unspecified ]
dscp:
description:
    - The target Differentiated Services Code Point (DSCP) value.
- The APIC defaults to C(unspecified) when unset during creation.
type: str
choices: [ AF11, AF12, AF13, AF21, AF22, AF23, AF31, AF32, AF33, AF41, AF42, AF43, CS0, CS1, CS2, CS3, CS4, CS5, CS6, CS7, EF, VA, unspecified ]
aliases: [ target ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new contract
aci_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
description: Communication between web-servers and database
scope: application-profile
state: present
delegate_to: localhost
- name: Remove an existing contract
aci_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
state: absent
delegate_to: localhost
- name: Query a specific contract
aci_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
state: query
delegate_to: localhost
register: query_result
- name: Query all contracts
aci_contract:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
contract=dict(type='str', required=False, aliases=['contract_name', 'name']), # Not required for querying all objects
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
scope=dict(type='str', choices=['application-profile', 'context', 'global', 'tenant']),
priority=dict(type='str', choices=['level1', 'level2', 'level3', 'unspecified']), # No default provided on purpose
dscp=dict(type='str',
choices=['AF11', 'AF12', 'AF13', 'AF21', 'AF22', 'AF23', 'AF31', 'AF32', 'AF33', 'AF41', 'AF42', 'AF43',
'CS0', 'CS1', 'CS2', 'CS3', 'CS4', 'CS5', 'CS6', 'CS7', 'EF', 'VA', 'unspecified'],
aliases=['target']), # No default provided on purpose
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['contract', 'tenant']],
['state', 'present', ['contract', 'tenant']],
],
)
contract = module.params['contract']
description = module.params['description']
scope = module.params['scope']
priority = module.params['priority']
dscp = module.params['dscp']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='vzBrCP',
aci_rn='brc-{0}'.format(contract),
module_object=contract,
target_filter={'name': contract},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='vzBrCP',
class_config=dict(
name=contract,
descr=description,
scope=scope,
prio=priority,
targetDscp=dscp,
),
)
aci.get_diff(aci_class='vzBrCP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
atsaki/libcloud | example_loadbalancer.py | 58 | 2483 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from libcloud.loadbalancer.base import Member, Algorithm
from libcloud.loadbalancer.types import Provider, State
from libcloud.loadbalancer.providers import get_driver
def main():
cls = get_driver(Provider.RACKSPACE)
driver = cls('username', 'api key', region='ord')
balancers = driver.list_balancers()
print(balancers)
# creating a balancer which balances traffic across two
# nodes: 192.168.86.1:80 and 192.168.86.2:8080. Balancer
# itself listens on port 80/tcp
new_balancer_name = 'testlb' + os.urandom(4).encode('hex')
members = (Member(None, '192.168.86.1', 80),
Member(None, '192.168.86.2', 8080))
new_balancer = driver.create_balancer(name=new_balancer_name,
algorithm=Algorithm.ROUND_ROBIN,
port=80,
protocol='http',
members=members)
print(new_balancer)
# wait for balancer to become ready
    # NOTE: in real-life code, add a timeout so you don't end up in an
    # endless loop when things go wrong on the provider side
while True:
balancer = driver.get_balancer(balancer_id=new_balancer.id)
if balancer.state == State.RUNNING:
break
print('sleeping for 30 seconds for balancers to become ready')
time.sleep(30)
# fetch list of members
members = balancer.list_members()
print(members)
# remove first member
balancer.detach_member(members[0])
# remove the balancer
driver.destroy_balancer(new_balancer)
if __name__ == '__main__':
main()
| apache-2.0 |
raskul/CROWN | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
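# Sketch of what parse_po returns for a minimal (hypothetical) snippet:
#   msgid "Hello"
#   msgstr "Hallo"
# yields [(['"Hello"'], ['"Hallo"'])] -- the surrounding double quotes from
# the po file are kept verbatim in each list entry.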
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
joerg84/arangodb | 3rdParty/V8/v5.7.0.0/tools/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalizes and sorts an xml. It also fetches all the properties
inside linked vsprops and includes them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
"""Compare function between 2 tuple."""
def __call__(self, x, y):
return cmp(x[0], y[0])
class CmpNode(object):
"""Compare function between 2 xml nodes."""
def __call__(self, x, y):
def get_string(node):
node_string = "node"
node_string += node.nodeName
if node.nodeValue:
node_string += node.nodeValue
if node.attributes:
# We first sort by name, if present.
node_string += node.getAttribute("Name")
all_nodes = []
for (name, value) in node.attributes.items():
all_nodes.append((name, value))
all_nodes.sort(CmpTuple())
for (name, value) in all_nodes:
node_string += name
node_string += value
return node_string
return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
if node.nodeType == Node.TEXT_NODE:
if node.data.strip():
print '%s%s' % (' '*indent, node.data.strip())
return
if node.childNodes:
node.normalize()
# Get the number of attributes
attr_count = 0
if node.attributes:
attr_count = node.attributes.length
# Print the main tag
if attr_count == 0:
print '%s<%s>' % (' '*indent, node.nodeName)
else:
print '%s<%s' % (' '*indent, node.nodeName)
all_attributes = []
for (name, value) in node.attributes.items():
all_attributes.append((name, value))
all_attributes.sort(CmpTuple())
for (name, value) in all_attributes:
print '%s %s="%s"' % (' '*indent, name, value)
print '%s>' % (' '*indent)
if node.nodeValue:
print '%s %s' % (' '*indent, node.nodeValue)
for sub_node in node.childNodes:
PrettyPrintNode(sub_node, indent=indent+2)
print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
"""Returns a list of all the node and sub nodes."""
node_list = []
if (node.attributes and
node.getAttribute('Name') == '_excluded_files'):
# We don't add the "_excluded_files" filter.
return []
for current in node.childNodes:
if current.nodeName == 'Filter':
node_list.extend(FlattenFilter(current))
else:
node_list.append(current)
return node_list
def FixFilenames(filenames, current_directory):
new_list = []
for filename in filenames:
if filename:
for key in REPLACEMENTS:
filename = filename.replace(key, REPLACEMENTS[key])
os.chdir(current_directory)
filename = filename.strip('"\' ')
if filename.startswith('$'):
new_list.append(filename)
else:
new_list.append(os.path.abspath(filename))
return new_list
def AbsoluteNode(node):
"""Makes all the properties we know about in this node absolute."""
if node.attributes:
for (name, value) in node.attributes.items():
if name in ['InheritedPropertySheets', 'RelativePath',
'AdditionalIncludeDirectories',
'IntermediateDirectory', 'OutputDirectory',
'AdditionalLibraryDirectories']:
# We want to fix up these paths
path_list = value.split(';')
new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
node.setAttribute(name, ';'.join(new_list))
if not value:
node.removeAttribute(name)
def CleanupVcproj(node):
"""For each sub node, we call recursively this function."""
for sub_node in node.childNodes:
AbsoluteNode(sub_node)
CleanupVcproj(sub_node)
  # Normalize the node, and remove all extraneous whitespace.
for sub_node in node.childNodes:
if sub_node.nodeType == Node.TEXT_NODE:
sub_node.data = sub_node.data.replace("\r", "")
sub_node.data = sub_node.data.replace("\n", "")
sub_node.data = sub_node.data.rstrip()
  # Fix all the semicolon-separated attributes so they are sorted, and also
  # remove the duplicates.
if node.attributes:
for (name, value) in node.attributes.items():
sorted_list = sorted(value.split(';'))
unique_list = []
for i in sorted_list:
if not unique_list.count(i):
unique_list.append(i)
node.setAttribute(name, ';'.join(unique_list))
if not value:
node.removeAttribute(name)
if node.childNodes:
node.normalize()
# For each node, take a copy, and remove it from the list.
node_array = []
while node.childNodes and node.childNodes[0]:
# Take a copy of the node and remove it from the list.
current = node.childNodes[0]
node.removeChild(current)
# If the child is a filter, we want to append all its children
# to this same list.
if current.nodeName == 'Filter':
node_array.extend(FlattenFilter(current))
else:
node_array.append(current)
# Sort the list.
node_array.sort(CmpNode())
# Insert the nodes in the correct order.
for new_node in node_array:
# But don't append empty tool node.
if new_node.nodeName == 'Tool':
if new_node.attributes and new_node.attributes.length == 1:
# This one was empty.
continue
if new_node.nodeName == 'UserMacro':
continue
node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
#TODO(nsylvain): Find a better way to navigate the xml.
nodes = []
for node in vcproj.childNodes:
if node.nodeName == "Configurations":
for sub_node in node.childNodes:
if sub_node.nodeName == "Configuration":
nodes.append(sub_node)
return nodes
def GetChildrenVsprops(filename):
dom = parse(filename)
if dom.documentElement.attributes:
vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
return []
def SeekToNode(node1, child2):
# A text node does not have properties.
if child2.nodeType == Node.TEXT_NODE:
return None
# Get the name of the current node.
current_name = child2.getAttribute("Name")
if not current_name:
# There is no name. We don't know how to merge.
return None
# Look through all the nodes to find a match.
for sub_node in node1.childNodes:
if sub_node.nodeName == child2.nodeName:
name = sub_node.getAttribute("Name")
if name == current_name:
return sub_node
# No match. We give up.
return None
def MergeAttributes(node1, node2):
# No attributes to merge?
if not node2.attributes:
return
for (name, value2) in node2.attributes.items():
# Don't merge the 'Name' attribute.
if name == 'Name':
continue
value1 = node1.getAttribute(name)
if value1:
      # The attribute exists in the main node. If it's equal, we leave it
# untouched, otherwise we concatenate it.
if value1 != value2:
node1.setAttribute(name, ';'.join([value1, value2]))
else:
      # The attribute does not exist in the main node. We append this one.
node1.setAttribute(name, value2)
    # If the attribute was a property sheet attribute, we remove it, since
    # it is useless.
if name == 'InheritedPropertySheets':
node1.removeAttribute(name)
def MergeProperties(node1, node2):
MergeAttributes(node1, node2)
for child2 in node2.childNodes:
child1 = SeekToNode(node1, child2)
if child1:
MergeProperties(child1, child2)
else:
node1.appendChild(child2.cloneNode(True))
def main(argv):
"""Main function of this vcproj prettifier."""
global ARGUMENTS
ARGUMENTS = argv
  # check that we have at least 1 parameter (the vcproj path).
if len(argv) < 2:
print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
'[key2=value2]' % argv[0])
return 1
# Parse the keys
for i in range(2, len(argv)):
(key, value) = argv[i].split('=')
REPLACEMENTS[key] = value
# Open the vcproj and parse the xml.
dom = parse(argv[1])
  # The first thing we need to do is find the Configuration Nodes and merge
  # them with the vsprops they include.
for configuration_node in GetConfiguationNodes(dom.documentElement):
# Get the property sheets associated with this configuration.
vsprops = configuration_node.getAttribute('InheritedPropertySheets')
# Fix the filenames to be absolute.
vsprops_list = FixFilenames(vsprops.strip().split(';'),
os.path.dirname(argv[1]))
# Extend the list of vsprops with all vsprops contained in the current
# vsprops.
for current_vsprops in vsprops_list:
vsprops_list.extend(GetChildrenVsprops(current_vsprops))
# Now that we have all the vsprops, we need to merge them.
for current_vsprops in vsprops_list:
MergeProperties(configuration_node,
parse(current_vsprops).documentElement)
# Now that everything is merged, we need to cleanup the xml.
CleanupVcproj(dom.documentElement)
  # Finally, we use the pretty xml function to print the vcproj back to the
# user.
#print dom.toprettyxml(newl="\n")
PrettyPrintNode(dom.documentElement)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| apache-2.0 |
varunarya10/basicdb | basicdb/utils.py | 3 | 4735 | import re
BATCH_QUERY_REGEX = re.compile(r'Item\.(\d+)\.(.*)')
PUT_ATTRIBUTE_QUERY_REGEX = re.compile(r'Attribute\.(\d+)\.(Name|Value|Replace)')
DELETE_QUERY_ARG_REGEX = re.compile(r'Attribute\.(\d+)\.(Name|Value)')
EXPECTED_QUERY_ARG_REGEX = re.compile(r'Expected\.(\d+)\.(Name|Value|Exists)')
def extract_numbered_args(regex, params):
attrs = {}
for (k, v) in params.iteritems():
match = regex.match(k)
if not match:
continue
idx, elem = match.groups()
if idx not in attrs:
attrs[idx] = {}
attrs[idx][elem] = v
return attrs
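# Illustrative example (hypothetical request parameters): with
#   regex  = PUT_ATTRIBUTE_QUERY_REGEX
#   params = {'Attribute.1.Name': 'colour', 'Attribute.1.Value': 'red'}
# extract_numbered_args() returns {'1': {'Name': 'colour', 'Value': 'red'}}.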
def extract_batch_additions_and_replacements_from_query_params(req):
args = extract_numbered_args(BATCH_QUERY_REGEX, req._params)
additions = {}
replacements = {}
for data in args.values():
if 'ItemName' in data:
item_name = data['ItemName']
subargs = extract_numbered_args(PUT_ATTRIBUTE_QUERY_REGEX, data)
for subdata in subargs.values():
if 'Name' in subdata and 'Value' in subdata:
attr_name = subdata['Name']
attr_value = subdata['Value']
if 'Replace' in subdata and subdata['Replace'] == 'true':
if item_name not in replacements:
replacements[item_name] = {}
if attr_name not in replacements[item_name]:
replacements[item_name][attr_name] = set()
replacements[item_name][attr_name].add(attr_value)
else:
if item_name not in additions:
additions[item_name] = {}
if attr_name not in additions[item_name]:
additions[item_name][attr_name] = set()
additions[item_name][attr_name].add(attr_value)
return additions, replacements
def extract_batch_deletions_from_query_params(req):
args = extract_numbered_args(BATCH_QUERY_REGEX, req._params)
deletions = {}
for data in args.values():
if 'ItemName' in data:
item_name = data['ItemName']
subargs = extract_numbered_args(DELETE_QUERY_ARG_REGEX, data)
for subdata in subargs.values():
if 'Name' not in subdata:
continue
attr_name = subdata['Name']
if item_name not in deletions:
deletions[item_name] = {}
if attr_name not in deletions[item_name]:
deletions[item_name][attr_name] = set()
if 'Value' in subdata:
deletions[item_name][attr_name].add(subdata['Value'])
else:
import basicdb
deletions[item_name][attr_name].add(basicdb.AllAttributes)
return deletions
def extract_additions_and_replacements_from_query_params(req):
args = extract_numbered_args(PUT_ATTRIBUTE_QUERY_REGEX, req._params)
additions = {}
replacements = {}
for idx, data in args.iteritems():
if 'Name' in args[idx] and 'Value' in args[idx]:
name = args[idx]['Name']
value = args[idx]['Value']
if 'Replace' in args[idx] and args[idx]['Replace'] == 'true':
if name not in replacements:
replacements[name] = set()
replacements[name].add(value)
else:
if name not in additions:
additions[name] = set()
additions[name].add(value)
return additions, replacements
def extract_expectations_from_query_params(req):
args = extract_numbered_args(EXPECTED_QUERY_ARG_REGEX, req._params)
expectations = set()
for data in args.values():
if 'Name' in data:
if 'Value' in data:
expected_value = data['Value']
elif 'Exists' in data:
val = data['Exists']
expected_value = not (val == 'false')
expectations.add((data['Name'], expected_value))
return expectations
def extract_deletions_from_query_params(req):
args = extract_numbered_args(DELETE_QUERY_ARG_REGEX, req._params)
deletions = {}
for data in args.values():
if 'Name' not in data:
continue
attr_name = data['Name']
if attr_name not in deletions:
deletions[attr_name] = set()
if 'Value' in data:
deletions[attr_name].add(data['Value'])
else:
import basicdb
deletions[attr_name].add(basicdb.AllAttributes)
return deletions
| apache-2.0 |
mfherbst/spack | var/spack/repos/builtin/packages/pigz/package.py | 4 | 1886 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Pigz(MakefilePackage):
"""A parallel implementation of gzip for modern multi-processor,
multi-core machines."""
homepage = "http://zlib.net/pigz/"
url = "https://github.com/madler/pigz/archive/v2.3.4.tar.gz"
version('2.4', '3c8a601db141d3013ef9fe5f2daaf73f')
version('2.3.4', 'c109057050b15edf3eb9bb4d0805235e')
depends_on('zlib')
def build(self, spec, prefix):
make()
def install(self, spec, prefix):
mkdirp(prefix.bin)
mkdirp(prefix.man.man1)
install('pigz', "%s/pigz" % prefix.bin)
install('pigz.1', "%s/pigz.1" % prefix.man.man1)
| lgpl-2.1 |
liberatorqjw/scikit-learn | sklearn/tests/test_multiclass.py | 8 | 21910 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
"""Test that ovr works with classes that are always present or absent."""
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
ovr.fit(iris.data, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# test that ties are broken using the decision function, not defaulting to
# the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
# recalculate votes to make sure we have a tie
predictions = np.vstack([clf.predict(X) for clf in multi_clf.estimators_])
scores = np.vstack([clf.decision_function(X)
for clf in multi_clf.estimators_])
# classifiers are in order 0-1, 0-2, 1-2
# aggregate votes:
votes = np.zeros((4, 3))
votes[np.arange(4), predictions[0]] += 1
votes[np.arange(4), 2 * predictions[1]] += 1
votes[np.arange(4), 1 + predictions[2]] += 1
# for the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# for the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# for the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], 0)
# in the zero-one classifier, the score for 0 is greater than the score for
# one.
assert_greater(scores[0][0], scores[0][1])
# score for one is greater than score for zero
assert_greater(scores[2, 0] - scores[0, 0], scores[0, 0] + scores[1, 0])
# score for one is greater than score for two
assert_greater(scores[2, 0] - scores[0, 0], -scores[1, 0] - scores[2, 0])
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
"Test that the OvO doesn't screw the encoding of string labels"
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
svc = LinearSVC()
ovo = OneVsOneClassifier(svc)
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb, X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
HackLinux/python-adb | adb_test.py | 3 | 6340 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adb."""
import cStringIO
import struct
import unittest
from adb import adb_commands
from adb import adb_protocol
import common_stub
BANNER = 'blazetest'
LOCAL_ID = 1
REMOTE_ID = 2
class BaseAdbTest(unittest.TestCase):
@classmethod
def _ExpectWrite(cls, usb, command, arg0, arg1, data):
usb.ExpectWrite(cls._MakeHeader(command, arg0, arg1, data))
usb.ExpectWrite(data)
if command == 'WRTE':
cls._ExpectRead(usb, 'OKAY', 0, 0)
@classmethod
def _ExpectRead(cls, usb, command, arg0, arg1, data=''):
usb.ExpectRead(cls._MakeHeader(command, arg0, arg1, data))
if data:
usb.ExpectRead(data)
if command == 'WRTE':
cls._ExpectWrite(usb, 'OKAY', LOCAL_ID, REMOTE_ID, '')
@classmethod
def _ConvertCommand(cls, command):
return sum(ord(c) << (i * 8) for i, c in enumerate(command))
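  # Illustrative example (hypothetical): _ConvertCommand('OKAY') packs the
  # ASCII bytes little-endian, giving 0x59414B4F.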
@classmethod
def _MakeHeader(cls, command, arg0, arg1, data):
command = cls._ConvertCommand(command)
magic = command ^ 0xFFFFFFFF
checksum = adb_protocol.AdbMessage.CalculateChecksum(data)
return struct.pack('<6I', command, arg0, arg1, len(data), checksum, magic)
@classmethod
def _ExpectConnection(cls, usb):
cls._ExpectWrite(usb, 'CNXN', 0x01000000, 4096, 'host::%s\0' % BANNER)
cls._ExpectRead(usb, 'CNXN', 0, 0, 'device::\0')
@classmethod
def _ExpectOpen(cls, usb, service):
cls._ExpectWrite(usb, 'OPEN', LOCAL_ID, 0, service)
cls._ExpectRead(usb, 'OKAY', REMOTE_ID, LOCAL_ID)
@classmethod
def _ExpectClose(cls, usb):
cls._ExpectRead(usb, 'CLSE', REMOTE_ID, 0)
cls._ExpectWrite(usb, 'CLSE', LOCAL_ID, REMOTE_ID, '')
@classmethod
def _Connect(cls, usb):
return adb_commands.AdbCommands.Connect(usb, BANNER)
class AdbTest(BaseAdbTest):
@classmethod
def _ExpectCommand(cls, service, command, *responses):
usb = common_stub.StubUsb()
cls._ExpectConnection(usb)
cls._ExpectOpen(usb, '%s:%s\0' % (service, command))
for response in responses:
cls._ExpectRead(usb, 'WRTE', REMOTE_ID, 0, response)
cls._ExpectClose(usb)
return usb
def testConnect(self):
usb = common_stub.StubUsb()
self._ExpectConnection(usb)
adb_commands.AdbCommands.Connect(usb, BANNER)
def testSmallResponseShell(self):
command = 'keepin it real'
response = 'word.'
usb = self._ExpectCommand('shell', command, response)
adb_commands = self._Connect(usb)
self.assertEqual(response, adb_commands.Shell(command))
def testBigResponseShell(self):
command = 'keepin it real big'
# The data doesn't have to be big, the point is that it just concatenates
# the data from different WRTEs together.
responses = ['other stuff, ', 'and some words.']
usb = self._ExpectCommand('shell', command, *responses)
adb_commands = self._Connect(usb)
self.assertEqual(''.join(responses), adb_commands.Shell(command))
def testStreamingResponseShell(self):
command = 'keepin it real big'
# expect multiple lines
responses = ['other stuff, ', 'and some words.']
usb = self._ExpectCommand('shell', command, *responses)
adb_commands = self._Connect(usb)
response_count = 0
for (expected,actual) in zip(responses, adb_commands.StreamingShell(command)):
self.assertEqual(expected, actual)
response_count = response_count + 1
self.assertEqual(len(responses), response_count)
def testReboot(self):
usb = self._ExpectCommand('reboot', '', '')
adb_commands = self._Connect(usb)
adb_commands.Reboot()
def testRebootBootloader(self):
usb = self._ExpectCommand('reboot', 'bootloader', '')
adb_commands = self._Connect(usb)
adb_commands.RebootBootloader()
def testRemount(self):
usb = self._ExpectCommand('remount', '', '')
adb_commands = self._Connect(usb)
adb_commands.Remount()
def testRoot(self):
usb = self._ExpectCommand('root', '', '')
adb_commands = self._Connect(usb)
adb_commands.Root()
class FilesyncAdbTest(BaseAdbTest):
@classmethod
def _MakeSyncHeader(cls, command, *int_parts):
command = cls._ConvertCommand(command)
return struct.pack('<%dI' % (len(int_parts) + 1), command, *int_parts)
@classmethod
def _MakeWriteSyncPacket(cls, command, data='', size=None):
return cls._MakeSyncHeader(command, size or len(data)) + data
@classmethod
def _ExpectSyncCommand(cls, write_commands, read_commands):
usb = common_stub.StubUsb()
cls._ExpectConnection(usb)
cls._ExpectOpen(usb, 'sync:\0')
while write_commands or read_commands:
if write_commands:
command = write_commands.pop(0)
cls._ExpectWrite(usb, 'WRTE', LOCAL_ID, REMOTE_ID, command)
if read_commands:
command = read_commands.pop(0)
cls._ExpectRead(usb, 'WRTE', REMOTE_ID, LOCAL_ID, command)
cls._ExpectClose(usb)
return usb
def testPush(self):
filedata = 'alo there, govnah'
mtime = 100
send = [
self._MakeWriteSyncPacket('SEND', '/data,33272'),
self._MakeWriteSyncPacket('DATA', filedata),
self._MakeWriteSyncPacket('DONE', size=mtime),
]
data = 'OKAY\0\0\0\0'
usb = self._ExpectSyncCommand([''.join(send)], [data])
adb_commands = self._Connect(usb)
adb_commands.Push(cStringIO.StringIO(filedata), '/data', mtime=mtime)
def testPull(self):
filedata = "g'ddayta, govnah"
recv = self._MakeWriteSyncPacket('RECV', '/data')
data = [
self._MakeWriteSyncPacket('DATA', filedata),
self._MakeWriteSyncPacket('DONE'),
]
usb = self._ExpectSyncCommand([recv], [''.join(data)])
adb_commands = self._Connect(usb)
self.assertEqual(filedata, adb_commands.Pull('/data'))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
MissCatLady/AlarmEZ | ENV/lib/python2.7/site-packages/pip/vendor/html5lib/filters/whitespace.py | 1730 | 1142 | from __future__ import absolute_import, division, unicode_literals
import re
from . import _base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(_base.Filter):
spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
def __iter__(self):
preserve = 0
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag" \
and (preserve or token["name"] in self.spacePreserveElements):
preserve += 1
elif type == "EndTag" and preserve:
preserve -= 1
elif not preserve and type == "SpaceCharacters" and token["data"]:
                # Test on token["data"] above so we do not introduce spaces where there were none
token["data"] = " "
elif not preserve and type == "Characters":
token["data"] = collapse_spaces(token["data"])
yield token
def collapse_spaces(text):
return SPACES_REGEX.sub(' ', text)
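# Illustrative behaviour (hypothetical input):
#   collapse_spaces('a \t\n b') -> 'a b'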
| mit |
304471720/mongrel2 | examples/ws/python/echo.py | 55 | 3488 | import simplejson as json
from mongrel2 import handler
import wsutil
import sys
import time
import re
sender_id = "82209006-86FF-4982-B5EA-D1E29E55D480"
conn = handler.Connection(sender_id, "tcp://127.0.0.1:9990",
"tcp://127.0.0.1:9989")
CONNECTION_TIMEOUT=5
closingMessages={}
badUnicode=re.compile(u'[\ud800-\udfff]')
logf=open('echo.log','wb')
#logf=open('/dev/null','wb')
#logf=sys.stdout
def abortConnection(conn,req,reason='none',code=None):
#print 'abort',conn,req,reason,code
if code is not None:
#print "Closing cleanly\n"
conn.reply_websocket(req,code+reason,opcode=wsutil.OP_CLOSE)
closingMessages[req.conn_id]=(time.time(),req.sender)
else:
conn.reply(req,'')
print >>logf,'abort',code,reason
while True:
now=time.time()
logf.flush()
for k,(t,uuid) in closingMessages.items():
if now > t+CONNECTION_TIMEOUT:
conn.send(uuid,k,'')
try:
req = conn.recv()
except:
print "FAILED RECV"
continue
if req.is_disconnect():
#print "DISCONNECTED", req.conn_id
continue
if req.headers.get('METHOD') == 'WEBSOCKET_HANDSHAKE':
#print "HANDSHAKE"
conn.reply(req,
'\r\n'.join([
"HTTP/1.1 101 Switching Protocols",
"Upgrade: websocket",
"Connection: Upgrade",
"Sec-WebSocket-Accept: %s\r\n\r\n"])%req.body)
continue
if req.headers.get('METHOD') != 'WEBSOCKET':
print 'METHOD is Not WEBSOCKET:',req.headers#,req.body
conn.reply(req,'')
continue
try:
#print 'headers',req.headers
flags = int(req.headers.get('FLAGS'),16)
fin = flags&0x80==0x80
rsvd=flags & 0x70
opcode=flags & 0xf
wsdata = req.body
#print fin,rsvd,opcode,len(wsdata),wsdata
#logf.write('\n')
except:
#print "Unable to decode FLAGS"
abortConnection(conn,req,'WS decode failed')
        continue
if rsvd != 0:
abortConnection(conn,req,'reserved non-zero',
wsutil.CLOSE_PROTOCOL_ERROR)
continue
if opcode == wsutil.OP_CLOSE:
if req.conn_id in closingMessages:
del closingMessages[req.conn_id]
conn.reply(req,'')
else:
conn.reply_websocket(req,wsdata,opcode)
conn.reply(req,'')
continue
if req.conn_id in closingMessages:
continue
if opcode not in wsutil.opcodes:
abortConnection(conn,req,'Unknown opcode',
wsutil.CLOSE_PROTOCOL_ERROR)
continue
if (opcode & 0x8) != 0:
if opcode ==wsutil.OP_PING:
opcode = wsutil.OP_PONG
conn.reply_websocket(req,wsdata,opcode)
continue
if opcode == wsutil.OP_PONG:
continue # We don't send pings, so ignore pongs
if(opcode == wsutil.OP_TEXT):
try:
x=wsdata.decode('utf-8')
#Thank you for not fixing python issue8271 in 2.x :(
if badUnicode.search(x):
raise UnicodeError('Surrogates not allowed')
#for c in x:
#if (0xd800 <= ord(c) <= 0xdfff):
#raise UnicodeError('Surrogates not allowed')
except:
abortConnection(conn,req,'invalid UTF', wsutil.CLOSE_BAD_DATA)
continue
conn.reply_websocket(req,wsdata,opcode)
| bsd-3-clause |
kapilrastogi/Impala | tests/common/impala_cluster.py | 1 | 8569 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Basic object model of an Impala cluster (set of Impala processes).
#
import logging
import psutil
import socket
from getpass import getuser
from random import choice
from tests.common.impala_service import *
from tests.util.shell_util import exec_process_async, exec_process
from time import sleep
logging.basicConfig(level=logging.ERROR, format='%(threadName)s: %(message)s')
LOG = logging.getLogger('impala_cluster')
LOG.setLevel(level=logging.DEBUG)
# Represents a set of Impala processes. Each Impala process must be created with
# a basic set of command line options (beeswax_port, webserver_port, etc)
class ImpalaCluster(object):
def __init__(self):
self.__impalads, self.__statestoreds, self.__catalogd =\
self.__build_impala_process_lists()
LOG.info("Found %d impalad/%d statestored/%d catalogd process(es)" %\
(len(self.__impalads), len(self.__statestoreds), 1 if self.__catalogd else 0))
def refresh(self):
""" Re-loads the impalad/statestored/catalogd processes if they exist.
Helpful to confirm that processes have been killed.
"""
self.__impalads, self.__statestoreds, self.__catalogd =\
self.__build_impala_process_lists()
@property
def statestored(self):
"""
Returns the statestore process
    Note: Currently we expect a single statestore process; in the future this
    might change, in which case this should return the "active" statestore.
"""
# If no statestored process exists, return None.
return self.__statestoreds[0] if len(self.__statestoreds) > 0 else None
@property
def impalads(self):
"""Returns a list of the known impalad processes"""
return self.__impalads
@property
def catalogd(self):
"""Returns the catalogd process, or None if no catalogd process was found"""
return self.__catalogd
def get_first_impalad(self):
return self.impalads[0]
def get_any_impalad(self):
"""Selects a random impalad from the list of known processes"""
return choice(self.impalads)
def get_different_impalad(self, other_impalad):
"""Selects an impalad that is different from the given impalad"""
if len(self.impalads) <= 1:
assert 0, "Only %d impalads available to choose from" % len(self.impalads)
LOG.info("other_impalad: " + str(other_impalad))
LOG.info("Cluster: " + str(len(self.impalads)))
LOG.info("Cluster: " + str(self.impalads))
return choice([impalad for impalad in self.impalads if impalad != other_impalad])
def __build_impala_process_lists(self):
"""
Gets all the running Impala procs (with start arguments) on the machine.
Note: This currently only works for the local case. To support running in a cluster
environment this would need to enumerate each machine in the cluster.
"""
impalads = list()
statestored = list()
catalogd = None
for pid in psutil.get_pid_list():
try:
process = psutil.Process(pid)
except psutil.NoSuchProcess, e:
# A process from get_pid_list() no longer exists, continue.
LOG.info(e)
continue
try:
if process.username != getuser():
continue
except KeyError, e:
if "uid not found" in str(e):
continue
raise
if process.name == 'impalad' and len(process.cmdline) >= 1:
impalads.append(ImpaladProcess(process.cmdline))
elif process.name == 'statestored' and len(process.cmdline) >= 1:
statestored.append(StateStoreProcess(process.cmdline))
elif process.name == 'catalogd' and len(process.cmdline) >=1:
catalogd = CatalogdProcess(process.cmdline)
return impalads, statestored, catalogd
# Represents a process running on a machine and common actions that can be performed
# on a process such as restarting or killing.
class Process(object):
def __init__(self, cmd):
self.cmd = cmd
assert cmd is not None and len(cmd) >= 1,\
'Process object must be created with valid command line argument list'
def get_pid(self):
"""Gets the pid of the process. Returns None if the PID cannot be determined"""
LOG.info("Attempting to find PID for %s" % ' '.join(self.cmd))
for pid in psutil.get_pid_list():
try:
process = psutil.Process(pid)
if set(self.cmd) == set(process.cmdline):
return pid
except psutil.NoSuchProcess, e:
# A process from get_pid_list() no longer exists, continue.
LOG.info(e)
LOG.info("No PID found for process cmdline: %s. Process is dead?" % self.cmd)
return None
def start(self):
LOG.info("Starting process: %s" % ' '.join(self.cmd))
self.process = exec_process_async(' '.join(self.cmd))
def wait(self):
"""Wait until the current process has exited, and returns
(return code, stdout, stderr)"""
LOG.info("Waiting for process: %s" % ' '.join(self.cmd))
stdout, stderr = self.process.communicate()
return self.process.returncode, stdout, stderr
def kill(self):
"""
    Kills the given process.
    Returns the PID that was killed, or None if no PID was found (process not running).
"""
pid = self.get_pid()
if pid is None:
assert 0, "No processes %s found" % self.cmd
LOG.info('Killing: %s (PID: %d)' % (' '.join(self.cmd), pid))
exec_process("kill -9 %d" % pid)
return pid
def restart(self):
"""Kills and restarts the process"""
self.kill()
# Wait for a bit so the ports will be released.
sleep(1)
self.start()
def __str__(self):
return "Command: %s PID: %s" % (self.cmd, self.get_pid())
# Base class for all Impala processes
class BaseImpalaProcess(Process):
def __init__(self, cmd, hostname):
super(BaseImpalaProcess, self).__init__(cmd)
self.hostname = hostname
def _get_webserver_port(self, default=None):
return int(self._get_arg_value('webserver_port', default))
def _get_arg_value(self, arg_name, default=None):
"""Gets the argument value for given argument name"""
for arg in self.cmd:
if ('%s=' % arg_name) in arg.strip().lstrip('-'):
return arg.split('=')[1]
if default is None:
assert 0, "No command line argument '%s' found." % arg_name
return default
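# Illustrative note (added, not from the original source): for a process started
# as "impalad -webserver_port=25000 -beeswax_port=21000",
# _get_arg_value('webserver_port') walks self.cmd, matches the
# 'webserver_port=' fragment after stripping leading dashes, and returns the
# string '25000'; _get_webserver_port() then casts it to int. A missing argument
# with no default trips the assert instead.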
# Represents an impalad process
class ImpaladProcess(BaseImpalaProcess):
def __init__(self, cmd):
super(ImpaladProcess, self).__init__(cmd, socket.gethostname())
self.service = ImpaladService(self.hostname, self._get_webserver_port(default=25000),
self.__get_beeswax_port(default=21000),
self.__get_be_port(default=22000),
self.__get_hs2_port(default=21050))
def __get_beeswax_port(self, default=None):
return int(self._get_arg_value('beeswax_port', default))
def __get_be_port(self, default=None):
return int(self._get_arg_value('be_port', default))
def __get_hs2_port(self, default=None):
return int(self._get_arg_value('hs2_port', default))
def start(self, wait_until_ready=True):
"""Starts the impalad and waits until the service is ready to accept connections."""
super(ImpaladProcess, self).start()
self.service.wait_for_metric_value('impala-server.ready',
expected_value=1, timeout=30)
# Represents a statestored process
class StateStoreProcess(BaseImpalaProcess):
def __init__(self, cmd):
super(StateStoreProcess, self).__init__(cmd, socket.gethostname())
self.service =\
StateStoredService(self.hostname, self._get_webserver_port(default=25010))
# Represents a catalogd process
class CatalogdProcess(BaseImpalaProcess):
def __init__(self, cmd):
super(CatalogdProcess, self).__init__(cmd, socket.gethostname())
self.service = CatalogdService(self.hostname,
self._get_webserver_port(default=25020), self.__get_port(default=26000))
def __get_port(self, default=None):
return int(self._get_arg_value('catalog_service_port', default))
| apache-2.0 |
z01nl1o02/tests | cnn_layer_size/show_conv_dim.py | 1 | 1705 | import os,sys,pdb
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('--height','-H',help='input height',type=int)
ap.add_argument('--width','-W',help='input width',type=int)
ap.add_argument('--layers','-F',help='layer info txt with each line for one layer')
ap.add_argument('--deconv','-D',help='0 for conv 1 for deconv',type=int,default=0)
args = ap.parse_args()
def conv(CKSP,HW):
C,K,S,P = CKSP
H,W = HW
H = int((H - K + 2*P)/S + 1)
W = int((W - K + 2*P)/S + 1)
return H,W
def deconv(CKSP,HW):
C,K,S,P = CKSP
H,W = HW
H = int((H - 1) * S + K - 2*P)
W = int((W - 1) * S + K - 2*P)
return H,W
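# Worked example (added for illustration): a 3x3 kernel (K=3) with stride S=2
# and padding P=1 on a 224x224 input gives
#   H = int((224 - 3 + 2*1)/2 + 1) = 112, and likewise W = 112,
# i.e. conv((C, 3, 2, 1), (224, 224)) -> (112, 112). Applying
# deconv((C, 3, 2, 1), (112, 112)) gives (223, 223): the two formulas are not
# exact inverses when the convolution discards a remainder.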
def calc_conv(layers,HW):
output = [HW]
for CKSP in layers:
HW = conv(CKSP,HW)
output.append(HW)
return output
def calc_deconv(layers,HW):
output = [HW]
for CKSP in layers:
HW = deconv(CKSP,HW)
output.append(HW)
return output
def parse_layers(filepath):
layers = []
with open(filepath,'rb') as f:
for line in f:
line = line.strip()
if line == "":
continue
C,K,S,P = [int(x) for x in line.split(',')] #channel number, kernel size, stride, padding
layers.append( (C,K,S,P) )
return layers
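# Example layer-description file (hypothetical contents for --layers), one
# "channels,kernel,stride,padding" line per layer:
#   64,3,2,1
#   128,3,2,1
# With --height 224 --width 224 this reports 112x112 after the first layer and
# 56x56 after the second.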
layers = parse_layers(args.layers)
if args.deconv == 0:
HW = calc_conv(layers, (args.height,args.width))
print 'conv...'
else:
HW = calc_deconv(layers, (args.height,args.width))
print 'deconv...'
print('input size (h,w) = (%d,%d)'%(args.height,args.width))
for (H,W),(C,K,S,P) in zip(HW[1:],layers):
print('(channel,kernel,stride,padding)=(%d,%d,%d,%d) (h,w) = (%d,%d)'%(C,K,S,P,H,W))
| gpl-2.0 |
cretingame/Yarr-fw | script/debug.py | 1 | 5687 | import os
import subprocess
script_path = os.getcwd() + "/" + os.path.splitext(__file__)[0] + ".tcl"
script_file = open(script_path, "w+")
os.chdir("..")
project_path = os.getcwd()
script_file.write(
"######################################################\n" +
"# Generated file to open the virtual logic analyyer\n" +
"######################################################\n" +
"\n\n" +
"#Run " + __file__+ " to generate this file\n\n")
ltx_files = []
ltx_file = None
cmds_debug=(
"start_gui\n" +
"open_hw\n" +
"connect_hw_server\n" +
"open_hw_target\n" +
"current_hw_device [lindex [get_hw_devices] 1]\n" +
"refresh_hw_device -update_hw_probes false [lindex [get_hw_devices] 1]\n" +
"set_property PROBES.FILE {}{}{} [lindex [get_hw_devices] 1]\n" +
"refresh_hw_device [lindex [get_hw_devices] 1]\n" +
"display_hw_ila_data [ get_hw_ila_data *]\n" +
"\n"
)
cmds_post_gui=(
"set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/dma_ctrl_start_l2p_s_1 -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_0.axis_debug\"}]]\n" +
"set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/dma_ctrl_start_p2l_s_1 -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_0.axis_debug\"}]]\n" +
"set_property CONTROL.TRIGGER_CONDITION OR [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_0.axis_debug\"}]\n" +
"set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/dma_ctrl_start_l2p_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_2.pipelined_wishbone_debug\"}]]\n" +
"set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/dma_ctrl_start_p2l_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_2.pipelined_wishbone_debug\"}]]\n" +
"set_property CONTROL.TRIGGER_CONDITION OR [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_2.pipelined_wishbone_debug\"}]\n" +
"set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/ddr_app_cmd_en_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_5.ddr_debug\"}]]\n" +
"#set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/dma_ctrl_start_l2p_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_4.l2p_debug\"}]]\n"+
#"set_property TRIGGER_COMPARE_VALUE eq3'h1 [get_hw_probes app_0/ddr_app_cmd_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_5.ddr_debug\"}]]\n"+
#"set_property TRIGGER_COMPARE_VALUE eq29'h0000_2001 [get_hw_probes app_0/count_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_0.axis_debug\"}]]\n"+
#"set_property TRIGGER_COMPARE_VALUE eq29'h0000_2001 [get_hw_probes app_0/gray_count_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_2.pipelined_wishbone_debug\"}]]\n" +
#"set_property TRIGGER_COMPARE_VALUE eq29'h0000_2001 [get_hw_probes app_0/ddr_count_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_5.ddr_debug\"}]]\n" +
"set root /home/asautaux/Yarr-fw/ila/\n"+
"set ilafile1 ila_axis_data\n"+
"set ilafile2 ila_wb_data\n"+
"#set ilafile3 ila_l2p_data\n"+
"set ilafile3 ila_ram_data\n"+
"\n\n" +
'for {set i 0} {$i < 10000} {incr i} {\n'+
' run_hw_ila [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1]]\n'+
' wait_on_hw_ila -timeout 1 [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1]]\n'+
' display_hw_ila_data [upload_hw_ila_data [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~"app_0/dbg_0.axis_debug"}]]\n'+
' display_hw_ila_data [upload_hw_ila_data [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~"app_0/dbg_2.pipelined_wishbone_debug"}]]\n'+
' #display_hw_ila_data [upload_hw_ila_data [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~"app_0/dbg_4.l2p_debug"}]]\n'+
' display_hw_ila_data [upload_hw_ila_data [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~"app_0/dbg_5.ddr_debug"}]]\n'+
' write_hw_ila_data $root$i-$ilafile1.ila hw_ila_data_1\n'+
' write_hw_ila_data $root$i-$ilafile2.ila hw_ila_data_2\n'+
' write_hw_ila_data $root$i-$ilafile3.ila hw_ila_data_3\n'+
' #write_hw_ila_data $root$i-$ilafile4.ila hw_ila_data_4\n'+
'}\n'
"\n"
)
for root, dirs, files in os.walk(project_path):
for file in files:
if file.endswith(".ltx"):
ltx_file = os.path.join(root, file)
ltx_files.append(ltx_file)
#print "Bitfile found : " + ltx_file
if len(ltx_files) == 0 :
print("No debug file found !\n")
elif len(ltx_files) == 1:
print("Debug file found : " + ltx_files[0])
ok = raw_input ("Will you debug with this file [Y/n] ?")
if ok[0].lower() == 'y':
ltx_file = ltx_files[0]
nb = 0
else:
ltx_file = None
else:
print("Several debug files found: ")
i = 0
for ltx_file in ltx_files:
print (str(i) + ": " + ltx_file)
i = i + 1
try:
nb = input("Choose a file by typing a number: ")
int(nb)
except:
print("You didn't enter a valid number")
ltx_file = None
else:
if nb >= len(ltx_files) or nb < 0 :
print("You didn't enter a valid number")
ltx_file = None
if (ltx_file != None):
ltx_file = ltx_files[nb]
cmds = cmds_debug.format('{',ltx_file,'}') #+ cmds_post_gui
script_file.write(cmds)
script_file.flush()
subprocess.call(["vivado", "-mode", "batch","-source", script_path])
else:
print "No debug file found !"
script_file.close()
| gpl-3.0 |
Gloomymoon/SecKill | manage.py | 1 | 1215 | #!/usr/bin/env python
# -*- coding: UTF-8 -*
import os
from app import create_app, db
from app.models import User, Role, Permission, Coupon, SecKill, Datemark
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
app = create_app(os.getenv('ATH_CONFIG') or 'default')
manager = Manager(app)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command("db", MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
def init_app_data():
db.drop_all()
db.create_all()
Coupon.insert_coupons()
Role.insert_roles()
u = User(ip="127.0.0.1", name="Administrator",
role=Role.query.filter_by(permissions=0xff).first())
db.session.add(u)
db.session.commit()
def calculated():
sk = SecKill.query.filter_by(win=False).filter_by(datemark=Datemark.today()).order_by(SecKill.kill_time).all()
sk1 = sk[0]
db.session.add(sk1)
db.session.commit() | gpl-3.0 |
xzturn/tensorflow | tensorflow/python/compiler/tensorrt/test/biasadd_matmul_test.py | 3 | 4713 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class BiasaddMatMulTest(trt_test.TfTrtIntegrationTestBase):
"""Testing conversion of BiasAdd MatMul in TF-TRT conversion."""
def _ConstOp(self, shape):
return constant_op.constant(np.random.randn(*shape), dtype=dtypes.float32)
def GraphFn(self, x):
input_matrix_rows = 4
input_matrix_columns = 144
b = self._ConstOp((input_matrix_columns, 4))
x1 = math_ops.matmul(x, b)
b = self._ConstOp((1, 4))
x1 = x1 + b
b = self._ConstOp((input_matrix_rows, 144))
x2 = self.trt_incompatible_op(x)
x2 = math_ops.matmul(x2, b, transpose_a=True)
x2 = gen_array_ops.reshape(x2, [4, -1])
x2 = self.trt_incompatible_op(x2)
b = self._ConstOp((4, input_matrix_columns))
x3 = math_ops.matmul(x, b, transpose_b=True)
b = self._ConstOp((16, input_matrix_rows))
x4 = self.trt_incompatible_op(x)
x4 = math_ops.matmul(x4, b, transpose_b=True, transpose_a=True)
x4 = gen_array_ops.reshape(x4, [4, -1])
x4 = self.trt_incompatible_op(x4)
# Note that tf.nn.bias_add supports up to 5 dimensions.
b = self._ConstOp((input_matrix_columns, 48))
x5 = math_ops.matmul(x, b)
b = self._ConstOp((48,))
x5 = nn.bias_add(x5, b)
x5 = gen_array_ops.reshape(x5, [4, -1])
x6 = gen_array_ops.reshape(x, [4, 24, 6])
b = self._ConstOp((6,))
x6 = nn.bias_add(x6, b, data_format="NHWC")
x6 = gen_array_ops.reshape(x6, [4, -1])
x7 = gen_array_ops.reshape(x, [4, 12, 4, 3])
b = self._ConstOp((3,))
x7 = nn.bias_add(x7, b, data_format="NHWC")
x7 = gen_array_ops.reshape(x7, [4, -1])
x8 = gen_array_ops.reshape(x, [4, 4, 3, 2, 6])
b = self._ConstOp((6,))
x8 = nn.bias_add(x8, b, data_format="NHWC")
x8 = gen_array_ops.reshape(x8, [4, -1])
x9 = gen_array_ops.reshape(x, [4, 12, 3, 2, 2])
b = self._ConstOp((12,))
x9 = nn.bias_add(x9, b, data_format="NCHW")
x9 = gen_array_ops.reshape(x9, [4, -1])
x10 = gen_array_ops.reshape(x, [4, 3, 4, 12])
b = self._ConstOp((3,))
x10 = nn.bias_add(x10, b, data_format="NCHW")
x10 = gen_array_ops.reshape(x10, [4, -1])
x11 = gen_array_ops.reshape(x, [4, 6, 24])
b = self._ConstOp((6,))
x11 = nn.bias_add(x11, b, data_format="NCHW")
x11 = gen_array_ops.reshape(x11, [4, -1])
out = array_ops.concat([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11],
axis=-1)
return array_ops.squeeze(out, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[4, 144]],
[[4, 6680]])
def GetConversionParams(self, run_params):
"""Return a ConversionParams for test."""
conversion_params = super(BiasaddMatMulTest,
self).GetConversionParams(run_params)
    conversion_params = conversion_params._replace(
        max_batch_size=4, maximum_cached_engines=1)
rewrite_config_with_trt = self.GetTrtRewriterConfig(
run_params=run_params,
conversion_params=conversion_params,
# Disable layout optimizer, since it will convert BiasAdd with NHWC
# format to NCHW format under four dimensional input.
disable_non_trt_optimizers=True)
return conversion_params._replace(
rewriter_config_template=rewrite_config_with_trt)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
if __name__ == "__main__":
test.main()
| apache-2.0 |
beeftornado/sentry | src/sentry/api/endpoints/project_key_stats.py | 3 | 2056 | from __future__ import absolute_import
import six
from collections import OrderedDict
from django.db.models import F
from rest_framework.response import Response
from sentry import tsdb
from sentry.api.base import StatsMixin
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import ProjectKey
class ProjectKeyStatsEndpoint(ProjectEndpoint, StatsMixin):
def get(self, request, project, key_id):
try:
key = ProjectKey.objects.get(
project=project, public_key=key_id, roles=F("roles").bitor(ProjectKey.roles.store)
)
except ProjectKey.DoesNotExist:
raise ResourceDoesNotExist
try:
stat_args = self._parse_args(request)
except ValueError:
return Response({"detail": "Invalid request data"}, status=400)
stats = OrderedDict()
for model, name in (
(tsdb.models.key_total_received, "total"),
(tsdb.models.key_total_blacklisted, "filtered"),
(tsdb.models.key_total_rejected, "dropped"),
):
# XXX (alex, 08/05/19) key stats were being stored under either key_id or str(key_id)
# so merge both of those back into one stats result.
result = tsdb.get_range(model=model, keys=[key.id, six.text_type(key.id)], **stat_args)
for key_id, points in six.iteritems(result):
for ts, count in points:
bucket = stats.setdefault(int(ts), {})
bucket.setdefault(name, 0)
bucket[name] += count
return Response(
[
{
"ts": ts,
"total": data["total"],
"dropped": data["dropped"],
"filtered": data["filtered"],
"accepted": data["total"] - data["dropped"] - data["filtered"],
}
for ts, data in six.iteritems(stats)
]
)
| bsd-3-clause |
javachengwc/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/shared/password_hasher.py | 118 | 1850 | # file openpyxl/shared/password_hasher.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Basic password hashing."""
def hash_password(plaintext_password=''):
"""Create a password hash from a given string.
This method is based on the algorithm provided by
Daniel Rentz of OpenOffice and the PEAR package
Spreadsheet_Excel_Writer by Xavier Noguer <[email protected]>.
"""
password = 0x0000
i = 1
for char in plaintext_password:
value = ord(char) << i
rotated_bits = value >> 15
value &= 0x7fff
password ^= (value | rotated_bits)
i += 1
password ^= len(plaintext_password)
password ^= 0xCE4B
return str(hex(password)).upper()[2:]
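
# Worked example added for illustration; the expected value below was derived by
# hand-tracing the algorithm above (each character is left-shifted by its
# 1-based position, overflow above the low 15 bits is rotated back in, and the
# results are XOR-ed together with the length and the constant 0xCE4B), not
# taken from openpyxl's test suite.
if __name__ == '__main__':
    assert hash_password('test') == 'CBEB'
    print(hash_password('test'))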
| apache-2.0 |
denisff/python-for-android | python-modules/twisted/twisted/conch/test/test_openssh_compat.py | 60 | 3381 | # Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.openssh_compat}.
"""
import os
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.python.compat import set
try:
import Crypto.Cipher.DES3
import pyasn1
except ImportError:
OpenSSHFactory = None
else:
from twisted.conch.openssh_compat.factory import OpenSSHFactory
from twisted.conch.test import keydata
from twisted.test.test_process import MockOS
class OpenSSHFactoryTests(TestCase):
"""
Tests for L{OpenSSHFactory}.
"""
if getattr(os, "geteuid", None) is None:
skip = "geteuid/seteuid not available"
elif OpenSSHFactory is None:
skip = "Cannot run without PyCrypto or PyASN1"
def setUp(self):
self.factory = OpenSSHFactory()
self.keysDir = FilePath(self.mktemp())
self.keysDir.makedirs()
self.factory.dataRoot = self.keysDir.path
self.keysDir.child("ssh_host_foo").setContent("foo")
self.keysDir.child("bar_key").setContent("foo")
self.keysDir.child("ssh_host_one_key").setContent(
keydata.privateRSA_openssh)
self.keysDir.child("ssh_host_two_key").setContent(
keydata.privateDSA_openssh)
self.keysDir.child("ssh_host_three_key").setContent(
"not a key content")
self.keysDir.child("ssh_host_one_key.pub").setContent(
keydata.publicRSA_openssh)
self.mockos = MockOS()
self.patch(os, "seteuid", self.mockos.seteuid)
self.patch(os, "setegid", self.mockos.setegid)
def test_getPublicKeys(self):
"""
L{OpenSSHFactory.getPublicKeys} should return the available public keys
in the data directory
"""
keys = self.factory.getPublicKeys()
self.assertEquals(len(keys), 1)
keyTypes = keys.keys()
self.assertEqual(keyTypes, ['ssh-rsa'])
def test_getPrivateKeys(self):
"""
L{OpenSSHFactory.getPrivateKeys} should return the available private
keys in the data directory.
"""
keys = self.factory.getPrivateKeys()
self.assertEquals(len(keys), 2)
keyTypes = keys.keys()
self.assertEqual(set(keyTypes), set(['ssh-rsa', 'ssh-dss']))
self.assertEquals(self.mockos.seteuidCalls, [])
self.assertEquals(self.mockos.setegidCalls, [])
def test_getPrivateKeysAsRoot(self):
"""
L{OpenSSHFactory.getPrivateKeys} should switch to root if the keys
aren't readable by the current user.
"""
keyFile = self.keysDir.child("ssh_host_two_key")
# Fake permission error by changing the mode
keyFile.chmod(0000)
self.addCleanup(keyFile.chmod, 0777)
# And restore the right mode when seteuid is called
savedSeteuid = os.seteuid
def seteuid(euid):
keyFile.chmod(0777)
return savedSeteuid(euid)
self.patch(os, "seteuid", seteuid)
keys = self.factory.getPrivateKeys()
self.assertEquals(len(keys), 2)
keyTypes = keys.keys()
self.assertEqual(set(keyTypes), set(['ssh-rsa', 'ssh-dss']))
self.assertEquals(self.mockos.seteuidCalls, [0, os.geteuid()])
self.assertEquals(self.mockos.setegidCalls, [0, os.getegid()])
| apache-2.0 |
skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/python/release.py | 52 | 1107 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A release-automation toolkit.
Don't use this outside of Twisted.
Maintainer: Christopher Armstrong
"""
import os
# errors
class DirectoryExists(OSError):
"""
Some directory exists when it shouldn't.
"""
pass
class DirectoryDoesntExist(OSError):
"""
Some directory doesn't exist when it should.
"""
pass
class CommandFailed(OSError):
pass
# utilities
def sh(command, null=True, prompt=False):
"""
I'll try to execute C{command}, and if C{prompt} is true, I'll
ask before running it. If the command returns something other
than 0, I'll raise C{CommandFailed(command)}.
"""
print "--$", command
if prompt:
if raw_input("run ?? ").startswith('n'):
return
if null:
command = "%s > /dev/null" % command
if os.system(command) != 0:
raise CommandFailed(command)
def runChdirSafe(f, *args, **kw):
origdir = os.path.abspath('.')
try:
return f(*args, **kw)
finally:
os.chdir(origdir)
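# Usage sketch added for illustration (not part of the Twisted release tooling
# itself): sh() raises CommandFailed on a non-zero exit status, and
# runChdirSafe() restores the working directory even when the wrapped callable
# fails. The commands below are ordinary POSIX true/false.
if __name__ == '__main__':
    runChdirSafe(sh, 'true', null=False)
    try:
        sh('false', null=False)
    except CommandFailed as e:
        print 'failed as expected:', e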
| gpl-2.0 |
leekchan/django_test | django/contrib/admindocs/utils.py | 23 | 4114 | "Misc. utility functions/classes for admin documentation generator."
import re
from email.parser import HeaderParser
from email.errors import HeaderParseError
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.utils.encoding import force_bytes
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def trim_docstring(docstring):
"""
Uniformly trim leading/trailing whitespace from docstrings.
Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
"""
if not docstring or not docstring.strip():
return ''
# Convert tabs to spaces and split into lines
lines = docstring.expandtabs().splitlines()
indent = min(len(line) - len(line.lstrip()) for line in lines if line.lstrip())
trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
return "\n".join(trimmed).strip()
def parse_docstring(docstring):
"""
Parse out the parts of a docstring. Return (title, body, metadata).
"""
docstring = trim_docstring(docstring)
parts = re.split(r'\n{2,}', docstring)
title = parts[0]
if len(parts) == 1:
body = ''
metadata = {}
else:
parser = HeaderParser()
try:
metadata = parser.parsestr(parts[-1])
except HeaderParseError:
metadata = {}
body = "\n\n".join(parts[1:])
else:
metadata = dict(metadata.items())
if metadata:
body = "\n\n".join(parts[1:-1])
else:
body = "\n\n".join(parts[1:])
return title, body, metadata
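# Usage sketch (added for illustration, not part of Django): given a view
# docstring such as
#
#     """Frobnicate the widget.
#
#     Longer description over
#     several lines.
#
#     Context: request
#     """
#
# parse_docstring() returns the first paragraph as the title, the middle
# paragraphs joined as the body, and the trailing RFC 822-style block parsed
# into the metadata mapping (roughly {'Context': 'request'}).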
def parse_rst(text, default_reference_context, thing_being_parsed=None):
"""
Convert the string from reST to an XHTML fragment.
"""
overrides = {
'doctitle_xform': True,
        'initial_header_level': 3,
"default_reference_context": default_reference_context,
"link_base": reverse('django-admindocs-docroot').rstrip('/')
}
if thing_being_parsed:
thing_being_parsed = force_bytes("<%s>" % thing_being_parsed)
# Wrap ``text`` in some reST that sets the default role to ``cmsreference``,
# then restores it.
source = """
.. default-role:: cmsreference
%s
.. default-role::
"""
parts = docutils.core.publish_parts(source % text,
source_path=thing_being_parsed, destination_path=None,
writer_name='html', settings_overrides=overrides)
return mark_safe(parts['fragment'])
#
# reST roles
#
ROLES = {
'model': '%s/models/%s/',
'view': '%s/views/%s/',
'template': '%s/templates/%s/',
'filter': '%s/filters/#%s',
'tag': '%s/tags/#%s',
}
def create_reference_role(rolename, urlbase):
def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None:
options = {}
if content is None:
content = []
node = docutils.nodes.reference(
rawtext,
text,
refuri=(urlbase % (
inliner.document.settings.link_base,
text.lower(),
)),
**options
)
return [node], []
docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None:
options = {}
if content is None:
content = []
context = inliner.document.settings.default_reference_context
node = docutils.nodes.reference(
rawtext,
text,
refuri=(ROLES[context] % (
inliner.document.settings.link_base,
text.lower(),
)),
**options
)
return [node], []
if docutils_is_available:
docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)
for name, urlbase in ROLES.items():
create_reference_role(name, urlbase)
| bsd-3-clause |
Rundll/django-mailer-2 | django_mailer/lockfile.py | 179 | 15120 |
"""
lockfile.py - Platform-independent advisory file locks.
Requires Python 2.5 unless you apply 2.4.diff
Locking is done on a per-thread basis instead of a per-process basis.
Usage:
>>> lock = FileLock('somefile')
>>> try:
... lock.acquire()
... except AlreadyLocked:
... print 'somefile', 'is locked already.'
... except LockFailed:
... print 'somefile', 'can\\'t be locked.'
... else:
... print 'got lock'
got lock
>>> print lock.is_locked()
True
>>> lock.release()
>>> lock = FileLock('somefile')
>>> print lock.is_locked()
False
>>> with lock:
... print lock.is_locked()
True
>>> print lock.is_locked()
False
>>> # It is okay to lock twice from the same thread...
>>> with lock:
... lock.acquire()
...
>>> # Though no counter is kept, so you can't unlock multiple times...
>>> print lock.is_locked()
False
Exceptions:
Error - base class for other exceptions
LockError - base class for all locking exceptions
AlreadyLocked - Another thread or process already holds the lock
LockFailed - Lock failed for some other reason
UnlockError - base class for all unlocking exceptions
AlreadyUnlocked - File was not locked.
NotMyLock - File was locked but not by the current thread/process
"""
from __future__ import division
import sys
import socket
import os
import thread
import threading
import time
import errno
import urllib
# Work with PEP8 and non-PEP8 versions of threading module.
if not hasattr(threading, "current_thread"):
threading.current_thread = threading.currentThread
if not hasattr(threading.Thread, "get_name"):
threading.Thread.get_name = threading.Thread.getName
__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
'LinkFileLock', 'MkdirFileLock', 'SQLiteFileLock']
class Error(Exception):
"""
Base class for other exceptions.
>>> try:
... raise Error
... except Exception:
... pass
"""
pass
class LockError(Error):
"""
Base class for error arising from attempts to acquire the lock.
>>> try:
... raise LockError
... except Error:
... pass
"""
pass
class LockTimeout(LockError):
"""Raised when lock creation fails within a user-defined period of time.
>>> try:
... raise LockTimeout
... except LockError:
... pass
"""
pass
class AlreadyLocked(LockError):
"""Some other thread/process is locking the file.
>>> try:
... raise AlreadyLocked
... except LockError:
... pass
"""
pass
class LockFailed(LockError):
"""Lock file creation failed for some other reason.
>>> try:
... raise LockFailed
... except LockError:
... pass
"""
pass
class UnlockError(Error):
"""
Base class for errors arising from attempts to release the lock.
>>> try:
... raise UnlockError
... except Error:
... pass
"""
pass
class NotLocked(UnlockError):
"""Raised when an attempt is made to unlock an unlocked file.
>>> try:
... raise NotLocked
... except UnlockError:
... pass
"""
pass
class NotMyLock(UnlockError):
"""Raised when an attempt is made to unlock a file someone else locked.
>>> try:
... raise NotMyLock
... except UnlockError:
... pass
"""
pass
class LockBase:
"""Base class for platform-specific lock classes."""
def __init__(self, path, threaded=True):
"""
>>> lock = LockBase('somefile')
>>> lock = LockBase('somefile', threaded=False)
"""
self.path = path
self.lock_file = os.path.abspath(path) + ".lock"
self.hostname = socket.gethostname()
self.pid = os.getpid()
if threaded:
name = threading.current_thread().get_name()
tname = "%s-" % urllib.quote(name, safe="")
else:
tname = ""
dirname = os.path.dirname(self.lock_file)
self.unique_name = os.path.join(dirname,
"%s.%s%s" % (self.hostname,
tname,
self.pid))
def acquire(self, timeout=None):
"""
Acquire the lock.
* If timeout is omitted (or None), wait forever trying to lock the
file.
* If timeout > 0, try to acquire the lock for that many seconds. If
the lock period expires and the file is still locked, raise
LockTimeout.
* If timeout <= 0, raise AlreadyLocked immediately if the file is
already locked.
"""
        raise NotImplementedError("implement in subclass")
def release(self):
"""
Release the lock.
If the file is not locked, raise NotLocked.
"""
        raise NotImplementedError("implement in subclass")
def is_locked(self):
"""
Tell whether or not the file is locked.
"""
        raise NotImplementedError("implement in subclass")
def i_am_locking(self):
"""
Return True if this object is locking the file.
"""
        raise NotImplementedError("implement in subclass")
def break_lock(self):
"""
Remove a lock. Useful if a locking thread failed to unlock.
"""
        raise NotImplementedError("implement in subclass")
def __enter__(self):
"""
Context manager support.
"""
self.acquire()
return self
def __exit__(self, *_exc):
"""
Context manager support.
"""
self.release()
class LinkFileLock(LockBase):
"""Lock access to a file using atomic property of link(2)."""
def acquire(self, timeout=None):
try:
open(self.unique_name, "wb").close()
except IOError:
raise LockFailed("failed to create %s" % self.unique_name)
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a hard link to it.
try:
os.link(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
nlinks = os.stat(self.unique_name).st_nlink
if nlinks == 2:
# The original link plus the one I created == 2. We're
# good to go.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
os.unlink(self.unique_name)
if timeout > 0:
raise LockTimeout
else:
raise AlreadyLocked
time.sleep(timeout is not None and timeout/10 or 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked
elif not os.path.exists(self.unique_name):
raise NotMyLock
os.unlink(self.unique_name)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name) and
os.stat(self.unique_name).st_nlink == 2)
def break_lock(self):
if os.path.exists(self.lock_file):
os.unlink(self.lock_file)
class MkdirFileLock(LockBase):
"""Lock file by creating a directory."""
def __init__(self, path, threaded=True):
"""
>>> lock = MkdirFileLock('somefile')
>>> lock = MkdirFileLock('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded)
if threaded:
tname = "%x-" % thread.get_ident()
else:
tname = ""
# Lock file itself is a directory. Place the unique file name into
# it.
self.unique_name = os.path.join(self.lock_file,
"%s.%s%s" % (self.hostname,
tname,
self.pid))
def acquire(self, timeout=None):
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
else:
wait = max(0, timeout / 10)
while True:
try:
os.mkdir(self.lock_file)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EEXIST:
# Already locked.
if os.path.exists(self.unique_name):
# Already locked by me.
return
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout
else:
# Someone else has the lock.
raise AlreadyLocked
time.sleep(wait)
else:
# Couldn't create the lock for some other reason
raise LockFailed("failed to create %s" % self.lock_file)
else:
open(self.unique_name, "wb").close()
return
def release(self):
if not self.is_locked():
raise NotLocked
elif not os.path.exists(self.unique_name):
raise NotMyLock
os.unlink(self.unique_name)
os.rmdir(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name))
def break_lock(self):
if os.path.exists(self.lock_file):
for name in os.listdir(self.lock_file):
os.unlink(os.path.join(self.lock_file, name))
os.rmdir(self.lock_file)
class SQLiteFileLock(LockBase):
"Demonstration of using same SQL-based locking."
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
def __init__(self, path, threaded=True):
LockBase.__init__(self, path, threaded)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
import sqlite3
self.connection = sqlite3.connect(SQLiteFileLock.testdb)
c = self.connection.cursor()
try:
c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteFileLock.testdb)
def acquire(self, timeout=None):
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
# Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout
else:
# Someone else has the lock and we are impatient..
raise AlreadyLocked
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked
if not self.i_am_locking():
raise NotMyLock((self._who_is_locking(), self.unique_name))
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
return not not rows
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
return not not cursor.fetchall()
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
if hasattr(os, "link"):
FileLock = LinkFileLock
else:
FileLock = MkdirFileLock
| mit |
beebotte/ISS-realtime-position | iss_position_pub.py | 1 | 3041 | #!/usr/bin/python
# coding: utf8
############################################################
# Author Bachar Wehbi <[email protected]>
# Copyright (c) 2013-2014 Beebotte <[email protected]>
# This program is published under the MIT License
# Check http://opensource.org/licenses/MIT for details.
#
# This code uses the Beebotte API, you must have an account.
# You can register here: http://beebotte.com/register
#
# This program computes the position of the ISS in real-time
# and publishes it to Beebotte.
#
# Use the pip package manager to install dependencies:
# $ pip install pyephem
# $ pip install beebotte
############################################################
import time
from beebotte import *
import ephem
import datetime
import urllib2
from math import degrees
### URL where we will fetch TLE data
url = "http://www.celestrak.com/NORAD/elements/stations.txt"
### Replace CHENNL_TOKEN with that of your channel's (this code assumes the channel name is "ISS")
CHANNEL_TOKEN = None
bbt = BBT(token = CHANNEL_TOKEN)
### Otherwise, use your Access and Secret keys to connect to Beebotte
### Replace ACCESS_KEY and SECRET_KEY with those of your account
# ACCESS_KEY = None
# SECRET_KEY = None
# bbt = BBT(ACCESS_KEY, SECRET_KEY)
### Change channel name and resource name as suits you
iss_position_resource = Resource(bbt, 'ISS', 'position')
iss = None
count = 0
def update_tle():
global iss
### This is what TLE looks like. It will be updated every hour
# line1 = "ISS (ZARYA)"
# line2 = "1 25544U 98067A 16070.60802946 .00010558 00000-0 16731-3 0 9999"
# line3 = "2 25544 51.6423 189.6478 0001642 260.2328 233.0609 15.53995147989640"
try:
### Fetch and extract ISS TLE data
req = urllib2.Request(url)
response = urllib2.urlopen(req)
data = response.read()
tle = data.split('\n')[0:3]
if len(tle) >= 3:
line1 = tle[0]
line2 = tle[1]
line3 = tle[2]
iss = ephem.readtle(line1, line2, line3)
except Exception as inst:
print type(inst) ### the exception instance
print inst.args ### arguments stored in .args
print inst ###
def run():
global count
update_tle()
while True:
### update the TLE data once per hour
if count > 3600:
update_tle()
count = 0
count += 1
try:
### compute the ISS position
now = datetime.datetime.utcnow()
iss.compute(now)
print('longitude: %f - latitude: %f' % (degrees(iss.sublong), degrees(iss.sublat)))
            ### Send the ISS position to Beebotte
iss_position_resource.publish({
"timestamp": round(time.time()),
### transform longitude and latitude to degrees
"position": {
"long": degrees(iss.sublong),
"lat": degrees(iss.sublat)
}
})
except Exception as inst:
print type(inst) ### the exception instance
print inst.args ### arguments stored in .args
print inst ###
### sleep some time
time.sleep( 1 )
run()
| mit |
ansible/ansible-modules-core | network/netvisor/pn_vlan.py | 29 | 8941 | #!/usr/bin/python
""" PN CLI vlan-create/vlan-delete """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: pn_vlan
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1.0
short_description: CLI command to create/delete a VLAN.
description:
- Execute vlan-create or vlan-delete command.
  - VLANs are used to isolate network traffic at Layer 2. The VLAN identifiers
0 and 4095 are reserved and cannot be used per the IEEE 802.1Q standard.
The range of configurable VLAN identifiers is 2 through 4092.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to create vlan and
'absent' to delete vlan.
required: True
choices: ['present', 'absent']
pn_vlanid:
description:
- Specify a VLAN identifier for the VLAN. This is a value between
2 and 4092.
required: True
pn_scope:
description:
- Specify a scope for the VLAN.
- Required for vlan-create.
choices: ['fabric', 'local']
pn_description:
description:
- Specify a description for the VLAN.
pn_stats:
description:
- Specify if you want to collect statistics for a VLAN. Statistic
collection is enabled by default.
pn_ports:
description:
- Specifies the switch network data port number, list of ports, or range
        of ports. Port numbers must be in the range of 1 to 64.
pn_untagged_ports:
description:
- Specifies the ports that should have untagged packets mapped to the
VLAN. Untagged packets are packets that do not contain IEEE 802.1Q VLAN
tags.
"""
EXAMPLES = """
- name: create a VLAN
pn_vlan:
state: 'present'
pn_vlanid: 1854
pn_scope: fabric
- name: delete VLANs
pn_vlan:
state: 'absent'
pn_vlanid: 1854
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
description: The set of responses from the vlan command.
returned: always
type: list
stderr:
description: The set of error responses from the vlan command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
VLAN_EXISTS = None
MAX_VLAN_ID = 4092
MIN_VLAN_ID = 2
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
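# Example of the generated prefix (illustrative credentials, not defaults from
# this module): with pn_cliusername='network-admin', pn_clipassword='admin' and
# the default pn_cliswitch='local', pn_cli() returns
#   '/usr/bin/cli --quiet --user network-admin:admin  switch-local '
# (note the double space), to which the vlan-create/vlan-delete arguments are
# appended before the string is shlex-split and executed.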
def check_cli(module, cli):
"""
This method checks for idempotency using the vlan-show command.
If a vlan with given vlan id exists, return VLAN_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VLAN_EXISTS
"""
vlanid = module.params['pn_vlanid']
show = cli + \
' vlan-show id %s format id,scope no-show-headers' % str(vlanid)
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
# Global flags
global VLAN_EXISTS
if str(vlanid) in out:
VLAN_EXISTS = True
else:
VLAN_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vlan-create'
if state == 'absent':
command = 'vlan-delete'
return command
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
choices=['present', 'absent']),
pn_vlanid=dict(required=True, type='int'),
pn_scope=dict(type='str', choices=['fabric', 'local']),
pn_description=dict(type='str'),
pn_stats=dict(type='bool'),
pn_ports=dict(type='str'),
pn_untagged_ports=dict(type='str')
),
required_if=(
["state", "present", ["pn_vlanid", "pn_scope"]],
["state", "absent", ["pn_vlanid"]]
)
)
# Accessing the arguments
state = module.params['state']
vlanid = module.params['pn_vlanid']
scope = module.params['pn_scope']
description = module.params['pn_description']
stats = module.params['pn_stats']
ports = module.params['pn_ports']
untagged_ports = module.params['pn_untagged_ports']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if not MIN_VLAN_ID <= vlanid <= MAX_VLAN_ID:
module.exit_json(
msg="VLAN id must be between 2 and 4092",
changed=False
)
if command == 'vlan-create':
check_cli(module, cli)
if VLAN_EXISTS is True:
module.exit_json(
skipped=True,
msg='VLAN with id %s already exists' % str(vlanid)
)
cli += ' %s id %s scope %s ' % (command, str(vlanid), scope)
if description:
cli += ' description ' + description
if stats is True:
cli += ' stats '
if stats is False:
cli += ' no-stats '
if ports:
cli += ' ports ' + ports
if untagged_ports:
cli += ' untagged-ports ' + untagged_ports
if command == 'vlan-delete':
check_cli(module, cli)
if VLAN_EXISTS is False:
module.exit_json(
skipped=True,
msg='VLAN with id %s does not exist' % str(vlanid)
)
cli += ' %s id %s ' % (command, str(vlanid))
run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
antoan2/incubator-mxnet | python/mxnet/module/sequential_module.py | 38 | 17331 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-arguments, too-many-locals, too-many-instance-attributes
"""`SequentialModule` is a container module that chains a number of modules together."""
import logging
import copy
from ..initializer import Uniform
from .base_module import BaseModule
class SequentialModule(BaseModule):
"""A SequentialModule is a container module that can chain multiple modules together.
.. note::
Building a computation graph with this kind of imperative container is less
flexible and less efficient than the symbolic graph. So, this should be only used as a
handy utility.
"""
META_TAKE_LABELS = 'take_labels'
META_AUTO_WIRING = 'auto_wiring'
def __init__(self, logger=logging):
super(SequentialModule, self).__init__(logger=logger)
self._modules = []
self._metas = []
self._label_shapes = None
self._data_shapes = None
self._meta_keys = set([getattr(SequentialModule, x)
for x in dir(SequentialModule)
if x.startswith('META_')])
def add(self, module, **kwargs):
"""Adds a module to the chain.
Parameters
----------
module : BaseModule
The new module to add.
kwargs : **keywords
All the keyword arguments are saved as meta information
for the added module. The currently known meta includes
            - `take_labels`: indicating whether the module expects to
              take labels when doing computation. Note that any module in
              the chain can take labels (not necessarily only the
              topmost one), and they all take the same labels passed
from the original data batch for the `SequentialModule`.
Returns
-------
self
This function returns `self` to allow us to easily chain a
series of `add` calls.
Examples
--------
        >>> # An example of adding two modules to a chain.
>>> seq_mod = mx.mod.SequentialModule()
>>> seq_mod.add(mod1)
>>> seq_mod.add(mod2)
"""
self._modules.append(module)
# a sanity check to avoid typo
for key in kwargs:
assert key in self._meta_keys, ('Unknown meta "%s", a typo?' % key)
self._metas.append(kwargs)
# after adding new modules, we are reset back to raw states, needs
# to bind, init_params, etc.
self.binded = False
self.params_initialized = False
self.optimizer_initialized = False
return self # for easier chaining
@property
def data_names(self):
"""A list of names for data required by this module."""
if len(self._modules) > 0:
return self._modules[0].data_names
return []
@property
def output_names(self):
"""A list of names for the outputs of this module."""
if len(self._modules) > 0:
return self._modules[-1].output_names
return []
@property
def data_shapes(self):
"""Gets data shapes.
Returns
-------
list
            A list of `(name, shape)` pairs. The data shapes of the first module
            are the data shapes of the `SequentialModule`.
"""
assert self.binded
return self._modules[0].data_shapes
@property
def label_shapes(self):
"""Gets label shapes.
Returns
-------
list
A list of `(name, shape)` pairs. The return value could be `None` if
the module does not need labels, or if the module is not bound for
training (in this case, label information is not available).
"""
assert self.binded
return self._label_shapes
@property
def output_shapes(self):
"""Gets output shapes.
Returns
-------
list
            A list of `(name, shape)` pairs. The output shapes of the last
            module are the output shapes of the `SequentialModule`.
"""
assert self.binded
return self._modules[-1].output_shapes
def get_params(self):
"""Gets current parameters.
Returns
-------
(arg_params, aux_params)
A pair of dictionaries each mapping parameter names to NDArray values. This
is a merged dictionary of all the parameters in the modules.
"""
assert self.binded and self.params_initialized
arg_params = dict()
aux_params = dict()
for module in self._modules:
arg, aux = module.get_params()
arg_params.update(arg)
aux_params.update(aux)
return (arg_params, aux_params)
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes parameters.
Parameters
----------
initializer : Initializer
arg_params : dict
Default ``None``. Existing parameters. This has higher priority
than `initializer`.
aux_params : dict
Default ``None``. Existing auxiliary states. This has higher priority
than `initializer`.
allow_missing : bool
Allow missing values in `arg_params` and `aux_params` (if not ``None``).
In this case, missing values will be filled with `initializer`.
force_init : bool
Default ``False``.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
            contain extra parameters that are not needed by the executor.
"""
if self.params_initialized and not force_init:
return
assert self.binded, 'call bind before initializing the parameters'
for module in self._modules:
module.init_params(initializer=initializer, arg_params=arg_params,
aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
# make sure we do not have duplicated parameter names
def _check_name(known_names, new_names, modules, i):
"""Internal function to help checking duplicated names."""
for name in new_names:
                assert name not in known_names, "Duplicated parameter names: " + \
('name "%s" in layer %d (%s) is already ' % (name, i, type(modules[i]))) + \
('used in layer %d (%s).' % (known_names[name],
type(modules[known_names[name]])))
known_names[name] = i
arg_names = dict()
aux_names = dict()
for i_layer, module in enumerate(self._modules):
arg_params, aux_params = module.get_params()
_check_name(arg_names, arg_params.keys(), self._modules, i_layer)
_check_name(aux_names, aux_params.keys(), self._modules, i_layer)
self.params_initialized = True
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is `data_iter.provide_data`.
label_shapes : list of (str, tuple)
Typically is `data_iter.provide_label`.
for_training : bool
            Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. Currently shared module is not supported for `SequentialModule`.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
"""
if self.binded and not force_rebind:
self.logger.warning('Already bound, ignoring bind()')
return
if inputs_need_grad:
assert for_training is True
assert shared_module is None, 'Shared module is not supported'
assert len(self._modules) > 0, 'Attempting to bind an empty SequentialModule'
self.binded = True
# the same label shapes are used for all chained modules
self._label_shapes = label_shapes
my_data_shapes = data_shapes
anybody_ever_needs_label = False
for i_layer, module in enumerate(self._modules):
meta = self._metas[i_layer]
if SequentialModule.META_TAKE_LABELS in meta and \
meta[SequentialModule.META_TAKE_LABELS]:
my_label_shapes = label_shapes
anybody_ever_needs_label = True
else:
my_label_shapes = None
my_inputs_need_grad = bool(inputs_need_grad or
(for_training and i_layer > 0))
if meta.get(SequentialModule.META_AUTO_WIRING, False):
data_names = module.data_names
assert len(data_names) == len(my_data_shapes)
my_data_shapes = [(new_name, shape) for (new_name, (_, shape))
in zip(data_names, my_data_shapes)]
module.bind(data_shapes=my_data_shapes, label_shapes=my_label_shapes,
for_training=for_training, inputs_need_grad=my_inputs_need_grad,
force_rebind=force_rebind, shared_module=None, grad_req=grad_req)
# the output of the previous module is the data of the next module
my_data_shapes = module.output_shapes
if not anybody_ever_needs_label:
# then I do not need label either
self._label_shapes = None
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),),
force_init=False):
"""Installs and initializes optimizers.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
Default ``(('learning_rate', 0.01),)``. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Default ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
assert self.binded and self.params_initialized
if self.optimizer_initialized and not force_init:
self.logger.warning('optimizer already initialized, ignoring.')
return
for module in self._modules:
module.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params, force_init=force_init)
self.optimizer_initialized = True
def forward(self, data_batch, is_train=None):
"""Forward computation.
Parameters
----------
data_batch : DataBatch
is_train : bool
            Default is ``None``, in which case `is_train` is taken as ``self.for_training``.
"""
assert self.binded and self.params_initialized
# make a shallow copy, just to maintain necessary properties (if any) like
# bucket_key, pad, etc.
data_batch = copy.copy(data_batch)
for i_layer, module in enumerate(self._modules):
module.forward(data_batch, is_train=is_train)
if i_layer+1 == len(self._modules):
                # the last layer, no need to do the following steps
break
data_batch.data = module.get_outputs()
if hasattr(data_batch, 'provide_data'):
# need to update this, in case the internal module is using bucketing
# or whatever
data_names = [x[0] for x in module.output_shapes]
assert len(data_names) == len(data_batch.data)
data_batch.provide_data = [(name, x.shape) for name, x in
zip(data_names, data_batch.data)]
def backward(self, out_grads=None):
"""Backward computation."""
assert self.binded and self.params_initialized
for i_layer, module in reversed(list(zip(range(len(self._modules)), self._modules))):
module.backward(out_grads=out_grads)
if i_layer == 0:
break
out_grads = module.get_input_grads()
def update(self):
"""Updates parameters according to installed optimizer and the gradient computed
in the previous forward-backward cycle.
"""
assert self.binded and self.params_initialized and self.optimizer_initialized
for module in self._modules:
module.update()
def get_outputs(self, merge_multi_context=True):
"""Gets outputs from a previous forward computation.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like results from a single
executor.
Returns
-------
list of NDArray or list of list of NDArray
If `merge_multi_context` is ``True``, it is like ``[out1,
out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1,
            out2_dev2]]``. All the output elements are `NDArray`.
"""
assert self.binded and self.params_initialized
return self._modules[-1].get_outputs(merge_multi_context=merge_multi_context)
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like results from a single
executor.
Returns
-------
list of NDArrays or list of list of NDArrays
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
"""
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._modules[0].get_input_grads(merge_multi_context=merge_multi_context)
def update_metric(self, eval_metric, labels):
"""Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically ``data_batch.label``.
"""
assert self.binded and self.params_initialized
for meta, module in zip(self._metas, self._modules):
if SequentialModule.META_TAKE_LABELS in meta and \
meta[SequentialModule.META_TAKE_LABELS]:
module.update_metric(eval_metric, labels)
def install_monitor(self, mon):
"""Installs monitor on all executors."""
assert self.binded
for module in self._modules:
module.install_monitor(mon)
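# A rough usage sketch (not part of the original module). `mod1`, `mod2` and
# `train_iter` are assumed to exist already; `add()` returns `self`, so calls
# can be chained:
#
#   seq = SequentialModule()
#   seq.add(mod1).add(mod2, take_labels=True, auto_wiring=True)
#   seq.bind(data_shapes=train_iter.provide_data,
#            label_shapes=train_iter.provide_label)
#   seq.init_params()
#   seq.init_optimizer(optimizer='sgd')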
| apache-2.0 |
yeming233/rally | tests/unit/plugins/openstack/scenarios/ceilometer/test_stats.py | 2 | 1757 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.ceilometer import stats
from tests.unit import test
class CeilometerStatsTestCase(test.ScenarioTestCase):
def test_get_stats(self):
scenario = stats.GetStats(self.context)
scenario._get_stats = mock.MagicMock()
context = {"user": {"tenant_id": "fake", "id": "fake_id"},
"tenant": {"id": "fake_id",
"resources": ["fake_resource"]}}
metadata_query = {"a": "test"}
period = 10
groupby = "user_id"
aggregates = "sum"
scenario.context = context
scenario.run("fake_meter", True, True, True, metadata_query,
period, groupby, aggregates)
scenario._get_stats.assert_called_once_with(
"fake_meter",
[{"field": "user_id", "value": "fake_id", "op": "eq"},
{"field": "project_id", "value": "fake_id", "op": "eq"},
{"field": "resource_id", "value": "fake_resource", "op": "eq"},
{"field": "metadata.a", "value": "test", "op": "eq"}],
10,
"user_id",
"sum"
)
| apache-2.0 |
stevenbaker/dotfiles | .vim/bundle/jedi-vim/jedi/test/completion/descriptors.py | 14 | 2662 | class RevealAccess(object):
"""
A data descriptor that sets and returns values
normally and prints a message logging their access.
"""
def __init__(self, initval=None, name='var'):
self.val = initval
self.name = name
def __get__(self, obj, objtype):
print('Retrieving', self.name)
return self.val
def __set__(self, obj, val):
print('Updating', self.name)
self.val = val
def just_a_method(self):
pass
class C(object):
x = RevealAccess(10, 'var "x"')
#? RevealAccess()
x
#? ['just_a_method']
x.just_a_method
y = 5.0
def __init__(self):
#? int()
self.x
#? []
self.just_a_method
#? []
C.just_a_method
m = C()
#? int()
m.x
#? float()
m.y
#? int()
C.x
#? []
m.just_a_method
#? []
C.just_a_method
# -----------------
# properties
# -----------------
class B():
@property
def r(self):
return 1
@r.setter
def r(self, value):
return ''
def t(self):
return ''
p = property(t)
#? []
B().r()
#? int()
B().r
#? str()
B().p
#? []
B().p()
class PropClass():
def __init__(self, a):
self.a = a
@property
def ret(self):
return self.a
@ret.setter
def ret(self, value):
return 1.0
def ret2(self):
return self.a
ret2 = property(ret2)
@property
    def nested(self):
        """ causes recursion in properties, should work """
return self.ret
@property
    def nested2(self):
        """ causes recursion in properties, should not work """
return self.nested2
@property
    def join1(self):
        """ mutual recursion """
return self.join2
@property
    def join2(self):
        """ mutual recursion """
return self.join1
#? str()
PropClass("").ret
#? []
PropClass().ret.
#? str()
PropClass("").ret2
#?
PropClass().ret2
#? int()
PropClass(1).nested
#? []
PropClass().nested.
#?
PropClass(1).nested2
#? []
PropClass().nested2.
#?
PropClass(1).join1
# -----------------
# staticmethod/classmethod
# -----------------
class E(object):
a = ''
def __init__(self, a):
self.a = a
def f(x):
return x
f = staticmethod(f)
@staticmethod
def g(x):
return x
def s(cls, x):
return x
s = classmethod(s)
@classmethod
def t(cls, x):
return x
@classmethod
def u(cls, x):
return cls.a
e = E(1)
#? int()
e.f(1)
#? int()
E.f(1)
#? int()
e.g(1)
#? int()
E.g(1)
#? int()
e.s(1)
#? int()
E.s(1)
#? int()
e.t(1)
#? int()
E.t(1)
#? str()
e.u(1)
#? str()
E.u(1)
| mit |
Nofe92/srcdemo2 | launcher/SrcDemo2Launcher.py | 8 | 10302 | import sys
import os
import re
import time
import base64
import tempfile
import subprocess
import threading
if __name__ == '__main__':
print 'Please do not launch this file directly.'
sys.exit(0)
def module_path():
if hasattr(sys, "frozen"):
return os.path.dirname(sys.executable)
return os.path.dirname(__file__)
selfDir = os.path.abspath(module_path())
allowedCommands = {}
def addCommand(commandName, command):
global allowedCommands
allowedCommands[commandName] = command
stringRe = re.compile(r'"((?:[^"\\]|\\.)*)"')
def parse_command(command):
global stringRe, allowedCommands
command = base64.b64decode(command).decode('utf8')
allStrings = stringRe.findall(command)
if not allStrings:
return
allStrings = [x.replace(u'\\"', u'"').replace(u'\\\\', u'\\') for x in allStrings]
commandName = allStrings[0]
arguments = allStrings[1:]
if commandName in allowedCommands:
debugPrint('[C] Executing command', commandName, 'with arguments', arguments)
try:
allowedCommands[commandName](*arguments)
except:
debugPrint('Error while running command', commandName, 'with arguments', arguments)
class StreamRunner(threading.Thread):
def __init__(self, process, streamIn, streamsOut, parseCommands=False, showCommands=False):
self.process = process
self.streamIn = streamIn
self.streamsOut = streamsOut
self.parseCommands = parseCommands
self.showCommands = showCommands
threading.Thread.__init__(self)
def run(self):
while self.process.poll() is None:
l = self.streamIn.readline()
if self.parseCommands and len(l) > 4 and l[:3] == '[C]':
if self.showCommands:
for s in self.streamsOut:
s.write(l)
parse_command(l[4:])
else:
for s in self.streamsOut:
s.write(l)
def which(program):
import os
def is_executable(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath and is_executable(program):
return program
elif 'PATH' in os.environ:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
executable_file = os.path.join(path, program)
if is_executable(executable_file):
return executable_file
return None
def is_windows():
return sys.platform[:3].lower() == 'win'
def is_osx():
return sys.platform.lower().find('darwin') != -1 or sys.platform.lower().find('osx') != -1
def get_java(debugMode):
if is_windows():
hiPriority = 'java.exe'
loPriority = 'javaw.exe'
debugPrint('Finding', hiPriority, '/', loPriority)
def findJre(d):
if not os.path.exists(d) or not os.path.isdir(d):
return None
found = None
for i in os.listdir(d):
f = d + os.sep + i
if os.path.isdir(f):
res = findJre(f)
if res is not None:
return res
elif i.lower() == hiPriority:
return f # Immediately return
elif i.lower() == loPriority:
found = f # Keep looking for the other, just in case
return found
lookIn=[selfDir]
if 'PROGRAMFILES(X86)' in os.environ:
lookIn.append(os.environ['PROGRAMFILES(X86)'] + os.sep + 'Oracle')
lookIn.append(os.environ['PROGRAMFILES(X86)'] + os.sep + 'Java')
if 'PROGRAMFILES' in os.environ:
lookIn.append(os.environ['PROGRAMFILES'] + os.sep + 'Oracle')
lookIn.append(os.environ['PROGRAMFILES'] + os.sep + 'Java')
foundJre = None
for p in lookIn:
foundJre = findJre(p)
if foundJre is not None:
return foundJre
elif is_osx():
return selfDir + '/jre-1.7.0/bin/java'
else:
return which('java')
return None
def add_subprocess_creationflags(kwargs):
if is_windows():
import win32process
kwargs['creationflags'] = win32process.CREATE_NO_WINDOW
return kwargs
def subprocess_call(command, *args, **kwargs):
args = args[:]
kwargs = add_subprocess_creationflags(kwargs.copy())
kwargs['stdin'] = subprocess.PIPE
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
return subprocess.call(command, *args, **kwargs)
def subprocess_getoutput(command, *args, **kwargs):
args = args[:]
kwargs = add_subprocess_creationflags(kwargs.copy())
kwargs['stdin'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
return subprocess.check_output(command, *args, **kwargs)
def attempt_unmount(mountpoint):
global selfDir
mountpoint = mountpoint.encode(sys.getfilesystemencoding())
if is_windows():
subprocess_call([selfDir + os.sep + 'tools' + os.sep + 'windows' + os.sep + 'dokanctl' + os.sep + 'dokanctl.exe', '/u', mountpoint, '/f'])
else:
try:
subprocess_call(['fusermount', '-u', mountpoint])
except:
pass
try:
subprocess_call(['umount', mountpoint])
except:
pass
addCommand('unmount', attempt_unmount)
lastMountPoint = None
def register_mountpoint(mountpoint):
global lastMountPoint
lastMountPoint = mountpoint
addCommand('register_mountpoint', register_mountpoint)
def unmount_registered_mountpoint():
global lastMountPoint
if lastMountPoint is not None:
debugPrint('Attempting unmount of', lastMountPoint)
attempt_unmount(lastMountPoint)
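# Appends a default JVM argument unless an equivalent one was already supplied:
# -X style options are matched by `prefix`, while -XX options are only added
# when the flag name appears in the JVM's -XX:+PrintFlagsFinal output.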
def addJvmArgument(printFlags, jvmArgs, default, prefix=None, xxArg=None):
if prefix is not None:
for i in jvmArgs:
if len(i) > len(prefix) and i[:len(prefix)] == prefix:
return
jvmArgs.append(prefix + default)
elif xxArg is not None and printFlags is not None and xxArg in printFlags:
for i in jvmArgs:
if len(i) > 4 and i[:4] == '-XX:' and xxArg in i:
return
jvmArgs.append('-XX:' + default)
debugMode = False
def debugPrint(*args):
global debugMode
if debugMode:
try:
print ' '.join(map(str, args))
except:
try:
print args
except:
try:
print 'Could not print line! Something is very bad.'
except:
pass # Now it's really really bad
def launch(inDebugMode=False):
global selfDir, debugMode
debugMode = inDebugMode or '--srcdemo-debug' in sys.argv[1:]
debugPrint('Debug mode enabled.')
foundJre = get_java(debugMode)
if foundJre is None:
debugPrint('JRE not found.')
if is_windows():
import win32api
win32api.MessageBox(0, 'A 32-bit Java runtime environment (JRE) was not found.\nPlease download it from http://java.com/.\nEven if you are on 64-bit Windows, this program needs a 32-bit Java runtime to run.\n\nIf you are sure you have installed it already, please copy the jre folder next to SrcDemo2.exe.', 'Java not found.')
return
else:
print 'The Java runtime environment was not found.'
sys.exit(1)
if type(foundJre) is not type([]):
foundJre = [foundJre]
javaHome = os.path.abspath(os.path.dirname(os.path.dirname(foundJre[0])))
javaEnv = os.environ.copy()
javaEnv['JAVA_HOME'] = javaHome
javaVmArgs = []
if is_osx():
foundJre.append('-d64')
javaVmArgs.append('-XstartOnFirstThread')
for i in sys.argv[1:]:
if len(i) > 11 and i[:11] == '--jvm-args=':
javaVmArgs.extend(i[11:].split(' '))
jvmType = '-client'
if '-server' not in javaVmArgs and '-client' not in javaVmArgs:
# Probe for server JVM
if subprocess_call(foundJre + ['-server', '-version']) == 0:
jvmType = '-server'
javaVmArgs = ['-server'] + javaVmArgs
# Get available flags
printFlags = None
try:
printFlags = subprocess_getoutput(foundJre + [jvmType, '-XX:+PrintFlagsFinal'])
except:
pass
addJvmArgument(printFlags, javaVmArgs, '1024M', prefix='-Xmx')
addJvmArgument(printFlags, javaVmArgs, '512k', prefix='-Xss')
addJvmArgument(printFlags, javaVmArgs, ':none', prefix='-Xverify')
addJvmArgument(printFlags, javaVmArgs, '+UseParallelGC', xxArg='GC')
addJvmArgument(printFlags, javaVmArgs, '+AggressiveOpts', xxArg='AggressiveOpts')
addJvmArgument(printFlags, javaVmArgs, '+UseFastAccessorMethods', xxArg='UseFastAccessorMethods')
if jvmType == '-server':
addJvmArgument(printFlags, javaVmArgs, '+UseStringCache', xxArg='UseStringCache')
addJvmArgument(printFlags, javaVmArgs, '+UseCompressedStrings', xxArg='UseCompressedStrings')
addJvmArgument(printFlags, javaVmArgs, '+OptimizeStringConcat', xxArg='OptimizeStringConcat')
addJvmArgument(printFlags, javaVmArgs, 'CompileThreshold=100', xxArg='CompileThreshold')
del printFlags
command = foundJre + javaVmArgs + ['-jar', 'SrcDemo2.jar']
outStreams = [sys.stdout]
errStreams = []
if debugMode:
errStreams.append(sys.stderr)
command.append('--srcdemo-debug')
print 'Debug mode allows the console output to be logged to a file.'
print 'You may enter the complete path of the file to log to below.'
print 'Make sure it is writable (i.e. don\'t put it in the installation directory).'
print 'If you don\'t want the output to be logged, leave the line blank.'
print 'If you\'re not sure what to type, type "?" and SrcDemo2 will guess a filename for you.'
while True:
logFile = raw_input('Log file (blank to not log, "?" for auto): ').strip()
if logFile:
if logFile in (u'"?"', u'?'):
logFile = tempfile.mkstemp(suffix='.log', prefix='srcdemo2-' + time.strftime('%Y-%m-%d-at-%H-%M-%S') + '-', text=False)
os.close(logFile[0])
logFile = logFile[1]
print 'Guessed log file:', logFile
try:
logHandle = open(logFile, 'wb')
logHandle.write(u'Opened log.\n'.encode('utf8'))
outStreams.append(logHandle)
errStreams.append(logHandle)
print 'Log file:', logFile
break
except:
print 'Couldn\'t open this file for writing.'
print 'Please make sure the file is writable.'
else:
break
else:
errStreams.append(sys.stdout)
command.append('--srcdemo-jvm' + jvmType)
command.extend(sys.argv[1:])
returnCode = 0
kwargs = add_subprocess_creationflags({
'cwd': selfDir,
'env': javaEnv,
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE
})
while True:
debugPrint('Running', command)
p = subprocess.Popen(command, **kwargs)
p.stdin.close()
StreamRunner(p, p.stdout, outStreams, parseCommands=True, showCommands=inDebugMode).start()
StreamRunner(p, p.stderr, errStreams).start()
try:
returnCode = p.wait()
except KeyboardInterrupt:
debugPrint('Got keyboard interrupt.')
returnCode = 0
unmount_registered_mountpoint()
break
debugPrint('Process finished with return code:', returnCode)
unmount_registered_mountpoint()
if returnCode != 57:
break
debugPrint('Done.')
if returnCode:
sys.exit(returnCode)
| bsd-2-clause |
stackforge/cloudbase-init | cloudbaseinit/tests/metadata/services/test_maasservice.py | 1 | 17075 | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit import exception
from cloudbaseinit.metadata.services import maasservice
from cloudbaseinit.models import network as network_model
from cloudbaseinit.tests import testutils
from cloudbaseinit.utils import x509constants
CONF = cloudbaseinit_conf.CONF
class MaaSHttpServiceTest(unittest.TestCase):
def setUp(self):
self._maasservice = maasservice.MaaSHttpService()
@mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService"
"._get_cache_data")
def _test_load(self, mock_get_cache_data, ip, cache_data_fails=False):
if cache_data_fails:
mock_get_cache_data.side_effect = Exception
with testutils.ConfPatcher('metadata_base_url', ip, "maas"):
with testutils.LogSnatcher('cloudbaseinit.metadata.services.'
'maasservice') as snatcher:
response = self._maasservice.load()
if ip is not None:
if not cache_data_fails:
mock_get_cache_data.assert_called_once_with(
'%s/meta-data/' % self._maasservice._metadata_version)
self.assertTrue(response)
else:
expected_logging = 'Metadata not found at URL \'%s\'' % ip
self.assertEqual(expected_logging, snatcher.output[-1])
else:
self.assertFalse(response)
def test_load(self):
self._test_load(ip='196.254.196.254')
def test_load_no_ip(self):
self._test_load(ip=None)
def test_load_get_cache_data_fails(self):
self._test_load(ip='196.254.196.254', cache_data_fails=True)
@testutils.ConfPatcher('oauth_consumer_key', 'consumer_key', "maas")
@testutils.ConfPatcher('oauth_consumer_secret', 'consumer_secret', "maas")
@testutils.ConfPatcher('oauth_token_key', 'token_key', "maas")
@testutils.ConfPatcher('oauth_token_secret', 'token_secret', "maas")
def test_get_oauth_headers(self):
response = self._maasservice._get_oauth_headers(url='196.254.196.254')
self.assertIsInstance(response, dict)
self.assertIn('Authorization', response)
auth = response['Authorization']
self.assertTrue(auth.startswith('OAuth'))
auth = auth[6:]
parts = [item.strip() for item in auth.split(",")]
auth_parts = dict(part.split("=") for part in parts)
required_headers = {
'oauth_token',
'oauth_consumer_key',
'oauth_signature',
}
self.assertTrue(required_headers.issubset(set(auth_parts)))
self.assertEqual('"token_key"', auth_parts['oauth_token'])
self.assertEqual('"consumer_key"', auth_parts['oauth_consumer_key'])
self.assertEqual('"consumer_secret%26token_secret"',
auth_parts['oauth_signature'])
@mock.patch('cloudbaseinit.metadata.services.base.'
'BaseHTTPMetadataService._http_request')
@mock.patch('cloudbaseinit.metadata.services.maasservice.MaaSHttpService'
'._get_oauth_headers')
def test_http_request(self, mock_ouath_headers, mock_http_request):
mock_url = "fake.url"
self._maasservice._http_request(mock_url)
mock_http_request.assert_called_once_with(mock_url, None, {}, None)
@mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService"
"._get_cache_data")
def test_get_host_name(self, mock_get_cache_data):
response = self._maasservice.get_host_name()
mock_get_cache_data.assert_called_once_with(
'%s/meta-data/local-hostname' %
self._maasservice._metadata_version,
decode=True)
self.assertEqual(mock_get_cache_data.return_value, response)
@mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService"
"._get_cache_data")
def test_get_instance_id(self, mock_get_cache_data):
response = self._maasservice.get_instance_id()
mock_get_cache_data.assert_called_once_with(
'%s/meta-data/instance-id' % self._maasservice._metadata_version,
decode=True)
self.assertEqual(mock_get_cache_data.return_value, response)
@mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService"
"._get_cache_data")
def test_get_public_keys(self, mock_get_cache_data):
public_keys = [
"fake key 1",
"fake key 2"
]
public_key = "\n".join(public_keys) + "\n"
mock_get_cache_data.return_value = public_key
response = self._maasservice.get_public_keys()
mock_get_cache_data.assert_called_with(
'%s/meta-data/public-keys' % self._maasservice._metadata_version,
decode=True)
self.assertEqual(public_keys, response)
@mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService"
"._get_cache_data")
def test_get_client_auth_certs(self, mock_get_cache_data):
certs = [
"{begin}\n{cert}\n{end}".format(
begin=x509constants.PEM_HEADER,
end=x509constants.PEM_FOOTER,
cert=cert)
for cert in ("first cert", "second cert")
]
mock_get_cache_data.return_value = "\n".join(certs) + "\n"
response = self._maasservice.get_client_auth_certs()
mock_get_cache_data.assert_called_with(
'%s/meta-data/x509' % self._maasservice._metadata_version,
decode=True)
self.assertEqual(certs, response)
@mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService"
"._get_cache_data")
def test_get_user_data(self, mock_get_cache_data):
response = self._maasservice.get_user_data()
mock_get_cache_data.assert_called_once_with(
'%s/user-data' %
self._maasservice._metadata_version)
self.assertEqual(mock_get_cache_data.return_value, response)
def _get_network_data(self):
return {
"version": mock.sentinel.network_data_version,
"config": [{
"mtu": mock.sentinel.link_mtu1,
"name": mock.sentinel.link_name1,
"subnets": [{
"type": maasservice.MAAS_SUBNET_TYPE_MANUAL
}],
"type": maasservice.MAAS_CONFIG_TYPE_PHYSICAL,
"mac_address": mock.sentinel.link_mac1,
"id": mock.sentinel.link_id1
}, {
"mtu": mock.sentinel.link_mtu2,
"name": mock.sentinel.link_name2,
"subnets": [{
"type": maasservice.MAAS_SUBNET_TYPE_MANUAL
}],
"type": maasservice.MAAS_CONFIG_TYPE_PHYSICAL,
"mac_address": mock.sentinel.link_mac2,
"id": mock.sentinel.link_id2
}, {
"mtu": mock.sentinel.link_mtu3,
"name": mock.sentinel.link_name3,
"subnets": [{
"type": maasservice.MAAS_SUBNET_TYPE_MANUAL
}],
"type": maasservice.MAAS_CONFIG_TYPE_PHYSICAL,
"mac_address": mock.sentinel.link_mac3,
"id": mock.sentinel.link_id3
}, {
"name": mock.sentinel.bond_name1,
"id": mock.sentinel.bond_id1,
"type": maasservice.MAAS_CONFIG_TYPE_BOND,
"mac_address": mock.sentinel.bond_mac1,
"bond_interfaces": [
mock.sentinel.link_id1,
mock.sentinel.link_id2
],
"mtu": mock.sentinel.bond_mtu1,
"subnets": [{
"address": mock.sentinel.bond_subnet_address1,
"gateway": mock.sentinel.bond_subnet_gateway1,
"type": maasservice.MAAS_SUBNET_TYPE_STATIC,
"dns_nameservers": [
mock.sentinel.bond_subnet_dns1,
mock.sentinel.bond_subnet_dns2]
}, {
"address": mock.sentinel.bond_subnet_address2,
"type": maasservice.MAAS_SUBNET_TYPE_STATIC,
"dns_nameservers": []
}],
"params": {
"bond-downdelay": 0,
"bond-xmit-hash-policy": mock.sentinel.bond_lb_algo1,
"bond-mode": mock.sentinel.bond_mode1,
"bond-updelay": 0,
"bond-miimon": 100,
"bond-lacp-rate": maasservice.MAAS_BOND_LACP_RATE_FAST
}
}, {
"type": maasservice.MAAS_CONFIG_TYPE_VLAN,
"mtu": mock.sentinel.vlan_mtu1,
"name": mock.sentinel.vlan_name1,
"subnets": [{
"gateway": mock.sentinel.vlan_subnet_gateway1,
"address": mock.sentinel.vlan_subnet_address1,
"type": maasservice.MAAS_SUBNET_TYPE_STATIC,
"dns_nameservers": []
}],
"vlan_id": mock.sentinel.vlan_id1,
"vlan_link": mock.sentinel.bond_id1,
"id": mock.sentinel.vlan_link_id1
}, {
"type": mock.sentinel.nameserver_config_type,
"search": [
mock.sentinel.dns_search1
],
"address": [
mock.sentinel.bond_subnet_dns1,
mock.sentinel.bond_subnet_dns2
],
}]
}
@mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService"
"._get_network_data")
def _test_get_network_details_v2(self, mock_get_network_data,
unsupported_version=False,
invalid_bond_type=False,
invalid_bond_lb_algo=False,
unsupported_config_type=False):
mock.sentinel.bond_subnet_address1 = "10.0.0.1/24"
mock.sentinel.bond_subnet_gateway1 = "10.0.0.254"
mock.sentinel.bond_subnet_address2 = "172.16.0.1/16"
mock.sentinel.vlan_subnet_address1 = "2001:cdba::3257:9652/24"
mock.sentinel.vlan_subnet_gateway1 = "2001:cdba::3257:1"
if invalid_bond_type:
mock.sentinel.bond_mode1 = "invalid bond type"
else:
mock.sentinel.bond_mode1 = network_model.BOND_TYPE_BALANCE_ALB
if invalid_bond_lb_algo:
mock.sentinel.bond_lb_algo1 = "invalid lb algorithm"
else:
mock.sentinel.bond_lb_algo1 = network_model.BOND_LB_ALGO_L2
if unsupported_version:
mock.sentinel.network_data_version = "unsupported"
else:
mock.sentinel.network_data_version = 1
if unsupported_config_type:
mock.sentinel.nameserver_config_type = "unsupported"
else:
mock.sentinel.nameserver_config_type = "nameserver"
network_data = self._get_network_data()
mock_get_network_data.return_value = network_data
if (unsupported_version or invalid_bond_type or invalid_bond_lb_algo or
unsupported_config_type):
with self.assertRaises(exception.CloudbaseInitException):
self._maasservice.get_network_details_v2()
return
network_details = self._maasservice.get_network_details_v2()
self.assertEqual(1, len([
l for l in network_details.links if
l.type == network_model.LINK_TYPE_PHYSICAL and
l.id == mock.sentinel.link_id1 and
l.name == mock.sentinel.link_name1 and
l.enabled is True and
l.mac_address == mock.sentinel.link_mac1 and
l.mtu == mock.sentinel.link_mtu1]))
self.assertEqual(1, len([
l for l in network_details.links if
l.type == network_model.LINK_TYPE_PHYSICAL and
l.id == mock.sentinel.link_id2 and
l.name == mock.sentinel.link_name2 and
l.enabled is True and
l.mac_address == mock.sentinel.link_mac2 and
l.mtu == mock.sentinel.link_mtu2]))
# Disconnected network adapter, ensure it's not enabled
self.assertEqual(1, len([
l for l in network_details.links if
l.type == network_model.LINK_TYPE_PHYSICAL and
l.id == mock.sentinel.link_id3 and
l.name == mock.sentinel.link_name3 and
l.enabled is False and
l.mac_address == mock.sentinel.link_mac3 and
l.mtu == mock.sentinel.link_mtu3]))
self.assertEqual(1, len([
l for l in network_details.links if
l.type == network_model.LINK_TYPE_BOND and
l.id == mock.sentinel.bond_id1 and
l.enabled is True and
l.name == mock.sentinel.bond_name1 and
l.mtu == mock.sentinel.bond_mtu1 and
l.mac_address == mock.sentinel.bond_mac1 and
l.vlan_link is None and
l.vlan_id is None and
l.bond.type == network_model.BOND_TYPE_BALANCE_ALB and
l.bond.members == [
mock.sentinel.link_id1, mock.sentinel.link_id2] and
l.bond.lb_algorithm == network_model.BOND_LB_ALGO_L2 and
l.bond.lacp_rate == network_model.BOND_LACP_RATE_FAST]))
self.assertEqual(1, len([
l for l in network_details.links if
l.type == network_model.LINK_TYPE_VLAN and
l.id == mock.sentinel.vlan_link_id1 and
l.name == mock.sentinel.vlan_name1 and
l.enabled is True and
l.mac_address is None and
l.mtu == mock.sentinel.vlan_mtu1 and
l.vlan_link == mock.sentinel.bond_id1 and
l.vlan_id == mock.sentinel.vlan_id1]))
self.assertEqual(3, len(network_details.networks))
network_bond1 = [
n for n in network_details.networks
if n.address_cidr == mock.sentinel.bond_subnet_address1 and
n.dns_nameservers == [
mock.sentinel.bond_subnet_dns1,
mock.sentinel.bond_subnet_dns2] and
n.link == mock.sentinel.bond_id1 and
n.routes == [network_model.Route(
network_cidr=u'0.0.0.0/0',
gateway=mock.sentinel.bond_subnet_gateway1
)]]
self.assertEqual(1, len(network_bond1))
network_bond2 = [
n for n in network_details.networks
if n.address_cidr == mock.sentinel.bond_subnet_address2 and
n.dns_nameservers == [] and
n.link == mock.sentinel.bond_id1 and
n.routes == []]
self.assertEqual(1, len(network_bond2))
network_vlan1 = [
n for n in network_details.networks
if n.address_cidr == mock.sentinel.vlan_subnet_address1 and
n.dns_nameservers == [] and
n.link == mock.sentinel.vlan_link_id1 and
n.routes == [network_model.Route(
network_cidr=u'::/0',
gateway=mock.sentinel.vlan_subnet_gateway1
)]]
self.assertEqual(1, len(network_vlan1))
self.assertEqual(
[network_model.NameServerService(
addresses=[
mock.sentinel.bond_subnet_dns1,
mock.sentinel.bond_subnet_dns2],
search=[mock.sentinel.dns_search1])],
network_details.services)
def test_get_network_details_v2(self):
self._test_get_network_details_v2()
def test_get_network_details_v2_unsupported_version(self):
self._test_get_network_details_v2(unsupported_version=True)
def test_get_network_details_v2_unsupported_config_type(self):
self._test_get_network_details_v2(unsupported_config_type=True)
def test_get_network_details_v2_invalid_bond_type(self):
self._test_get_network_details_v2(invalid_bond_type=True)
def test_get_network_details_v2_invalid_bond_lb_algo(self):
self._test_get_network_details_v2(invalid_bond_lb_algo=True)
| apache-2.0 |
sysalexis/kbengine | kbe/res/scripts/common/Lib/traceback.py | 87 | 11167 | """Extract, format and print information about Python stack traces."""
import linecache
import sys
import operator
__all__ = ['extract_stack', 'extract_tb', 'format_exception',
'format_exception_only', 'format_list', 'format_stack',
'format_tb', 'print_exc', 'format_exc', 'print_exception',
'print_last', 'print_stack', 'print_tb',
'clear_frames']
#
# Formatting and printing lists of traceback lines.
#
def _format_list_iter(extracted_list):
for filename, lineno, name, line in extracted_list:
item = ' File "{}", line {}, in {}\n'.format(filename, lineno, name)
if line:
item = item + ' {}\n'.format(line.strip())
yield item
def print_list(extracted_list, file=None):
"""Print the list of tuples as returned by extract_tb() or
extract_stack() as a formatted stack trace to the given file."""
if file is None:
file = sys.stderr
for item in _format_list_iter(extracted_list):
print(item, file=file, end="")
def format_list(extracted_list):
"""Format a list of traceback entry tuples for printing.
Given a list of tuples as returned by extract_tb() or
extract_stack(), return a list of strings ready for printing.
Each string in the resulting list corresponds to the item with the
same index in the argument list. Each string ends in a newline;
the strings may contain internal newlines as well, for those items
whose source text line is not None.
"""
return list(_format_list_iter(extracted_list))
#
# Printing and Extracting Tracebacks.
#
# extractor takes curr and needs to return a tuple of:
# - Frame object
# - Line number
# - Next item (same type as curr)
# In practice, curr is either a traceback or a frame.
def _extract_tb_or_stack_iter(curr, limit, extractor):
if limit is None:
limit = getattr(sys, 'tracebacklimit', None)
n = 0
while curr is not None and (limit is None or n < limit):
f, lineno, next_item = extractor(curr)
co = f.f_code
filename = co.co_filename
name = co.co_name
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line:
line = line.strip()
else:
line = None
yield (filename, lineno, name, line)
curr = next_item
n += 1
def _extract_tb_iter(tb, limit):
return _extract_tb_or_stack_iter(
tb, limit,
operator.attrgetter("tb_frame", "tb_lineno", "tb_next"))
def print_tb(tb, limit=None, file=None):
"""Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method.
"""
print_list(extract_tb(tb, limit=limit), file=file)
def format_tb(tb, limit=None):
"""A shorthand for 'format_list(extract_tb(tb, limit))'."""
return format_list(extract_tb(tb, limit=limit))
def extract_tb(tb, limit=None):
"""Return list of up to limit pre-processed entries from traceback.
This is useful for alternate formatting of stack traces. If
'limit' is omitted or None, all entries are extracted. A
pre-processed stack trace entry is a quadruple (filename, line
number, function name, text) representing the information that is
usually printed for a stack trace. The text is a string with
leading and trailing whitespace stripped; if the source is not
available it is None.
"""
return list(_extract_tb_iter(tb, limit=limit))
#
# Exception formatting and output.
#
_cause_message = (
"\nThe above exception was the direct cause "
"of the following exception:\n")
_context_message = (
"\nDuring handling of the above exception, "
"another exception occurred:\n")
def _iter_chain(exc, custom_tb=None, seen=None):
if seen is None:
seen = set()
seen.add(exc)
its = []
context = exc.__context__
cause = exc.__cause__
if cause is not None and cause not in seen:
its.append(_iter_chain(cause, False, seen))
its.append([(_cause_message, None)])
elif (context is not None and
not exc.__suppress_context__ and
context not in seen):
its.append(_iter_chain(context, None, seen))
its.append([(_context_message, None)])
its.append([(exc, custom_tb or exc.__traceback__)])
# itertools.chain is in an extension module and may be unavailable
for it in its:
yield from it
def _format_exception_iter(etype, value, tb, limit, chain):
if chain:
values = _iter_chain(value, tb)
else:
values = [(value, tb)]
for value, tb in values:
if isinstance(value, str):
# This is a cause/context message line
yield value + '\n'
continue
if tb:
yield 'Traceback (most recent call last):\n'
yield from _format_list_iter(_extract_tb_iter(tb, limit=limit))
yield from _format_exception_only_iter(type(value), value)
def print_exception(etype, value, tb, limit=None, file=None, chain=True):
"""Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
This differs from print_tb() in the following ways: (1) if
traceback is not None, it prints a header "Traceback (most recent
call last):"; (2) it prints the exception type and value after the
stack trace; (3) if type is SyntaxError and value has the
appropriate format, it prints the line where the syntax error
occurred with a caret on the next line indicating the approximate
position of the error.
"""
if file is None:
file = sys.stderr
for line in _format_exception_iter(etype, value, tb, limit, chain):
print(line, file=file, end="")
def format_exception(etype, value, tb, limit=None, chain=True):
"""Format a stack trace and the exception information.
The arguments have the same meaning as the corresponding arguments
to print_exception(). The return value is a list of strings, each
ending in a newline and some containing internal newlines. When
these lines are concatenated and printed, exactly the same text is
printed as does print_exception().
"""
return list(_format_exception_iter(etype, value, tb, limit, chain))
def format_exception_only(etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.last_type and sys.last_value. The return value is a list of
strings, each ending in a newline.
Normally, the list contains a single string; however, for
SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the list.
"""
return list(_format_exception_only_iter(etype, value))
def _format_exception_only_iter(etype, value):
# Gracefully handle (the way Python 2.4 and earlier did) the case of
# being called with (None, None).
if etype is None:
yield _format_final_exc_line(etype, value)
return
stype = etype.__name__
smod = etype.__module__
if smod not in ("__main__", "builtins"):
stype = smod + '.' + stype
if not issubclass(etype, SyntaxError):
yield _format_final_exc_line(stype, value)
return
# It was a syntax error; show exactly where the problem was found.
filename = value.filename or "<string>"
lineno = str(value.lineno) or '?'
yield ' File "{}", line {}\n'.format(filename, lineno)
badline = value.text
offset = value.offset
if badline is not None:
yield ' {}\n'.format(badline.strip())
if offset is not None:
caretspace = badline.rstrip('\n')
offset = min(len(caretspace), offset) - 1
caretspace = caretspace[:offset].lstrip()
# non-space whitespace (likes tabs) must be kept for alignment
caretspace = ((c.isspace() and c or ' ') for c in caretspace)
yield ' {}^\n'.format(''.join(caretspace))
msg = value.msg or "<no detail available>"
yield "{}: {}\n".format(stype, msg)
def _format_final_exc_line(etype, value):
valuestr = _some_str(value)
if value is None or not valuestr:
line = "%s\n" % etype
else:
line = "%s: %s\n" % (etype, valuestr)
return line
def _some_str(value):
try:
return str(value)
except:
return '<unprintable %s object>' % type(value).__name__
def print_exc(limit=None, file=None, chain=True):
"""Shorthand for 'print_exception(*sys.exc_info(), limit, file)'."""
print_exception(*sys.exc_info(), limit=limit, file=file, chain=chain)
def format_exc(limit=None, chain=True):
"""Like print_exc() but return a string."""
return "".join(format_exception(*sys.exc_info(), limit=limit, chain=chain))
def print_last(limit=None, file=None, chain=True):
"""This is a shorthand for 'print_exception(sys.last_type,
sys.last_value, sys.last_traceback, limit, file)'."""
if not hasattr(sys, "last_type"):
raise ValueError("no last exception")
print_exception(sys.last_type, sys.last_value, sys.last_traceback,
limit, file, chain)
#
# Printing and Extracting Stacks.
#
def _extract_stack_iter(f, limit=None):
return _extract_tb_or_stack_iter(
f, limit, lambda f: (f, f.f_lineno, f.f_back))
def _get_stack(f):
if f is None:
f = sys._getframe().f_back.f_back
return f
def print_stack(f=None, limit=None, file=None):
"""Print a stack trace from its invocation point.
The optional 'f' argument can be used to specify an alternate
stack frame at which to start. The optional 'limit' and 'file'
arguments have the same meaning as for print_exception().
"""
print_list(extract_stack(_get_stack(f), limit=limit), file=file)
def format_stack(f=None, limit=None):
"""Shorthand for 'format_list(extract_stack(f, limit))'."""
return format_list(extract_stack(_get_stack(f), limit=limit))
def extract_stack(f=None, limit=None):
"""Extract the raw traceback from the current stack frame.
The return value has the same format as for extract_tb(). The
optional 'f' and 'limit' arguments have the same meaning as for
print_stack(). Each item in the list is a quadruple (filename,
line number, function name, text), and the entries are in order
from oldest to newest stack frame.
"""
stack = list(_extract_stack_iter(_get_stack(f), limit=limit))
stack.reverse()
return stack
def clear_frames(tb):
"Clear all references to local variables in the frames of a traceback."
while tb is not None:
try:
tb.tb_frame.clear()
except RuntimeError:
# Ignore the exception raised if the frame is still executing.
pass
tb = tb.tb_next
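# Illustrative use of the two most common helpers in this module (sketch only):
#
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       print_exc(limit=2)      # print at most two stack entries to stderr
#       report = format_exc()   # or capture the same report as a string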
| lgpl-3.0 |
magnatronus/titanium-sac | lib/tiutils.py | 1 | 2781 | #
# tiutils.py is a Titanium function library for use with the SpiralArm Titanium plug-in for Sublime Text 3
#
# developed by Steve Rogers, SpiralArm Consulting Ltd (www.spiralarm.uk)
# @sarmcon
#
#
import sublime, subprocess,os
from os.path import expanduser
# read in our default Titanium settings
settings = sublime.load_settings('titanium-sac.sublime-settings')
LOGLEVEL = settings.get("logLevel", "info")
PLATFORMS = settings.get("platforms", "ios,android")
URL = settings.get("url", "http://www.mywebaddress")
SDK = settings.get("sdk", "5.0.0.GA")
workSpace = settings.get("workspace", "/")
tiPath = settings.get("tiPath", "")
rootAppId = settings.get("appId", "com.myapp")
# set up some other useful vars
home = expanduser("~")
new_env = os.environ.copy()
new_env['PATH'] = new_env['PATH']+ ":" + tiPath
WORKSPACEDIR = home + workSpace
# Run a Ti based shell command
def runCommand(params):
subprocess.Popen(params, env=new_env).wait()
# Print out a console message
def consolePrint(label, message):
print("%s> %s" % (label,message))
# Generate our application id
def getAppId(projectName):
return rootAppId + "." + projectName
# Generate our fully qualified project name
def getProjectDirectory(projectName):
return home + workSpace + "/" + projectName
# Create a classic project
def createClassicProject(projectName):
consolePrint('info', "Creating Titanium Project....")
runCommand(['ti', "create", "--force","--type", "app", "--sdk", SDK, "--id", getAppId(projectName), "--log-level", LOGLEVEL, "--name", projectName, "--workspace-dir", WORKSPACEDIR,"--platform", PLATFORMS, "--url", URL])
# Add the Alloy Files
def generateAlloyProject(projectDir):
consolePrint('info', "Generating Alloy Files....")
subprocess.Popen(['alloy', "new", projectDir, "--force"], env=new_env).wait()
# Clean the current project
def cleanProject(projectDir):
consolePrint('info', "Cleaning Project....")
subprocess.Popen(['ti', "clean", "--project-dir", projectDir, "--log-level", LOGLEVEL, "--platforms", PLATFORMS], env=new_env).wait()
# Add an Alloy widget to the project
def createAlloyWidget(path, name):
consolePrint('info', "Creating Widget %s...." % name)
subprocess.Popen(['alloy', "generate", "widget", name, "--outputPath", path], env=new_env).wait()
# Add an Alloy controller to the project
def createAlloyController(path, name):
consolePrint('info', "Creating Controller %s...." % name)
subprocess.Popen(['alloy', "generate", "controller", name, "--outputPath", path], env=new_env).wait()
# Create the Sublime Project File
def createSublimeProject(projectDir):
content = '{"folders":[{"path": "%s"}]}' % projectDir
projectFile = open(projectDir+".sublime-project","w")
    projectFile.write(content)
projectFile.close()
| mit |
r0h4n/commons | tendrl/commons/flows/expand_cluster/gluster_help.py | 2 | 1884 | from tendrl.commons.flows.exceptions import FlowExecutionFailedError
from tendrl.commons.utils import log_utils as logger
def get_node_ips(parameters):
node_ips = []
for node, config in parameters["Cluster.node_configuration"].iteritems():
node_ips.append(config["provisioning_ip"])
return node_ips
def expand_gluster(parameters):
node_ips = get_node_ips(parameters)
plugin = NS.gluster_provisioner.get_plugin()
cluster = NS.tendrl.objects.Cluster(
integration_id=parameters['TendrlContext.integration_id']
).load()
logger.log(
"info",
NS.publisher_id,
{"message": "Setting up gluster nodes for cluster %s" %
cluster.short_name},
job_id=parameters['job_id'],
flow_id=parameters['flow_id'],
)
ret_val = plugin.setup_gluster_node(
node_ips,
repo=NS.config.data.get('glusterfs_repo', None)
)
if ret_val is not True:
raise FlowExecutionFailedError("Error setting up gluster node")
logger.log(
"info",
NS.publisher_id,
{"message": "Expanding gluster cluster %s" %
cluster.short_name},
job_id=parameters['job_id'],
flow_id=parameters['flow_id']
)
failed_nodes = []
for node in node_ips:
ret_val = plugin.expand_gluster_cluster(node)
if not ret_val:
failed_nodes.append(node)
if failed_nodes:
raise FlowExecutionFailedError(
"Error expanding gluster cluster. Following nodes failed: %s" %
",".join(failed_nodes)
)
logger.log(
"info",
NS.publisher_id,
{"message": "Expanded Gluster Cluster %s"
" with nodes %s" % (
cluster.short_name,
",".join(node_ips))},
job_id=parameters['job_id'],
flow_id=parameters['flow_id']
)
| lgpl-2.1 |
Tiendil/deworld | deworld/cartographer.py | 1 | 4067 | # coding: utf-8
import os
try:
from PIL import Image
except ImportError:
    # Pillow is optional here; without it the drawing helpers simply can't be used.
    pass
from deworld.map_colors import HeightColorMap, RGBColorMap
from deworld.layers import VEGETATION_TYPE
def draw_image(turn, catalog, layer, power_points, colorizer):
if not os.path.exists(catalog):
os.makedirs(catalog)
img = Image.new('RGB', (layer.w, layer.h))
data = []
for row in layer.data:
for cell in row:
data.append(colorizer(cell, discret=False).rgb)
for point in power_points.values():
data[point.y * layer.w + point.x] = (0, 0, 0)
img.putdata(data)
img.save('%s/%.3d.png' % (catalog, turn))
def wind_colorizer(wind, discret=False):
r, g, b = 0.5, 0.5, 0.5
g += wind[0] * 0.5
b += wind[1] * 0.5
return RGBColorMap.get_color(r=r, g=g, b=b)
def temperature_colorizer(temp, discret=False):
r, g, b = 0.5, 0.5, 0.5
if temp < 0.5:
b += temp
else:
r += (temp - 0.5)
return RGBColorMap.get_color(r=r, g=g, b=b)
def wetness_colorizer(wetness, discret=False):
return RGBColorMap.get_color(r=1.0-wetness, g=1.0-wetness, b=1.0)
def vegetation_colorizer(vegetation, discret=False):
if vegetation == VEGETATION_TYPE.GRASS:
return RGBColorMap.get_color(r=55.0/256, g=200.0/256, b=55.0/256)
if vegetation == VEGETATION_TYPE.FOREST:
return RGBColorMap.get_color(r=55.0/256, g=125.0/256, b=55.0/256)
if vegetation == VEGETATION_TYPE.DESERT:
return RGBColorMap.get_color(r=244.0/256, g=164.0/256, b=96.0/256)
return RGBColorMap.get_color(r=0.0, g=0.0, b=0.0)
def soil_colorizer(soil, discret=False):
return RGBColorMap.get_color(r=0.0, g=soil, b=0.0)
def atmo_wind_colorizer(point, discret=False):
return wind_colorizer(point.wind, discret=discret)
def atmo_temperature_colorizer(point, discret=False):
return temperature_colorizer(point.temperature, discret=discret)
def atmo_wetness_colorizer(point, discret=False):
return wetness_colorizer(point.wetness, discret=discret)
def draw_world(turn, world, catalog):
draw_image(turn=turn,
catalog='%s/%s' % (catalog, 'height'),
layer=world.layer_height,
power_points=world.power_points,
colorizer=HeightColorMap.get_color)
draw_image(turn=turn,
catalog='%s/%s' % (catalog, 'temperature'),
layer=world.layer_temperature,
power_points=world.power_points,
colorizer=temperature_colorizer)
draw_image(turn=turn,
catalog='%s/%s' % (catalog, 'wind'),
layer=world.layer_wind,
power_points=world.power_points,
colorizer=wind_colorizer)
draw_image(turn=turn,
catalog='%s/%s' % (catalog, 'wetness'),
layer=world.layer_wetness,
power_points=world.power_points,
colorizer=wetness_colorizer)
draw_image(turn=turn,
catalog='%s/%s' % (catalog, 'vegetation'),
layer=world.layer_vegetation,
power_points=world.power_points,
colorizer=vegetation_colorizer)
draw_image(turn=turn,
catalog='%s/%s' % (catalog, 'soil'),
layer=world.layer_soil,
power_points=world.power_points,
colorizer=soil_colorizer)
draw_image(turn=turn,
catalog='%s/%s' % (catalog, 'atmo_wind'),
layer=world.layer_atmosphere,
power_points=world.power_points,
colorizer=atmo_wind_colorizer)
draw_image(turn=turn,
catalog='%s/%s' % (catalog, 'atmo_temperature'),
layer=world.layer_atmosphere,
power_points=world.power_points,
colorizer=atmo_temperature_colorizer)
draw_image(turn=turn,
catalog='%s/%s' % (catalog, 'atmo_wetness'),
layer=world.layer_atmosphere,
power_points=world.power_points,
colorizer=atmo_wetness_colorizer)
| bsd-2-clause |
pfeyz/psiTurk | psiturk/command_line.py | 6 | 3137 | ''' This module supports commandline functionality '''
import argparse
import sys, os
from psiturk.version import version_number
from psiturk.psiturk_org_services import ExperimentExchangeServices
def process():
''' Figure out how we were invoked '''
invoked_as = os.path.basename(sys.argv[0])
if invoked_as == "psiturk":
launch_shell()
elif invoked_as == "psiturk-server":
launch_server()
elif invoked_as == "psiturk-shell":
launch_shell()
elif invoked_as == "psiturk-setup-example":
setup_example()
elif invoked_as == "psiturk-install":
install_from_exchange()
def install_from_exchange():
''' Install from experiment exchange. '''
parser = argparse.ArgumentParser(
description='Download experiment from the psiturk.org experiment\
exchange (http://psiturk.org/ee).'
)
parser.add_argument(
'exp_id', metavar='exp_id', type=str, help='the id number of the\
experiment in the exchange'
)
args = parser.parse_args()
exp_exch = ExperimentExchangeServices()
exp_exch.download_experiment(args.exp_id)
def setup_example():
''' Add commands for testing, etc. '''
parser = argparse.ArgumentParser(
description='Creates a simple default project (stroop) in the current\
directory with the necessary psiTurk files.'
)
# Optional flags
parser.add_argument(
'-v', '--version', help='Print version number.', action="store_true"
)
args = parser.parse_args()
    # If requested version, just print it and quit
if args.version:
print version_number
else:
import psiturk.setup_example as se
se.setup_example()
def launch_server():
''' Add commands for testing, etc.. '''
parser = argparse.ArgumentParser(
description='Launch psiTurk experiment webserver process on the\
host/port defined in config.txt.'
)
# Optional flags
parser.add_argument(
'-v', '--version', help='Print version number.', action="store_true"
)
args = parser.parse_args()
    # If requested version, just print it and quit
if args.version:
print version_number
else:
import psiturk.experiment_server as es
es.launch()
def launch_shell():
''' Add commands for testing, etc.. '''
parser = argparse.ArgumentParser(
description='Launch the psiTurk interactive shell.'
)
# Optional flags
parser.add_argument(
'-v', '--version', help='Print version number.', action="store_true"
)
parser.add_argument(
'-c', '--cabinmode', help='Launch psiturk in cabin (offline) mode',
action="store_true"
)
parser.add_argument(
'-s', '--script', help='Run commands from a script file'
)
args = parser.parse_args()
    # If requested version, just print it and quit
if args.version:
print version_number
else:
import psiturk.psiturk_shell as ps
if args.script:
ps.run(cabinmode=args.cabinmode, script=args.script)
else:
ps.run(cabinmode=args.cabinmode)
| mit |
llvm/llvm-lnt | lnt/util/wsgi_restart.py | 1 | 3202 | # This code lifted from the mod_wsgi docs.
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import os
from pathlib import Path
from typing import Sequence
import sys
import signal
import threading
import atexit
import queue
_interval = 1.0
_times = {}
_files = [] # type: Sequence[Path]
_running = False
_queue = queue.Queue() # type: queue.Queue
_lock = threading.Lock()
def _restart(path):
_queue.put(True)
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr)
print('%s Triggering process restart.' % prefix, file=sys.stderr)
os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
try:
        # If path doesn't denote a file and we were previously
        # tracking it, then it has been removed or the file type
        # has changed, so force a restart. If we were not previously
        # tracking the file then we can ignore it, as it is probably a
        # pseudo reference such as a file extracted from a
        # collection of modules contained in a zip file.
if not os.path.isfile(path):
return path in _times
# Check for when file last modified.
mtime = os.stat(path).st_mtime
if path not in _times:
_times[path] = mtime
# Force restart when modification time has changed, even
# if time now older, as that could indicate older file
# has been restored.
if mtime != _times[path]:
return True
except Exception:
        # If any exception occurred, it is likely that the file has
        # been removed just before stat(), so force a restart.
return True
return False
def _monitor():
while True:
# Check modification times on all files in sys.modules.
for module in sys.modules.values():
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
if _modified(path):
return _restart(path)
# Check modification times on files which have
# specifically been registered for monitoring.
for path in _files:
if _modified(path):
return _restart(path)
# Go to sleep for specified interval.
try:
return _queue.get(timeout=_interval)
except Exception:
pass
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)
def _exiting():
try:
_queue.put(True)
except Exception:
pass
_thread.join()
atexit.register(_exiting)
def track(path):
if path not in _files:
_files.append(path)
def start(interval=1.0):
global _interval
if interval < _interval:
_interval = interval
global _running
_lock.acquire()
if not _running:
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Starting change monitor.' % prefix, file=sys.stderr)
_running = True
_thread.start()
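# Hedged usage sketch (not part of the original module): a mod_wsgi script
# would typically import this module once, register any extra files to watch
# with track(), and then call start(). The paths below are placeholders.
#
#   from lnt.util import wsgi_restart
#   wsgi_restart.track('/srv/lnt/instance/lnt.cfg')   # watch a config file
#   wsgi_restart.start(interval=2.0)                  # poll every 2 seconds
#
# A change to any tracked file or to any loaded module then triggers
# _restart(), which sends SIGINT so the WSGI daemon process is restarted.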
| apache-2.0 |
zhangyage/Python-oldboy | day07/paramiko-1.15.2/paramiko-1.15.2/paramiko/pipe.py | 44 | 4005 | # Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Abstraction of a one-way pipe where the read end can be used in
`select.select`. Normally this is trivial, but Windows makes it nearly
impossible.
The pipe acts like an Event, which can be set or cleared. When set, the pipe
will trigger as readable in `select <select.select>`.
"""
import sys
import os
import socket
from paramiko.py3compat import b
def make_pipe():
if sys.platform[:3] != 'win':
p = PosixPipe()
else:
p = WindowsPipe()
return p
class PosixPipe (object):
def __init__(self):
self._rfd, self._wfd = os.pipe()
self._set = False
self._forever = False
self._closed = False
def close(self):
os.close(self._rfd)
os.close(self._wfd)
# used for unit tests:
self._closed = True
def fileno(self):
return self._rfd
def clear(self):
if not self._set or self._forever:
return
os.read(self._rfd, 1)
self._set = False
def set(self):
if self._set or self._closed:
return
self._set = True
os.write(self._wfd, b'*')
def set_forever(self):
self._forever = True
self.set()
class WindowsPipe (object):
"""
On Windows, only an OS-level "WinSock" may be used in select(), but reads
and writes must be to the actual socket object.
"""
def __init__(self):
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind(('127.0.0.1', 0))
serv.listen(1)
# need to save sockets in _rsock/_wsock so they don't get closed
self._rsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._rsock.connect(('127.0.0.1', serv.getsockname()[1]))
self._wsock, addr = serv.accept()
serv.close()
self._set = False
self._forever = False
self._closed = False
def close(self):
self._rsock.close()
self._wsock.close()
# used for unit tests:
self._closed = True
def fileno(self):
return self._rsock.fileno()
def clear (self):
if not self._set or self._forever:
return
self._rsock.recv(1)
self._set = False
def set (self):
if self._set or self._closed:
return
self._set = True
self._wsock.send(b'*')
def set_forever (self):
self._forever = True
self.set()
class OrPipe (object):
def __init__(self, pipe):
self._set = False
self._partner = None
self._pipe = pipe
def set(self):
self._set = True
if not self._partner._set:
self._pipe.set()
def clear(self):
self._set = False
if not self._partner._set:
self._pipe.clear()
def make_or_pipe(pipe):
"""
wraps a pipe into two pipe-like objects which are "or"d together to
affect the real pipe. if either returned pipe is set, the wrapped pipe
is set. when both are cleared, the wrapped pipe is cleared.
"""
p1 = OrPipe(pipe)
p2 = OrPipe(pipe)
p1._partner = p2
p2._partner = p1
return p1, p2
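# Hedged usage sketch (not part of paramiko): the read end of a pipe can be
# handed to select() so a blocking wait wakes up as soon as either of the
# "or"d halves is set. This guarded example only runs when the module is
# executed directly and assumes paramiko is importable.
if __name__ == '__main__':
    import select
    pipe = make_pipe()
    event_a, event_b = make_or_pipe(pipe)
    event_a.set()                        # either half marks the shared pipe readable
    readable, _, _ = select.select([pipe.fileno()], [], [], 1.0)
    print 'pipe readable after set:', bool(readable)
    event_a.clear()                      # both halves cleared -> pipe cleared again
    readable, _, _ = select.select([pipe.fileno()], [], [], 0.1)
    print 'pipe readable after clear:', bool(readable)
    pipe.close()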
| apache-2.0 |
alanjw/GreenOpenERP-Win-X86 | python/Lib/site-packages/win32/Demos/mmapfile_demo.py | 4 | 2822 | import win32api, mmapfile
import winerror
import tempfile, os
from pywin32_testutil import str2bytes
system_info=win32api.GetSystemInfo()
page_size=system_info[1]
alloc_size=system_info[7]
fname=tempfile.mktemp()
mapping_name=os.path.split(fname)[1]
fsize=8*page_size
print fname, fsize, mapping_name
m1=mmapfile.mmapfile(File=fname, Name=mapping_name, MaximumSize=fsize)
m1.seek(100)
m1.write_byte(str2bytes('?'))
m1.seek(-1,1)
assert m1.read_byte()==str2bytes('?')
## A reopened named mapping should have exact same size as original mapping
m2=mmapfile.mmapfile(Name=mapping_name, File=None, MaximumSize=fsize*2)
assert m2.size()==m1.size()
m1.seek(0,0)
m1.write(fsize*str2bytes('s'))
assert m2.read(fsize)==fsize*str2bytes('s')
move_src=100
move_dest=500
move_size=150
m2.seek(move_src,0)
assert m2.tell()==move_src
m2.write(str2bytes('m')*move_size)
m2.move(move_dest, move_src, move_size)
m2.seek(move_dest, 0)
assert m2.read(move_size) == str2bytes('m') * move_size
## m2.write('x'* (fsize+1))
m2.close()
m1.resize(fsize*2)
assert m1.size()==fsize * 2
m1.seek(fsize)
m1.write(str2bytes('w') * fsize)
m1.flush()
m1.close()
os.remove(fname)
## Test a file with size larger than 32 bits
## need 10 GB free on drive where your temp folder lives
fname_large=tempfile.mktemp()
mapping_name='Pywin32_large_mmap'
offsetdata=str2bytes('This is start of offset')
## Deliberately use odd numbers to test rounding logic
fsize = (1024*1024*1024*10) + 333
offset = (1024*1024*32) + 42
view_size = (1024*1024*16) + 111
## round mapping size and view size up to multiple of system page size
if fsize%page_size:
fsize += page_size - (fsize%page_size)
if view_size%page_size:
view_size += page_size - (view_size%page_size)
## round offset down to multiple of allocation granularity
offset -= offset%alloc_size
m1=None
m2=None
try:
try:
m1=mmapfile.mmapfile(fname_large, mapping_name, fsize, 0, offset*2)
except mmapfile.error, exc:
# if we don't have enough disk-space, that's OK.
if exc.winerror!=winerror.ERROR_DISK_FULL:
raise
print "skipping large file test - need", fsize, "available bytes."
else:
m1.seek(offset)
m1.write(offsetdata)
## When reopening an existing mapping without passing a file handle, you have
## to specify a positive size even though it's ignored
m2=mmapfile.mmapfile(File=None, Name=mapping_name, MaximumSize=1,
FileOffset=offset, NumberOfBytesToMap=view_size)
assert m2.read(len(offsetdata))==offsetdata
finally:
if m1 is not None:
m1.close()
if m2 is not None:
m2.close()
if os.path.exists(fname_large):
os.remove(fname_large)
| agpl-3.0 |
pdellaert/ansible | lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py | 19 | 10046 | #!/usr/bin/python
#
# Copyright (c) 2019 Hai Cao, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_cdnendpoint_info
version_added: "2.9"
short_description: Get Azure CDN endpoint facts
description:
- Get facts for a specific Azure CDN endpoint or all Azure CDN endpoints.
options:
resource_group:
description:
- Name of resource group where this CDN profile belongs to.
required: true
profile_name:
description:
- Name of CDN profile.
required: true
name:
description:
- Limit results to a specific Azure CDN endpoint.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- Hai Cao (@caohai)
- Yunge zhu (@yungezz)
'''
EXAMPLES = '''
- name: Get facts for all endpoints in CDN profile
azure_rm_cdnendpoint_info:
resource_group: myResourceGroup
profile_name: myCDNProfile
- name: Get facts of specific CDN endpoint
azure_rm_cdnendpoint_info:
resource_group: myResourceGroup
profile_name: myCDNProfile
name: myEndpoint1
'''
RETURN = '''
cdnendpoints:
description: List of Azure CDN endpoints.
returned: always
type: complex
contains:
resource_group:
description:
- Name of a resource group where the Azure CDN endpoint exists.
returned: always
type: str
sample: myResourceGroup
name:
description:
- Name of the Azure CDN endpoint.
returned: always
type: str
sample: myEndpoint
profile_name:
description:
- Name of the Azure CDN profile that this endpoint is attached to.
returned: always
type: str
sample: myProfile
location:
description:
- Location of the Azure CDN endpoint.
type: str
sample: WestUS
id:
description:
- ID of the Azure CDN endpoint.
type: str
sample:
"/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myCDN/providers/Microsoft.Cdn/profiles/myProfile/endpoints/myEndpoint1"
provisioning_state:
description:
- Provisioning status of the Azure CDN endpoint.
type: str
sample: Succeeded
resource_state:
description:
- Resource status of the profile.
type: str
sample: Running
is_compression_enabled:
description:
- Indicates whether content compression is enabled on CDN.
type: bool
sample: true
is_http_allowed:
description:
- Indicates whether HTTP traffic is allowed on the endpoint.
type: bool
sample: true
is_https_allowed:
description:
- Indicates whether HTTPS traffic is allowed on the endpoint.
type: bool
sample: true
query_string_caching_behavior:
description:
- Defines how CDN caches requests that include query strings.
type: str
sample: IgnoreQueryString
content_types_to_compress:
description:
- List of content types on which compression applies.
type: list
sample: [
"text/plain",
"text/html",
"text/css",
"text/javascript",
"application/x-javascript",
"application/javascript",
"application/json",
"application/xml"
]
origins:
description:
- The source of the content being delivered via CDN.
sample: {
"host_name": "xxxxxxxx.blob.core.windows.net",
"http_port": null,
"https_port": null,
"name": "xxxxxxxx-blob-core-windows-net"
}
origin_host_header:
description:
- The host header value sent to the origin with each request.
type: str
sample: xxxxxxxx.blob.core.windows.net
origin_path:
description:
- A directory path on the origin that CDN can use to retrieve content from.
type: str
sample: /pic/
tags:
description:
- The tags of the Azure CDN endpoint.
type: list
sample: foo
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from azure.mgmt.cdn import CdnManagementClient
from azure.mgmt.cdn.models import ErrorResponseException
from azure.common import AzureHttpError
except ImportError:
# handled in azure_rm_common
pass
import re
AZURE_OBJECT_CLASS = 'endpoints'
class AzureRMCdnEndpointInfo(AzureRMModuleBase):
"""Utility class to get Azure Azure CDN endpoint facts"""
def __init__(self):
self.module_args = dict(
name=dict(type='str'),
resource_group=dict(
type='str',
required=True
),
profile_name=dict(
type='str',
required=True
),
tags=dict(type='list')
)
self.results = dict(
changed=False,
cdnendpoints=[]
)
self.name = None
self.resource_group = None
self.profile_name = None
self.tags = None
super(AzureRMCdnEndpointInfo, self).__init__(
derived_arg_spec=self.module_args,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_cdnendpoint_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_cdnendpoint_facts' module has been renamed to 'azure_rm_cdnendpoint_info'", version='2.13')
for key in self.module_args:
setattr(self, key, kwargs[key])
self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-04-02')
if self.name:
self.results['cdnendpoints'] = self.get_item()
else:
self.results['cdnendpoints'] = self.list_by_profile()
return self.results
def get_item(self):
"""Get a single Azure Azure CDN endpoint"""
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.cdn_client.endpoints.get(
self.resource_group, self.profile_name, self.name)
except ErrorResponseException:
pass
if item and self.has_tags(item.tags, self.tags):
result = [self.serialize_cdnendpoint(item)]
return result
def list_by_profile(self):
"""Get all Azure Azure CDN endpoints within an Azure CDN profile"""
self.log('List all Azure CDN endpoints within an Azure CDN profile')
try:
response = self.cdn_client.endpoints.list_by_profile(
self.resource_group, self.profile_name)
except ErrorResponseException as exc:
self.fail('Failed to list all items - {0}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_cdnendpoint(item))
return results
def serialize_cdnendpoint(self, cdnendpoint):
'''
Convert a Azure CDN endpoint object to dict.
:param cdn: Azure CDN endpoint object
:return: dict
'''
result = self.serialize_obj(cdnendpoint, AZURE_OBJECT_CLASS)
new_result = {}
new_result['id'] = cdnendpoint.id
new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', result['id']))
new_result['profile_name'] = re.sub('\\/.*', '', re.sub('.*profiles\\/', '', result['id']))
new_result['name'] = cdnendpoint.name
new_result['type'] = cdnendpoint.type
new_result['location'] = cdnendpoint.location
new_result['resource_state'] = cdnendpoint.resource_state
new_result['provisioning_state'] = cdnendpoint.provisioning_state
new_result['query_string_caching_behavior'] = cdnendpoint.query_string_caching_behavior
new_result['is_compression_enabled'] = cdnendpoint.is_compression_enabled
new_result['is_http_allowed'] = cdnendpoint.is_http_allowed
new_result['is_https_allowed'] = cdnendpoint.is_https_allowed
new_result['content_types_to_compress'] = cdnendpoint.content_types_to_compress
new_result['origin_host_header'] = cdnendpoint.origin_host_header
new_result['origin_path'] = cdnendpoint.origin_path
new_result['origin'] = dict(
name=cdnendpoint.origins[0].name,
host_name=cdnendpoint.origins[0].host_name,
http_port=cdnendpoint.origins[0].http_port,
https_port=cdnendpoint.origins[0].https_port
)
new_result['tags'] = cdnendpoint.tags
return new_result
def main():
"""Main module execution code path"""
AzureRMCdnEndpointInfo()
if __name__ == '__main__':
main()
| gpl-3.0 |
galaxy001/libtorrent | python_BTL_BitTorrent-5.3-GPL/BTL/twisted_brpc.py | 5 | 24332 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""A generic resource for publishing objects via BRPC.
Requires BRPC
API Stability: semi-stable
"""
from __future__ import nested_scopes
__version__ = "$Revision: 1.32 $"[11:-2]
# System Imports
import brpc
import urlparse
from cStringIO import StringIO
from gzip import GzipFile
pipeline_debug = False
version = "1.0"
from BTL.platform import app_name
from BTL.reactor_magic import reactor
from BTL.exceptions import str_exc
from BTL.protocol import SmartReconnectingClientFactory
from BTL.brpclib import ServerProxy
import twisted.web
if twisted.web.__version__ < '0.6.0':
raise ImportError("BTL.twisted_brpc requires twisted.web 0.6.0 or greater,"
" from Twisted 2.4.0.\nYou appear to have twisted.web "
"version %s installed at:\n%s" % (twisted.web.__version__,
twisted.web.__file__))
from twisted.web import resource, server
from twisted.internet import protocol
from twisted.python import log, reflect, failure
from twisted.web import http
from twisted.internet import defer
# Useful so people don't need to import brpc directly
Fault = brpc.Fault
class NoSuchFunction(Fault):
"""There is no function by the given name."""
pass
class Handler:
"""Handle a BRPC request and store the state for a request in progress.
Override the run() method and return result using self.result,
a Deferred.
We require this class since we're not using threads, so we can't
encapsulate state in a running function if we're going to have
to wait for results.
    For example, let's say we want to authenticate against twisted.cred,
    run an LDAP query and then pass its result to a database query, all
as a result of a single BRPC command. We'd use a Handler instance
to store the state of the running command.
"""
def __init__(self, resource, *args):
self.resource = resource # the BRPC resource we are connected to
self.result = defer.Deferred()
self.run(*args)
def run(self, *args):
# event driven equivalent of 'raise UnimplementedError'
try:
raise NotImplementedError("Implement run() in subclasses")
except:
self.result.errback(failure.Failure())
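# Hedged sketch of a Handler subclass: a brpc_* method on a BRPC resource
# could return _ExampleLookupHandler(self, user_id) and the reply is sent
# once self.result fires. The immediate defer.succeed() below stands in for
# a real asynchronous lookup (database, LDAP, ...).
class _ExampleLookupHandler(Handler):
    def run(self, user_id):
        d = defer.succeed({'user_id': user_id, 'name': 'example'})
        d.addCallback(self.result.callback)
        d.addErrback(self.result.errback)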
def parse_accept_encoding(header):
a = header.split(',')
l = []
for i in a:
i = i.strip()
if ';' not in i:
type = i
# hmmm
l.append(('1', type))
else:
type, q = i.split(';')
type = type.strip()
q = q.strip()
junk, q = q.split('=')
q = q.strip()
if q != '0':
l.append((q, type))
l.sort()
l.reverse()
l = [ t for q, t in l ]
return l
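# For example (illustrative only), an Accept-Encoding header of
# 'gzip;q=0.8, identity, br;q=0' yields ['identity', 'gzip']: entries are
# ordered by descending q value and entries with q=0 are dropped.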
class BRPC(resource.Resource):
"""A resource that implements BRPC.
You probably want to connect this to '/RPC2'.
Methods published can return BRPC serializable results, Faults,
Binary, Boolean, DateTime, Deferreds, or Handler instances.
By default methods beginning with 'brpc_' are published.
Sub-handlers for prefixed methods (e.g., system.listMethods)
can be added with putSubHandler. By default, prefixes are
separated with a '.'. Override self.separator to change this.
"""
# Error codes for Twisted, if they conflict with yours then
# modify them at runtime.
NOT_FOUND = 8001
FAILURE = 8002
isLeaf = 1
separator = '.'
def __init__(self):
resource.Resource.__init__(self)
self.subHandlers = {}
def putSubHandler(self, prefix, handler):
self.subHandlers[prefix] = handler
def getSubHandler(self, prefix):
return self.subHandlers.get(prefix, None)
def getSubHandlerPrefixes(self):
return self.subHandlers.keys()
def _err(self, *a, **kw):
log.err(*a, **kw)
def render(self, request):
request.setHeader('server', "%s/%s" % (app_name, version))
request.content.seek(0, 0)
args, functionPath = brpc.loads(request.content.read())
args, kwargs = args
request.functionPath = functionPath
try:
function = self._getFunction(functionPath)
except Fault, f:
self._cbRender(f, request)
else:
request.setHeader("content-type", "application/octet-stream")
defer.maybeDeferred(function, *args, **kwargs).addErrback(
self._ebRender
).addCallback(
self._cbRender, request
)
return server.NOT_DONE_YET
def _cbRender(self, result, request):
if isinstance(result, Handler):
result = result.result
if not isinstance(result, Fault):
result = (result,)
try:
s = brpc.dumps(result, methodresponse=1)
except Exception, e:
f = Fault(self.FAILURE,
"function:%s can't serialize output: %s" %
(request.functionPath, str_exc(e)))
self._err(f)
s = brpc.dumps(f, methodresponse=1)
encoding = request.getHeader("accept-encoding")
if encoding:
encodings = parse_accept_encoding(encoding)
if 'gzip' in encodings or '*' in encodings:
sio = StringIO()
g = GzipFile(fileobj=sio, mode='wb', compresslevel=9)
g.write(s)
g.close()
s = sio.getvalue()
request.setHeader("Content-Encoding", "gzip")
request.setHeader("content-length", str(len(s)))
request.write(s)
request.finish()
def _ebRender(self, failure):
self._err(failure)
if isinstance(failure.value, Fault):
return failure.value
return Fault(self.FAILURE, "An unhandled exception occurred: %s" %
failure.getErrorMessage())
def _getFunction(self, functionPath):
"""Given a string, return a function, or raise NoSuchFunction.
This returned function will be called, and should return the result
of the call, a Deferred, or a Fault instance.
Override in subclasses if you want your own policy. The default
policy is that given functionPath 'foo', return the method at
self.brpc_foo, i.e. getattr(self, "brpc_" + functionPath).
If functionPath contains self.separator, the sub-handler for
the initial prefix is used to search for the remaining path.
"""
if functionPath.find(self.separator) != -1:
prefix, functionPath = functionPath.split(self.separator, 1)
handler = self.getSubHandler(prefix)
if handler is None: raise NoSuchFunction(self.NOT_FOUND, "no such subHandler %s" % prefix)
return handler._getFunction(functionPath)
f = getattr(self, "brpc_%s" % functionPath, None)
if not f:
raise NoSuchFunction(self.NOT_FOUND, "function %s not found" % functionPath)
elif not callable(f):
raise NoSuchFunction(self.NOT_FOUND, "function %s not callable" % functionPath)
else:
return f
def _listFunctions(self):
"""Return a list of the names of all brpc methods."""
return reflect.prefixedMethodNames(self.__class__, 'brpc_')
class BRPCIntrospection(BRPC):
"""Implement the BRPC Introspection API.
By default, the methodHelp method returns the 'help' method attribute,
if it exists, otherwise the __doc__ method attribute, if it exists,
otherwise the empty string.
To enable the methodSignature method, add a 'signature' method attribute
containing a list of lists. See methodSignature's documentation for the
format. Note the type strings should be BRPC types, not Python types.
"""
def __init__(self, parent):
"""Implement Introspection support for an BRPC server.
@param parent: the BRPC server to add Introspection support to.
"""
BRPC.__init__(self)
self._brpc_parent = parent
def brpc_listMethods(self):
"""Return a list of the method names implemented by this server."""
functions = []
todo = [(self._brpc_parent, '')]
while todo:
obj, prefix = todo.pop(0)
functions.extend([ prefix + name for name in obj._listFunctions() ])
todo.extend([ (obj.getSubHandler(name),
prefix + name + obj.separator)
for name in obj.getSubHandlerPrefixes() ])
return functions
brpc_listMethods.signature = [['array']]
def brpc_methodHelp(self, method):
"""Return a documentation string describing the use of the given method.
"""
method = self._brpc_parent._getFunction(method)
return (getattr(method, 'help', None)
or getattr(method, '__doc__', None) or '')
brpc_methodHelp.signature = [['string', 'string']]
def brpc_methodSignature(self, method):
"""Return a list of type signatures.
Each type signature is a list of the form [rtype, type1, type2, ...]
where rtype is the return type and typeN is the type of the Nth
argument. If no signature information is available, the empty
string is returned.
"""
method = self._brpc_parent._getFunction(method)
return getattr(method, 'signature', None) or ''
brpc_methodSignature.signature = [['array', 'string'],
['string', 'string']]
def addIntrospection(brpc):
"""Add Introspection support to an BRPC server.
@param brpc: The brpc server to add Introspection support to.
"""
brpc.putSubHandler('system', BRPCIntrospection(brpc))
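# Hedged usage sketch: publishing a BRPC resource with twisted.web. The
# method name 'echo' and the port are made up for illustration; methods are
# resolved via the 'brpc_' prefix described in BRPC._getFunction above.
class _ExampleRPC(BRPC):
    def brpc_echo(self, *args, **kwargs):
        return [list(args), kwargs]
def _example_serve(port=7080):
    root = _ExampleRPC()
    addIntrospection(root)              # adds system.listMethods etc.
    reactor.listenTCP(port, server.Site(root))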
class Query(object):
def __init__(self, path, host, method, user=None, password=None, *args):
self.path = path
self.host = host
self.user = user
self.password = password
self.method = method
self.payload = brpc.dumps(args, method)
self.deferred = defer.Deferred()
self.decode = False
class QueryProtocol(http.HTTPClient):
# All current queries are pipelined over the connection at
# once. When the connection is made, or as queries are made
# while a connection exists, queries are all sent to the
# server. Pipelining limits can be controlled by the caller.
# When a query completes (see parseResponse), if there are no
    # more queries then an idle timeout gets set.
# The QueryFactory reopens the connection if another query occurs.
#
    # twisted_brpc does not currently provide a mechanism for
# per-query timeouts. This could be added with another
# timeout_call mechanism that calls loseConnection and pops the
# current query with an errback.
timeout = 300 # idle timeout.
def log(self, msg, *a):
print "%s: %s: %r" % (self.peer, msg, a)
def connectionMade(self):
http.HTTPClient.connectionMade(self)
self.current_queries = []
self.timeout_call = None
if pipeline_debug:
p = self.transport.getPeer()
p = "%s:%d" % (p.host, p.port)
self.peer = (id(self.transport), p)
self.factory.connectionMade(self)
def _cancelTimeout(self):
if self.timeout_call and self.timeout_call.active():
self.timeout_call.cancel()
self.timeout_call = None
def connectionLost(self, reason):
http.HTTPClient.connectionLost(self, reason)
if pipeline_debug: self.log('connectionLost', reason.getErrorMessage())
self._cancelTimeout()
if self.current_queries:
# queries failed, put them back
if pipeline_debug: self.log('putting back', [q.method for q in self.current_queries])
self.factory.prependQueries(self.current_queries)
self.factory.connectionLost(self)
def sendCommand(self, command, path):
self.transport.write('%s %s HTTP/1.1\r\n' % (command, path))
def setLineMode(self, rest):
# twisted is stupid.
self.firstLine = 1
return http.HTTPClient.setLineMode(self, rest)
def sendQuery(self):
self._cancelTimeout()
query = self.factory.popQuery()
if pipeline_debug: self.log('sending', query.method)
self.current_queries.append(query)
self.sendCommand('POST', query.path)
self.sendHeader('User-Agent', 'BTL/BRPC 1.0')
self.sendHeader('Host', query.host)
self.sendHeader('Accept-encoding', 'gzip')
self.sendHeader('Connection', 'Keep-Alive')
self.sendHeader('Content-type', 'application/octet-stream')
self.sendHeader('Content-length', str(len(query.payload)))
#if query.user:
# auth = '%s:%s' % (query.user, query.password)
# auth = auth.encode('base64').strip()
# self.sendHeader('Authorization', 'Basic %s' % (auth,))
self.endHeaders()
self.transport.write(query.payload)
def parseResponse(self, contents):
query = self.current_queries.pop(0)
if pipeline_debug: self.log('responded', query.method)
if not self.current_queries:
assert not self.factory.anyQueries()
assert not self.timeout_call
self.timeout_call = reactor.callLater(self.timeout,
self.transport.loseConnection)
try:
response = brpc.loads(contents)
except Exception, e:
query.deferred.errback(failure.Failure())
del query.deferred
else:
query.deferred.callback(response[0][0])
del query.deferred
def badStatus(self, status, message):
query = self.current_queries.pop(0)
if pipeline_debug: self.log('failed', query.method)
try:
raise ValueError(status, message)
except:
query.deferred.errback(failure.Failure())
del query.deferred
self.transport.loseConnection()
def handleStatus(self, version, status, message):
if status != '200':
self.badStatus(status, message)
def handleHeader(self, key, val):
if not self.current_queries[0].decode:
if key.lower() == 'content-encoding' and val.lower() == 'gzip':
self.current_queries[0].decode = True
def handleResponse(self, contents):
if self.current_queries[0].decode:
s = StringIO()
s.write(contents)
s.seek(-1)
g = GzipFile(fileobj=s, mode='rb')
contents = g.read()
g.close()
self.parseResponse(contents)
class QueryFactory(object):
def __init__(self):
self.queries = []
self.instance = None
def connectionMade(self, instance):
self.instance = instance
if pipeline_debug: print 'connection made %s' % str(instance.peer)
while self.anyQueries():
self.instance.sendQuery()
def connectionLost(self, instance):
assert self.instance == instance
if pipeline_debug: print 'connection lost %s' % str(instance.peer)
self.instance = None
def prependQueries(self, queries):
self.queries = queries + self.queries
def popQuery(self):
return self.queries.pop(0)
def anyQueries(self):
return bool(self.queries)
def addQuery(self, query):
self.queries.append(query)
if pipeline_debug: print 'addQuery: %s %s' % (self.instance, self.queries)
if self.instance:
self.instance.sendQuery()
def disconnect(self):
if not self.instance:
return
if not hasattr(self.instance, 'transport'):
return
self.instance.transport.loseConnection()
class PersistantSingletonFactory(QueryFactory, SmartReconnectingClientFactory):
def clientConnectionFailed(self, connector, reason):
if pipeline_debug: print 'clientConnectionFailed %s' % str(connector)
return SmartReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
def clientConnectionLost(self, connector, unused_reason):
self.started = False
if not self.anyQueries():
self.continueTrying = False
return SmartReconnectingClientFactory.clientConnectionLost(self, connector, unused_reason)
class SingletonFactory(QueryFactory, protocol.ClientFactory):
def clientConnectionFailed(self, connector, reason):
if pipeline_debug: print 'clientConnectionFailed %s' % str(connector)
queries = list(self.queries)
del self.queries[:]
for query in queries:
query.deferred.errback(reason)
self.started = False
class Proxy:
"""A Proxy for making remote BRPC calls.
Pass the URL of the remote BRPC server to the constructor.
Use proxy.callRemote('foobar', *args) to call remote method
'foobar' with *args.
"""
def __init__(self, url, user=None, password=None, retry_forever = True):
"""
@type url: C{str}
@param url: The URL to which to post method calls. Calls will be made
over SSL if the scheme is HTTPS. If netloc contains username or
password information, these will be used to authenticate, as long as
the C{user} and C{password} arguments are not specified.
@type user: C{str} or None
@param user: The username with which to authenticate with the server
when making calls. If specified, overrides any username information
embedded in C{url}. If not specified, a value may be taken from C{url}
if present.
@type password: C{str} or None
@param password: The password with which to authenticate with the
server when making calls. If specified, overrides any password
information embedded in C{url}. If not specified, a value may be taken
from C{url} if present.
"""
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
netlocParts = netloc.split('@')
if len(netlocParts) == 2:
userpass = netlocParts.pop(0).split(':')
self.user = userpass.pop(0)
try:
self.password = userpass.pop(0)
except:
self.password = None
else:
self.user = self.password = None
hostport = netlocParts[0].split(':')
self.host = hostport.pop(0)
try:
self.port = int(hostport.pop(0))
except:
self.port = None
self.path = path
if self.path in ['', None]:
self.path = '/'
self.secure = (scheme == 'https')
if user is not None:
self.user = user
if password is not None:
self.password = password
if not retry_forever:
_Factory = SingletonFactory
else:
_Factory = PersistantSingletonFactory
self.factory = _Factory()
self.factory.started = False
self.factory.protocol = QueryProtocol
def callRemote(self, method, *args, **kwargs):
if pipeline_debug: print 'callRemote to %s : %s' % (self.host, method)
args = (args, kwargs)
query = Query(self.path, self.host, method, self.user,
self.password, *args)
self.factory.addQuery(query)
if pipeline_debug: print 'factory started: %s' % self.factory.started
if not self.factory.started:
self.factory.started = True
def connect(host):
if self.secure:
if pipeline_debug: print 'connecting to %s' % str((host, self.port or 443))
from twisted.internet import ssl
reactor.connectSSL(host, self.port or 443,
self.factory, ssl.ClientContextFactory(),
timeout=60)
else:
if pipeline_debug: print 'connecting to %s' % str((host, self.port or 80))
reactor.connectTCP(host, self.port or 80, self.factory,
timeout=60)
df = reactor.resolve(self.host)
df.addCallback(connect)
df.addErrback(query.deferred.errback)
return query.deferred
class AsyncServerProxy(object):
def __init__(self, base_url, username=None, password=None, debug=False,
retry_forever = True):
self.base_url = base_url
self.username = username
self.password = password
self.proxy = Proxy(self.base_url, self.username, self.password, retry_forever)
self.debug = debug
def __getattr__(self, attr):
return self._make_call(attr)
def _make_call(self, methodname):
return lambda *a, **kw : self._method(methodname, *a, **kw)
def _method(self, methodname, *a, **kw):
# in case they have changed
self.proxy.user = self.username
self.proxy.password = self.password
if self.debug:
print ('callRemote:', self.__class__.__name__,
self.base_url, methodname, a, kw)
df = self.proxy.callRemote(methodname, *a, **kw)
return df
class EitherServerProxy(object):
SYNC = 0
ASYNC = 1
SYNC_DEFERRED = 2 # BE CAREFUL to call getResult() on the returned Deferred!
"""Server Proxy that supports both asynchronous and synchronous calls."""
def __init__(self, base_url, username = None, password = None, debug = False,
async = ASYNC, retry_forever = True ):
"""
The EitherServerProxy can make either synchronous or asynchronous calls.
The default is specified by the async parameter to __init__, but each
individual call can override the default behavior by passing 'async' as
a boolean keyword argument to any method call. The async keyword
argument can also be set to None. However, passing async as
None means simply 'use default behavior'. When calling with async=SYNC,
you should not be in the same thread as the reactor or you risk
blocking the reactor.
@param async: determines whether the default is asynchronous or blocking calls."""
assert async in [SYNC, ASYNC, SYNC_DEFERRED]
self.async = async
self.async_proxy = AsyncServerProxy( base_url, username, password, debug,
retry_forever = retry_forever )
# HERE HACK. retry_forever is not supported by ServerProxy.
self.sync_proxy = ServerProxy( base_url )
def __getattr__(self, attr):
return self._make_call(attr)
def _make_call(self, methodname):
return lambda *a, **kw : self._method(methodname, *a, **kw)
def _method(self, methodname, *a, **kw ):
async = kw.pop('async', self.async)
if async is None:
async = self.async
if async == ASYNC:
df = self.async_proxy._method(methodname, *a, **kw)
elif async == SYNC_DEFERRED:
df = defer.execute(getattr(self.sync_proxy, methodname), *a, **kw)
else:
return self.sync_proxy.__getattr__(methodname)(*a, **kw)
return df
SYNC = EitherServerProxy.SYNC
ASYNC = EitherServerProxy.ASYNC
SYNC_DEFERRED = EitherServerProxy.SYNC_DEFERRED
__all__ = ["BRPC", "Handler", "NoSuchFunction", "Fault", "Proxy", "AsyncServerProxy", "EitherServerProxy"]
| mit |
jhayworth/config | .emacs.d/elpy/rpc-venv/lib/python2.7/distutils/__init__.py | 5 | 4374 | import os
import sys
import warnings
# opcode is not a virtualenv module, so we can use it to find the stdlib
# Important! To work on pypy, this must be a module that resides in the
# lib-python/modified-x.y.z directory
import opcode
dirname = os.path.dirname
distutils_path = os.path.join(os.path.dirname(opcode.__file__), "distutils")
if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
warnings.warn("The virtualenv distutils package at %s appears to be in the same location as the system distutils?")
else:
__path__.insert(0, distutils_path) # noqa: F821
if sys.version_info < (3, 4):
import imp
real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ("", "", imp.PKG_DIRECTORY))
else:
import importlib.machinery
distutils_path = os.path.join(distutils_path, "__init__.py")
loader = importlib.machinery.SourceFileLoader("_virtualenv_distutils", distutils_path)
if sys.version_info < (3, 5):
import types
real_distutils = types.ModuleType(loader.name)
else:
import importlib.util
spec = importlib.util.spec_from_loader(loader.name, loader)
real_distutils = importlib.util.module_from_spec(spec)
loader.exec_module(real_distutils)
# Copy the relevant attributes
try:
__revision__ = real_distutils.__revision__
except AttributeError:
pass
__version__ = real_distutils.__version__
from distutils import dist, sysconfig # isort:skip
try:
basestring
except NameError:
basestring = str
# patch build_ext (distutils doesn't know how to get the libs directory
# path on windows - it hardcodes the paths around the patched sys.prefix)
if sys.platform == "win32":
from distutils.command.build_ext import build_ext as old_build_ext
class build_ext(old_build_ext):
def finalize_options(self):
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, basestring):
self.library_dirs = self.library_dirs.split(os.pathsep)
self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
old_build_ext.finalize_options(self)
from distutils.command import build_ext as build_ext_module
build_ext_module.build_ext = build_ext
# distutils.dist patches:
old_find_config_files = dist.Distribution.find_config_files
def find_config_files(self):
found = old_find_config_files(self)
if os.name == "posix":
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
user_filename = os.path.join(sys.prefix, user_filename)
if os.path.isfile(user_filename):
for item in list(found):
if item.endswith("pydistutils.cfg"):
found.remove(item)
found.append(user_filename)
return found
dist.Distribution.find_config_files = find_config_files
# distutils.sysconfig patches:
old_get_python_inc = sysconfig.get_python_inc
def sysconfig_get_python_inc(plat_specific=0, prefix=None):
if prefix is None:
prefix = sys.real_prefix
return old_get_python_inc(plat_specific, prefix)
sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
sysconfig.get_python_inc = sysconfig_get_python_inc
old_get_python_lib = sysconfig.get_python_lib
def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
if standard_lib and prefix is None:
prefix = sys.real_prefix
return old_get_python_lib(plat_specific, standard_lib, prefix)
sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
sysconfig.get_python_lib = sysconfig_get_python_lib
old_get_config_vars = sysconfig.get_config_vars
def sysconfig_get_config_vars(*args):
real_vars = old_get_config_vars(*args)
if sys.platform == "win32":
lib_dir = os.path.join(sys.real_prefix, "libs")
if isinstance(real_vars, dict) and "LIBDIR" not in real_vars:
real_vars["LIBDIR"] = lib_dir # asked for all
elif isinstance(real_vars, list) and "LIBDIR" in args:
real_vars = real_vars + [lib_dir] # asked for list
return real_vars
sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
sysconfig.get_config_vars = sysconfig_get_config_vars
| gpl-3.0 |
hoeck/webkitwindow | webkitwindow.py | 1 | 27686 | import sys
import os
import Queue
import StringIO
import urlparse
import mimetypes
import pkgutil
import itertools
try:
from PyQt4 import QtCore, QtGui, QtWebKit, QtNetwork
except ImportError:
from PySide import QtCore, QtGui, QtWebKit, QtNetwork
HTTP_STATUS = {
200: 'OK',
301: 'Moved Permanently',
302: 'Found',
400: 'Bad Request',
404: 'Not Found',
406: 'Not Acceptable',
500: 'Internal Server Error',
503: 'Service Unavailable',
}
class Message():
"""An HTTP message.
headers must be a dict of {str: str/unicode}. (unicode gets
converted to an utf8 string)
    body may be None, str or unicode. Unicode is encoded to an utf8
    string, None becomes an empty string and anything else is converted
    with str().
"""
def __init__(self, headers={}, body=None):
self.headers = {}
for k,v in headers.items():
assert isinstance(k, basestring), "header keys must be strings, not: %r" % (k, )
if isinstance(v, unicode):
                v = v.encode('utf-8')
elif isinstance(v, str):
pass
else:
assert False, "header values must be strings or unicode, not: %r" % (v, )
self.headers[k] = v
if isinstance(body, unicode):
self.body = body.encode('utf-8')
elif isinstance(body, str):
self.body = body
elif body is None:
self.body = ""
else:
self.body = str(body)
self._write_fn = None
self._close_fn = None
# streaming response data
def _set_streaming(self, write_fn, close_fn):
self._write_fn = write_fn
self._close_fn = close_fn
def write(self, data):
"""Write data for a streaming response.
Return True on success, False otherwise.
"""
if not self._write_fn:
raise Exception("not a streaming response")
if data:
return self._write_fn(data)
return False
def close(self):
"""Close the streaming response.
Return True on success, False otherwise.
"""
if not self._write_fn:
raise Exception("not a streaming response")
return self._close_fn()
def _parse_url(obj, url):
"""Parse url and add the resulting parts as url_* attrs to obj."""
r = urlparse.urlparse(url)
obj.url_scheme = r.scheme
obj.url_netloc = r.netloc
obj.url_path = r.path
obj.url_params = r.params
obj.url_query = r.query
obj.url_query_dict = urlparse.parse_qs(r.query)
obj.url_fragment = r.fragment
def guess_type(name, default="application/octet-stream"):
"""Given a path to a file, guess its mimetype."""
guessed_type, encoding = mimetypes.guess_type(name, strict=False)
return guessed_type or default
class Request():
def __init__(self, method, url, message, fake_reply):
self.message = message
self.method = method
self.url = url
self.fake_reply = fake_reply
self._streaming = False
_parse_url(self, url)
def respond(self, status=None, message=None, streaming=False):
"""Respond to this request with a Message.
If streaming is True, initiate a streaming response. Stream
data using the passed messages .write(data) method and end the
request with .close().
Returns True when the reply was initiated successfully, False
if it failed (e.g. when the client has already closed the
connection).
"""
assert isinstance(message, Message)
status = status or 200
if isinstance(status, (int, long)):
status_text = HTTP_STATUS.get(status, '')
elif isinstance(status, (tuple, list)):
status, status_text = status
status = int(status or 200)
status_text = str(status_text or '')
elif isinstance(status, basestring):
status, status_text = status.split(' ', 1)
status = int(status)
else:
raise TypeError("status must be a number or tuple of (status, text), not: %r" % (status, ))
if streaming:
def _write_fn(data):
if self.fake_reply.aborted:
return False
self.fake_reply.fake_response_write.emit(str(data))
return True
def _close_fn():
if self.fake_reply.aborted:
return False
self.fake_reply.fake_response_close.emit()
return True
message._set_streaming(write_fn=_write_fn, close_fn=_close_fn)
if self.fake_reply.aborted:
return False
else:
self.fake_reply.fake_response.emit(status, status_text, message, True)
if message.body is not None:
message.write(message.body)
return True
else:
if self.fake_reply.aborted:
return False
else:
self.fake_reply.fake_response.emit(status, status_text, message, False)
return True
# response shortcuts
def notfound(self, msg=""):
"""Respond with '404 Not Found' and an optional message."""
return self.respond((404, 'Not Found'), Message({'Content-Type': 'text/plain'}, msg))
def gone(self, msg=""):
"""Respond with a '410 Gone' and an optional message."""
        return self.respond((410, 'Gone'), Message({'Content-Type': 'text/plain'}, msg))
def redirect(self, url):
"""Respond with a 302 Found to url."""
return self.respond((302, 'Found'), Message({'Location': url}))
def found(self, body, content_type="text/plain"):
"""Respond with a 200, data and content_type."""
return self.respond((200, 'Found'), Message({"Content-Type": content_type}, body))
def found_resource(self, path, module_name, content_type=None, modify_fn=None):
"""Respond with a 200 and a resource file loaded using pkgutil.get_data.
module_name and path are passed to pkgutil.get_data.
Optionally run modify_fn on the returned string (e.g. to fill a template).
Example to deliver a file from the webkitwindow.resources directory:
req.found_resource(path='/styles.css',
module_name='webkitwindow.resources',
modify_fn=lambda s: s.replace('TODAY', datetime.datetime.now()))
"""
res_string = pkgutil.get_data(module_name, path)
if modify_fn:
res_string = modify_fn(res_string)
return self.found(body=res_string, content_type=content_type or guess_type(path))
def found_file(self, path, content_type=None):
"""Respond with a 200 and the file at path, optionally using content_type."""
with open(path) as f:
return self.found(body=f.read(), content_type=content_type or guess_type(path))
class WebSocket():
    # created by the WebSocketBackend class and passed to the NetworkHandler
def __init__(self, url, backend, id):
self.url = url
self._backend = backend
self._id = id
_parse_url(self, url)
def connected(self):
"""Confirm a connection."""
self._backend.onopen.emit(self._id)
def send(self, data):
"""Send data over an opened connection."""
self._backend.send_to_client(self._id, data)
def close(self):
"""Close the connection."""
self._backend.server_close(self._id)
class NetworkHandler():
"""A Class dealing with requests from the embedded webkit.
Subclass or ducktype it to implement your own request/websocket
handlers.
"""
def startup(self, window):
"""Called after application startup.
window is the created WebkitWindow instance.
"""
pass
# HTTP
def request(self, request):
"""Incoming Request.
Use request.respond(message) to respond.
"""
pass
# WebSocket
def connect(self, websocket):
"""Incoming WebSocket conncetion.
Call .connected() on the provided websocket object to confirm the connection
Call .close() to close or abort the connection.
"""
pass
def receive(self, websocket, data):
"""Incoming WebSocket data.
Call .send() on the provided websocket object to send data back.
"""
pass
def close(self, websocket):
"""Client has closed the websocket connection."""
pass
class AnyValue(QtCore.QObject):
def __init__(self, value):
self.value = value
class AsyncNetworkHandler(QtCore.QObject):
_request = QtCore.pyqtSignal(object)
_connect = QtCore.pyqtSignal(object)
_receive = QtCore.pyqtSignal(object, str)
_close = QtCore.pyqtSignal(object)
def __init__(self, network_handler):
super(AsyncNetworkHandler, self).__init__()
self._nh = network_handler
self._request.connect(self.request)
self._connect.connect(self.connect)
self._receive.connect(self.receive)
self._close.connect(self.close)
# HTTP
@QtCore.pyqtSlot(object)
def request(self, request):
self._nh.request(request)
# object
@QtCore.pyqtSlot(object)
def connect(self, websocket):
self._nh.connect(websocket)
@QtCore.pyqtSlot(object, str)
def receive(self, websocket, data):
self._nh.receive(websocket, unicode(data))
@QtCore.pyqtSlot(object)
def close(self, websocket):
self._nh.close(websocket)
class LocalDispatchNetworkAccessManager(QtNetwork.QNetworkAccessManager):
"""
Custom NetworkAccessManager to intercept requests and dispatch them locally.
"""
operation_strings = {
QtNetwork.QNetworkAccessManager.HeadOperation: 'HEAD',
QtNetwork.QNetworkAccessManager.GetOperation: 'GET',
QtNetwork.QNetworkAccessManager.PutOperation: 'PUT',
QtNetwork.QNetworkAccessManager.PostOperation: 'POST',
QtNetwork.QNetworkAccessManager.DeleteOperation: 'DELETE',
QtNetwork.QNetworkAccessManager.CustomOperation: None,
}
def set_network_handler(self, network_handler):
# overwriting the ctor with new arguments is not allowed -> use a setter instead
self.network_handler = network_handler
def createRequest(self, operation, request, data):
reply = None
# decode operation (== request method)
op_str = self.operation_strings[operation]
if op_str:
method = op_str
else:
# custom
            method = str(request.attribute(QtNetwork.QNetworkRequest.CustomVerbAttribute).toString())
url = str(request.url().toString())
headers = dict((str(h),str(request.rawHeader(h))) for h in request.rawHeaderList())
# data is a QIODevice or None
msg = Message(headers=headers, body=data and str(data.readAll()))
reply = FakeReply(self, request, operation)
self.network_handler._request.emit(Request(method=method, url=url, message=msg, fake_reply=reply)) # will .set_response the FakeReply to reply
QtCore.QTimer.singleShot(0, lambda:self.finished.emit(reply))
return reply
class FakeReply(QtNetwork.QNetworkReply):
"""
QNetworkReply implementation that returns a given response.
"""
fake_response = QtCore.pyqtSignal(int, str, object, object)
fake_response_write = QtCore.pyqtSignal(object)
fake_response_close = QtCore.pyqtSignal()
def __init__(self, parent, request, operation):
QtNetwork.QNetworkReply.__init__(self, parent)
self.fake_response.connect(self._fake_response)
self.fake_response_write.connect(self._fake_response_write)
self.fake_response_close.connect(self._fake_response_close)
self._streaming = False
self._content = None
self._offset = 0
# know when to stop writing into the reply
self.aborted = False
self.setRequest(request)
self.setUrl(request.url())
self.setOperation(operation)
self.open(self.ReadOnly | self.Unbuffered)
@QtCore.pyqtSlot(int, str, object, object)
def _fake_response(self, status, status_text, response, streaming):
assert isinstance(response, Message)
# status
self.setAttribute(QtNetwork.QNetworkRequest.HttpStatusCodeAttribute, status)
self.setAttribute(QtNetwork.QNetworkRequest.HttpReasonPhraseAttribute, status_text)
# headers
for k,v in response.headers.items():
self.setRawHeader(QtCore.QByteArray(k), QtCore.QByteArray(v))
if streaming:
# streaming response, call fake_response_write and fake_response_close
self._streaming = True
self._content = StringIO.StringIO()
else:
self._content = response.body
self._offset = 0
# respond immediately
if self._content and not 'Content-Length' in response.headers:
self.setHeader(QtNetwork.QNetworkRequest.ContentLengthHeader, QtCore.QVariant(len(self._content)))
QtCore.QTimer.singleShot(0, lambda : self.readyRead.emit())
QtCore.QTimer.singleShot(0, lambda : self.finished.emit())
@QtCore.pyqtSlot(object)
def _fake_response_write(self, response):
assert isinstance(response, basestring)
assert self._streaming, "not a streaming response"
self._content.write(response)
self.readyRead.emit()
@QtCore.pyqtSlot()
def _fake_response_close(self):
assert self._streaming, "not a streaming response"
self.finished.emit()
def abort(self):
self.aborted = True
self.finished.emit()
def bytesAvailable(self):
if isinstance(self._content, StringIO.StringIO):
c = self._content.getvalue()
else:
c = self._content
avail = long(len(c) - self._offset + super(FakeReply, self).bytesAvailable())
return avail
def isSequential(self):
return True
def readData(self, max_size):
if isinstance(self._content, StringIO.StringIO):
c = self._content.getvalue()
else:
c = self._content
if self._offset < len(c):
size = min(max_size, len(c)-self._offset)
data = c[self._offset:self._offset+size]
self._offset += size
return data
else:
return None
class WebSocketBackend(QtCore.QObject):
    # javascript websocket events for the given connection_id
onmessage = QtCore.pyqtSignal(int, str)
onopen = QtCore.pyqtSignal(int)
onclose = QtCore.pyqtSignal(int)
def __init__(self, network_handler):
super(WebSocketBackend, self).__init__()
self._connections = {}
self._ids = itertools.count()
self._network_handler = network_handler
@QtCore.pyqtSlot(str, result=int)
def connect(self, url):
"""Create a websocket connection."""
id = self._ids.next()
ws = WebSocket(str(url), self, id)
self._connections[id] = ws
QtCore.QTimer.singleShot(0, lambda: self._network_handler._connect.emit(ws)) #??????
return id
@QtCore.pyqtSlot(int)
def client_close(self, id):
"""Close the given websocket connection, initiated from the client."""
self._network_handler._close.emit(self._connections[id])
del self._connections[id]
def server_close(self, id):
"""Close the given websocket connection, initiated from the server."""
del self._connections[id]
self.onclose.emit(id)
@QtCore.pyqtSlot(int, str)
def send_to_server(self, id, data):
"""Send data on the given websocket connection to the network_handler."""
self._network_handler._receive.emit(self._connections[id], data)
def send_to_client(self, id, data):
"""Send data from the backend to the given websocket in the browser."""
assert self._connections[id]
self.onmessage.emit(id, data)
class CustomQWebPage(QtWebKit.QWebPage):
"""QWebPage subclass to be able to implement shouldInterruptJavaScript.
See http://doc.qt.io/qt-4.8/qwebpage.html#shouldInterruptJavaScript
Additionally provides a configurable javascript console message
handler, possible values:
'print' .. print the console message to stdout (the default)
function .. call function on each message with a dict of
message, line_number and source_id keys
None .. do nothing
The underlying javaScriptConsoleMessage method will be called for
console.log() calls, ignoring everything but the first args and
for javascript errors.
TODO:
- allow for customization of shouldInterruptJavaScript
- custom settings for each created iframe
    - implement the other javascript* handlers (alert, prompt, confirm)
"""
def __init__(self, console_message='print'):
self._console_message = console_message
QtWebKit.QWebPage.__init__(self)
@QtCore.pyqtSlot(result=bool)
def shouldInterruptJavaScript(self):
return False
def javaScriptConsoleMessage(self, message, lineNumber, sourceID):
if self._console_message == 'print':
print u'js-console: {} ({}:{})'.format(unicode(message),
unicode(sourceID),
unicode(lineNumber)).encode('utf-8', 'ignore')
elif self._console_message:
self._console_message({'message': unicode(message),
'line_number': unicode(lineNumber),
'source_id': unicode(sourceID)})
else:
pass
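# Illustrative sketch (not part of the original module): a callable of the kind
# the CustomQWebPage docstring above accepts for console_message. The function
# name and the choice of stderr are assumptions made for this example.
def example_console_message_handler(msg):
    # msg is the dict built by javaScriptConsoleMessage above:
    # 'message', 'line_number' and 'source_id', all unicode strings
    sys.stderr.write(u'js-console: {message} ({source_id}:{line_number})\n'
                     .format(**msg).encode('utf-8', 'ignore'))
# usage sketch: CustomQWebPage(console_message=example_console_message_handler)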
class _WebkitWindow(QtGui.QMainWindow):
_close_window = QtCore.pyqtSignal()
_set_zoom_factor = QtCore.pyqtSignal(float)
def __init__(self, network_handler, url=None, console_message='print', no_focus_classname=None):
self._console_message = console_message
self.url = url or "http://localhost"
self.network_handler = AsyncNetworkHandler(network_handler)
self.no_focus_classname = no_focus_classname
QtGui.QMainWindow.__init__(self)
self.setup()
self._set_zoom_factor.connect(self.zoom_factor)
def setup(self):
centralwidget = QtGui.QWidget()
centralwidget.setObjectName("centralwidget")
horizontalLayout = QtGui.QHBoxLayout(centralwidget)
horizontalLayout.setObjectName("horizontalLayout")
self.webview = QtWebKit.QWebView(centralwidget)
webpage = CustomQWebPage(console_message=self._console_message)
# set the custom NAM
nam = LocalDispatchNetworkAccessManager()
nam.set_network_handler(self.network_handler)
webpage.setNetworkAccessManager(nam)
# websocket requests do not go through the custom NAM
# -> catch them in the javascript directly
self.websocket_backend = WebSocketBackend(self.network_handler)
self.setup_local_websockets(webpage)
self.webview.setPage(webpage)
# implement the custom focus rule for iframes
self.setup_micro_focus_handler(webpage)
horizontalLayout.addWidget(self.webview)
horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.setCentralWidget(centralwidget)
self.webview.setUrl(QtCore.QUrl(self.url))
# setup webkit
gs = QtWebKit.QWebSettings.globalSettings()
gs.setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True)
gs.setAttribute(QtWebKit.QWebSettings.JavascriptEnabled, True)
gs.setAttribute(QtWebKit.QWebSettings.AutoLoadImages, True)
gs.setAttribute(QtWebKit.QWebSettings.JavascriptCanOpenWindows, True)
gs.setAttribute(QtWebKit.QWebSettings.DeveloperExtrasEnabled, True)
gs.setAttribute(QtWebKit.QWebSettings.LocalContentCanAccessRemoteUrls, True)
# setup app details
QtGui.QApplication.setApplicationName("Panel")
QtGui.QApplication.setOrganizationName("Panel")
# close slot
def _close_handler():
# without resetting the QtWebView widget, I get segfaults
# when closing this window
self.setCentralWidget(QtGui.QWidget())
self.close()
self._close_window.connect(_close_handler)
### Capturing Websocket Connections
# For WebSockets, QtWebKit does not use the
# QNetworkAccessManager. Thus we 'intercept' WebSocket connection
# attempts by adding our own implementation of the WebSocket
# interface to the javascript window context of each new frame.
websocket_js = """
/**
* Provide a Websocket interface that uses a QT object (_wsExt)
* instead of the network to be able to proxy the websocket
* communication.
*/
(function() {
// pass the local interfacing object via window globals
var wsExt = window._wsExt;
window._wsExt = undefined;
window.WebSocket = function(url) {
var self = this, connId;
self.CONNECTING = 0; // The connection has not yet been established.
self.OPEN = 1; // The WebSocket connection is established and communication is possible.
self.CLOSING = 2; // The connection is going through the closing handshake.
                self.CLOSED = 3;     // The connection has been closed or could not be opened.
self.url = url;
self.readyState = self.CONNECTING;
self.extensions = "";
self.protocol = "";
self.onopen = undefined;
self.onmessage = undefined;
self.onerror = undefined;
self.onclose = undefined;
self.send = function(data) {
wsExt.send_to_server(connId, data);
};
self.close = function(code, reason) {
if (self.readyState === self.CLOSING || self.readyState === self.CLOSED) {
// nothing
} else if (self.readyState === self.OPEN) {
self.readyState = self.CLOSING;
wsExt.close(connId);
if (self.onclose) {
self.onclose();
}
} else {
                        self.readyState = self.CLOSED;
}
};
// register callbacks on the Qt side
wsExt.onopen.connect(function(id) {
if (id === connId) {
self.readyState = self.OPEN;
if (self.onopen) {
self.onopen();
}
}
});
wsExt.onmessage.connect(function(id, data) {
if (id === connId) {
if (self.onmessage) {
self.onmessage({data:data});
}
}
});
wsExt.onclose.connect(function(id) {
if (id === connId) {
self.readyState = self.CLOSED;
if (self.onclose) {
self.onclose();
}
}
});
// init
connId = wsExt.connect(url);
};
})();
"""
def setup_local_websockets_on_frame(self, qwebframe):
def _load_js(f=qwebframe, js=self.websocket_js, websocket_backend=self.websocket_backend):
# without passing arguments as default keyword arguments, I get strange errors:
# "NameError: free variable 'self' referenced before assignment in enclosing scope"
            # which looks like somebody is trying to null all local
            # arguments at the end of my function
f.addToJavaScriptWindowObject("_wsExt", websocket_backend)
f.evaluateJavaScript(js)
# TODO: 'dispose' the websocket object when the frame is gone (e.g. after reload)
qwebframe.javaScriptWindowObjectCleared.connect(_load_js)
def setup_local_websockets(self, qwebpage):
qwebpage.frameCreated.connect(lambda frame: self.setup_local_websockets_on_frame(frame))
def setup_micro_focus_handler(self, qwebpage):
"""Allow defining IFRAMEs that can't be focused.
        All iframes that carry the css class named by `no_focus_classname`
        will pass their (keyboard) focus back to their parent.
"""
def _steal_focus_from_frame():
p = qwebpage.currentFrame().parentFrame()
if p:
# blindly assume that .findAllElements and childFrames
# return things in the *same* order
for e,f in zip(p.findAllElements('iframe'), p.childFrames()):
if f.hasFocus() and self.no_focus_classname in list(e.classes()):
# TODO: break circles in case `p` is trying to
# assign the focus back to `f`
p.setFocus()
if self.no_focus_classname:
qwebpage.microFocusChanged.connect(_steal_focus_from_frame)
@QtCore.pyqtSlot(float)
def zoom_factor(self, zf=None):
"""Get or set the zoom factor for the embedded webview."""
        if zf is None:
return self.webview.zoomFactor()
else:
assert isinstance(zf, float)
self.webview.setZoomFactor(zf)
class WebkitWindow(object):
@classmethod
def run(self, handler, url="http://localhost", exit=True, console_message='print', no_focus_classname=None):
"""Open a window displaying a single webkit instance.
handler must be an object implementing the NetworkHandler
interface (or deriving from it).
Navigate the webkit to url after opening it.
console_message ('print', function that receives a dict or
None) controls how to deal with javascript console messages,
see CustomQWebPage.
no_focus_classname should be a css classname that, when set on
an iframe element, will prevent this element from being
focused permanently - it will pass the focus back to its
parent iframe. Use None (the default) to turn this feature
off.
If exit is true, sys.exit after closing the window.
"""
win = self(handler, url, exit, console_message, no_focus_classname)
return win._run()
@staticmethod
def run_later(f, timeout=None):
"""Enqueue and run function f on the main thread."""
QtCore.QTimer.singleShot(timeout or 0, f)
def __init__(self, handler, url, exit, console_message, no_focus_classname):
self._handler = handler
self._url = url
self._exit = exit
self._console_message = console_message
self._no_focus_classname = no_focus_classname
def _run(self):
app = QtGui.QApplication(sys.argv)
self._window = _WebkitWindow(self._handler, self._url, self._console_message, self._no_focus_classname)
self._window.show()
if getattr(self._handler, 'startup', None):
self.run_later(lambda:self._handler.startup(self))
if self._exit:
sys.exit(app.exec_())
else:
return app.exec_()
def close(self):
"""Close this WebkitWindow and exit."""
self._window._close_window.emit()
def zoom_factor(self, zoom_factor=None):
"""Get or set the zoom factor."""
        if zoom_factor is None:
return self._window.zoom_factor()
else:
assert isinstance(zoom_factor, (int, long, float))
self._window._set_zoom_factor.emit(float(zoom_factor))
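# Usage sketch (illustrative only): the handler class below is an assumption --
# the full NetworkHandler interface is defined elsewhere in this module -- but
# the startup() hook shown matches the getattr() call in _run() above.
#
#     class Handler(object):  # implements the NetworkHandler interface
#         def startup(self, window):
#             # 'window' is the WebkitWindow wrapper passed by _run();
#             # run_later() queues work on the Qt main thread
#             window.run_later(lambda: window.zoom_factor(1.25))
#
#     WebkitWindow.run(Handler(), url="http://localhost", exit=True)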
| bsd-3-clause |
sivas2811/mocha_739 | hotdot_env/lib/python2.7/site-packages/setuptools/_backport/hashlib/_sha512.py | 77 | 14505 | """
This code was Ported from CPython's sha512module.c
"""
import struct
SHA_BLOCKSIZE = 128
SHA_DIGESTSIZE = 64
def new_shaobject():
return {
'digest': [0]*8,
'count_lo': 0,
'count_hi': 0,
'data': [0]* SHA_BLOCKSIZE,
'local': 0,
'digestsize': 0
}
ROR64 = lambda x, y: (((x & 0xffffffffffffffff) >> (y & 63)) | (x << (64 - (y & 63)))) & 0xffffffffffffffff
Ch = lambda x, y, z: (z ^ (x & (y ^ z)))
Maj = lambda x, y, z: (((x | y) & z) | (x & y))
S = lambda x, n: ROR64(x, n)
R = lambda x, n: (x & 0xffffffffffffffff) >> n
Sigma0 = lambda x: (S(x, 28) ^ S(x, 34) ^ S(x, 39))
Sigma1 = lambda x: (S(x, 14) ^ S(x, 18) ^ S(x, 41))
Gamma0 = lambda x: (S(x, 1) ^ S(x, 8) ^ R(x, 7))
Gamma1 = lambda x: (S(x, 19) ^ S(x, 61) ^ R(x, 6))
def sha_transform(sha_info):
W = []
d = sha_info['data']
for i in xrange(0,16):
W.append( (d[8*i]<<56) + (d[8*i+1]<<48) + (d[8*i+2]<<40) + (d[8*i+3]<<32) + (d[8*i+4]<<24) + (d[8*i+5]<<16) + (d[8*i+6]<<8) + d[8*i+7])
for i in xrange(16,80):
W.append( (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xffffffffffffffff )
ss = sha_info['digest'][:]
def RND(a,b,c,d,e,f,g,h,i,ki):
t0 = (h + Sigma1(e) + Ch(e, f, g) + ki + W[i]) & 0xffffffffffffffff
t1 = (Sigma0(a) + Maj(a, b, c)) & 0xffffffffffffffff
d = (d + t0) & 0xffffffffffffffff
h = (t0 + t1) & 0xffffffffffffffff
return d & 0xffffffffffffffff, h & 0xffffffffffffffff
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],0,0x428a2f98d728ae22)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],1,0x7137449123ef65cd)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],2,0xb5c0fbcfec4d3b2f)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],3,0xe9b5dba58189dbbc)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25bf348b538)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1b605d019)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4af194f9b)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5da6d8118)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98a3030242)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b0145706fbe)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be4ee4b28c)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3d5ffb4e2)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74f27b896f)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe3b1696b1)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a725c71235)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174cf692694)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c19ef14ad2)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786384f25e3)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc68b8cd5b5)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc77ac9c65)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f592b0275)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa6ea6e483)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dcbd41fbd4)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da831153b5)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152ee66dfab)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d2db43210)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c898fb213f)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7beef0ee4)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf33da88fc2)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147930aa725)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351e003826f)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x142929670a0e6e70)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a8546d22ffc)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b21385c26c926)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc5ac42aed)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d139d95b3df)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a73548baf63de)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb3c77b2a8)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e47edaee6)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c851482353b)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a14cf10364)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664bbc423001)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70d0f89791)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a30654be30)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819d6ef5218)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd69906245565a910)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e35855771202a)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa07032bbd1b8)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],48,0x19a4c116b8d2d0c8)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c085141ab53)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774cdf8eeb99)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5e19b48a8)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3c5c95a63)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4ae3418acb)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f7763e373)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3d6b2b8a3)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee5defb2fc)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f43172f60)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814a1f0ab72)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc702081a6439ec)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa23631e28)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506cebde82bde9)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7b2c67915)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2e372532b)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],64,0xca273eceea26619c)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],65,0xd186b8c721c0c207)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],66,0xeada7dd6cde0eb1e)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],67,0xf57d4f7fee6ed178)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],68,0x06f067aa72176fba)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],69,0x0a637dc5a2c898a6)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],70,0x113f9804bef90dae)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],71,0x1b710b35131c471b)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],72,0x28db77f523047d84)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],73,0x32caab7b40c72493)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],74,0x3c9ebe0a15c9bebc)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],75,0x431d67c49c100d4c)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],76,0x4cc5d4becb3e42b6)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],77,0x597f299cfc657e2a)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],78,0x5fcb6fab3ad6faec)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],79,0x6c44198c4a475817)
dig = []
for i, x in enumerate(sha_info['digest']):
dig.append( (x + ss[i]) & 0xffffffffffffffff )
sha_info['digest'] = dig
def sha_init():
sha_info = new_shaobject()
sha_info['digest'] = [ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179]
sha_info['count_lo'] = 0
sha_info['count_hi'] = 0
sha_info['local'] = 0
sha_info['digestsize'] = 64
return sha_info
def sha384_init():
sha_info = new_shaobject()
sha_info['digest'] = [ 0xcbbb9d5dc1059ed8, 0x629a292a367cd507, 0x9159015a3070dd17, 0x152fecd8f70e5939, 0x67332667ffc00b31, 0x8eb44a8768581511, 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4]
sha_info['count_lo'] = 0
sha_info['count_hi'] = 0
sha_info['local'] = 0
sha_info['digestsize'] = 48
return sha_info
def getbuf(s):
if isinstance(s, str):
return s
elif isinstance(s, unicode):
return str(s)
else:
return buffer(s)
def sha_update(sha_info, buffer):
count = len(buffer)
buffer_idx = 0
clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff
if clo < sha_info['count_lo']:
sha_info['count_hi'] += 1
sha_info['count_lo'] = clo
sha_info['count_hi'] += (count >> 29)
if sha_info['local']:
i = SHA_BLOCKSIZE - sha_info['local']
if i > count:
i = count
# copy buffer
for x in enumerate(buffer[buffer_idx:buffer_idx+i]):
sha_info['data'][sha_info['local']+x[0]] = struct.unpack('B', x[1])[0]
count -= i
buffer_idx += i
sha_info['local'] += i
if sha_info['local'] == SHA_BLOCKSIZE:
sha_transform(sha_info)
sha_info['local'] = 0
else:
return
while count >= SHA_BLOCKSIZE:
# copy buffer
sha_info['data'] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE]]
count -= SHA_BLOCKSIZE
buffer_idx += SHA_BLOCKSIZE
sha_transform(sha_info)
# copy buffer
pos = sha_info['local']
sha_info['data'][pos:pos+count] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + count]]
sha_info['local'] = count
def sha_final(sha_info):
lo_bit_count = sha_info['count_lo']
hi_bit_count = sha_info['count_hi']
count = (lo_bit_count >> 3) & 0x7f
    sha_info['data'][count] = 0x80
count += 1
if count > SHA_BLOCKSIZE - 16:
# zero the bytes in data after the count
sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
sha_transform(sha_info)
# zero bytes in data
sha_info['data'] = [0] * SHA_BLOCKSIZE
else:
sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
    # bytes 112..119 hold the upper 64 bits of the 128-bit message length,
    # which is always zero here (the bit count is tracked in 64 bits)
    sha_info['data'][112:120] = [0] * 8
sha_info['data'][120] = (hi_bit_count >> 24) & 0xff
sha_info['data'][121] = (hi_bit_count >> 16) & 0xff
sha_info['data'][122] = (hi_bit_count >> 8) & 0xff
sha_info['data'][123] = (hi_bit_count >> 0) & 0xff
sha_info['data'][124] = (lo_bit_count >> 24) & 0xff
sha_info['data'][125] = (lo_bit_count >> 16) & 0xff
sha_info['data'][126] = (lo_bit_count >> 8) & 0xff
sha_info['data'][127] = (lo_bit_count >> 0) & 0xff
sha_transform(sha_info)
dig = []
for i in sha_info['digest']:
dig.extend([ ((i>>56) & 0xff), ((i>>48) & 0xff), ((i>>40) & 0xff), ((i>>32) & 0xff), ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ])
return ''.join([chr(i) for i in dig])
class sha512(object):
digest_size = digestsize = SHA_DIGESTSIZE
block_size = SHA_BLOCKSIZE
def __init__(self, s=None):
self._sha = sha_init()
if s:
sha_update(self._sha, getbuf(s))
def update(self, s):
sha_update(self._sha, getbuf(s))
def digest(self):
return sha_final(self._sha.copy())[:self._sha['digestsize']]
def hexdigest(self):
return ''.join(['%.2x' % ord(i) for i in self.digest()])
def copy(self):
new = sha512.__new__(sha512)
new._sha = self._sha.copy()
return new
class sha384(sha512):
digest_size = digestsize = 48
def __init__(self, s=None):
self._sha = sha384_init()
if s:
sha_update(self._sha, getbuf(s))
def copy(self):
new = sha384.__new__(sha384)
new._sha = self._sha.copy()
return new
if __name__ == "__main__":
a_str = "just a test string"
assert sha512().hexdigest() == "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
assert sha512(a_str).hexdigest() == "68be4c6664af867dd1d01c8d77e963d87d77b702400c8fabae355a41b8927a5a5533a7f1c28509bbd65c5f3ac716f33be271fbda0ca018b71a84708c9fae8a53"
assert sha512(a_str*7).hexdigest() == "3233acdbfcfff9bff9fc72401d31dbffa62bd24e9ec846f0578d647da73258d9f0879f7fde01fe2cc6516af3f343807fdef79e23d696c923d79931db46bf1819"
s = sha512(a_str)
s.update(a_str)
assert s.hexdigest() == "341aeb668730bbb48127d5531115f3c39d12cb9586a6ca770898398aff2411087cfe0b570689adf328cddeb1f00803acce6737a19f310b53bbdb0320828f75bb"
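    # Additional illustrative check (not in the original module): the two
    # incremental update() calls above must match hashing the concatenated
    # string in a single pass.
    assert sha512(a_str * 2).hexdigest() == s.hexdigest()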
| unlicense |
moreati/ppeg | setup.py | 1 | 1037 | #!/usr/bin/env python
import io
import os
from setuptools import setup, Extension
def read(fname, encoding='utf-8'):
here = os.path.dirname(__file__)
with io.open(os.path.join(here, fname), encoding=encoding) as f:
return f.read()
setup (
name='PPeg',
version='0.9.4',
description="A Python port of Lua's LPeg pattern matching library",
long_description=read('README.rst'),
url='https://github.com/moreati/ppeg',
author='Alex Willmer',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2 :: Only',
'Topic :: Text Processing :: General',
],
keywords='parsing peg grammar regex',
ext_modules = [Extension('_ppeg', ['_ppeg.c', 'lpeg.c']),
Extension('_cpeg', ['_cpeg.c'])],
py_modules=[
'PythonImpl',
'pegmatcher',
],
)
| mit |
tomSny/XStarLogViewer | LogAnalyzer/py2exe/LogAnalyzer.py | 3 | 12537 | #!/usr/bin/env python
#
# A module to analyze and identify any common problems which can be determined from log files
#
# Initial code by Andrew Chapman ([email protected]), 16th Jan 2014
#
# some logging oddities noticed while doing this, to be followed up on:
# - tradheli MOT labels Mot1,Mot2,Mot3,Mot4,GGain
# - Pixhawk doesn't output one of the FMT labels... forget which one
# - MAG offsets seem to be constant (only seen data on Pixhawk)
# - MAG offsets seem to be cast to int before being output? (param is -84.67, logged as -84)
# - copter+plane use 'V' in their vehicle type/version/build line, rover uses lower case 'v'. Copter+Rover give a build number, plane does not
# - CTUN.ThrOut on copter is 0-1000, on plane+rover it is 0-100
# TODO: add test for noisy baro values
# TODO: support loading binary log files (use Tridge's mavlogdump?)
import DataflashLog
import pprint # temp
import imp
import glob
import inspect
import os, sys
import argparse
import datetime
import time
from xml.sax.saxutils import escape
from VehicleType import VehicleType
class TestResult(object):
'''all tests return a standardized result type'''
class StatusType:
# NA means not applicable for this log (e.g. copter tests against a plane log), UNKNOWN means it is missing data required for the test
GOOD, FAIL, WARN, UNKNOWN, NA = range(5)
status = None
statusMessage = "" # can be multi-line
class Test(object):
'''base class to be inherited by log tests. Each test should be quite granular so we have lots of small tests with clear results'''
def __init__(self):
self.name = ""
self.result = None # will be an instance of TestResult after being run
self.execTime = None
self.enable = True
def run(self, logdata, verbose=False):
pass
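# Illustrative sketch (not one of the bundled tests, which live under 'tests/'):
# a minimal Test subclass of the shape TestSuite auto-discovers. The test name
# and the 100-line threshold are assumptions chosen for this example.
class ExampleSkippedLinesTest(Test):
    '''warn when a noticeable number of malformed log lines were skipped'''
    def __init__(self):
        Test.__init__(self)
        self.name = "Example: Skipped Lines"
    def run(self, logdata, verbose=False):
        self.result = TestResult()
        if logdata.skippedLines > 100:
            self.result.status = TestResult.StatusType.WARN
            self.result.statusMessage = "%d malformed log lines skipped" % logdata.skippedLines
        else:
            self.result.status = TestResult.StatusType.GOOD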
class TestSuite(object):
'''registers test classes, loading using a basic plugin architecture, and can run them all in one run() operation'''
def __init__(self):
self.tests = []
self.logfile = None
self.logdata = None
# dynamically load in Test subclasses from the 'tests' folder
# to prevent one being loaded, move it out of that folder, or set that test's .enable attribute to False
dirName = os.path.dirname(os.path.abspath(__file__))
dirName = dirName.replace('library.zip','')
testScripts = glob.glob(dirName + '/tests/*.py')
testClasses = []
for script in testScripts:
m = imp.load_source("m",script)
for name, obj in inspect.getmembers(m, inspect.isclass):
if name not in testClasses and inspect.getsourcefile(obj) == script:
testClasses.append(name)
self.tests.append(obj())
# and here's an example of explicitly loading a Test class if you wanted to do that
# m = imp.load_source("m", dirName + '/tests/TestBadParams.py')
# self.tests.append(m.TestBadParams())
def run(self, logdata, verbose):
'''run all registered tests in a single call, gathering execution timing info'''
self.logdata = logdata
if 'GPS' not in self.logdata.channels and 'GPS2' in self.logdata.channels:
            # some logs only provide GPS2; treat it as the primary GPS channel
self.logdata.channels['GPS'] = self.logdata.channels['GPS2']
self.logfile = logdata.filename
for test in self.tests:
# run each test in turn, gathering timing info
if test.enable:
startTime = time.time()
test.run(self.logdata, verbose) # RUN THE TEST
endTime = time.time()
test.execTime = 1000 * (endTime-startTime)
def outputPlainText(self, outputStats):
'''output test results in plain text'''
print 'Dataflash log analysis report for file: ' + self.logfile
print 'Log size: %.2fmb (%d lines)' % (self.logdata.filesizeKB / 1024.0, self.logdata.lineCount)
print 'Log duration: %s' % str(datetime.timedelta(seconds=self.logdata.durationSecs)) + '\n'
if self.logdata.vehicleType == VehicleType.Copter and self.logdata.getCopterType():
print 'Vehicle Type: %s (%s)' % (self.logdata.vehicleTypeString, self.logdata.getCopterType())
else:
print 'Vehicle Type: %s' % self.logdata.vehicleTypeString
print 'Firmware Version: %s (%s)' % (self.logdata.firmwareVersion, self.logdata.firmwareHash)
print 'Hardware: %s' % self.logdata.hardwareType
print 'Free RAM: %s' % self.logdata.freeRAM
if self.logdata.skippedLines:
print "\nWARNING: %d malformed log lines skipped during read" % self.logdata.skippedLines
print '\n'
print "Test Results:"
for test in self.tests:
if not test.enable:
continue
statusMessageFirstLine = test.result.statusMessage.strip('\n\r').split('\n')[0]
statusMessageExtra = test.result.statusMessage.strip('\n\r').split('\n')[1:]
execTime = ""
if outputStats:
execTime = " (%6.2fms)" % (test.execTime)
if test.result.status == TestResult.StatusType.GOOD:
print " %20s: GOOD %-55s%s" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.FAIL:
print " %20s: FAIL %-55s%s [GRAPH]" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.WARN:
print " %20s: WARN %-55s%s [GRAPH]" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.NA:
# skip any that aren't relevant for this vehicle/hardware/etc
continue
else:
print " %20s: UNKNOWN %-55s%s" % (test.name, statusMessageFirstLine, execTime)
#if statusMessageExtra:
for line in statusMessageExtra:
print " %29s %s" % ("",line)
print '\n'
print 'The Log Analyzer is currently BETA code.\nFor any support or feedback on the log analyzer please email Andrew Chapman ([email protected])'
print '\n'
def outputXML(self, xmlFile):
'''output test results to an XML file'''
# open the file for writing
xml = None
try:
if xmlFile == '-':
xml = sys.stdout
else:
xml = open(xmlFile, 'w')
except:
sys.stderr.write("Error opening output xml file: %s" % xmlFile)
sys.exit(1)
# output header info
print >>xml, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
print >>xml, "<loganalysis>"
print >>xml, "<header>"
print >>xml, " <logfile>" + escape(self.logfile) + "</logfile>"
print >>xml, " <sizekb>" + escape(`self.logdata.filesizeKB`) + "</sizekb>"
print >>xml, " <sizelines>" + escape(`self.logdata.lineCount`) + "</sizelines>"
print >>xml, " <duration>" + escape(str(datetime.timedelta(seconds=self.logdata.durationSecs))) + "</duration>"
print >>xml, " <vehicletype>" + escape(self.logdata.vehicleTypeString) + "</vehicletype>"
if self.logdata.vehicleType == VehicleType.Copter and self.logdata.getCopterType():
print >>xml, " <coptertype>" + escape(self.logdata.getCopterType()) + "</coptertype>"
print >>xml, " <firmwareversion>" + escape(self.logdata.firmwareVersion) + "</firmwareversion>"
print >>xml, " <firmwarehash>" + escape(self.logdata.firmwareHash) + "</firmwarehash>"
print >>xml, " <hardwaretype>" + escape(self.logdata.hardwareType) + "</hardwaretype>"
print >>xml, " <freemem>" + escape(`self.logdata.freeRAM`) + "</freemem>"
print >>xml, " <skippedlines>" + escape(`self.logdata.skippedLines`) + "</skippedlines>"
print >>xml, "</header>"
# output parameters
print >>xml, "<params>"
for param, value in self.logdata.parameters.items():
print >>xml, " <param name=\"%s\" value=\"%s\" />" % (param,escape(`value`))
print >>xml, "</params>"
# output test results
print >>xml, "<results>"
for test in self.tests:
if not test.enable:
continue
print >>xml, " <result>"
if test.result.status == TestResult.StatusType.GOOD:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>GOOD</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
elif test.result.status == TestResult.StatusType.FAIL:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>FAIL</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
print >>xml, " <data>(test data will be embeded here at some point)</data>"
elif test.result.status == TestResult.StatusType.WARN:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>WARN</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
print >>xml, " <data>(test data will be embeded here at some point)</data>"
elif test.result.status == TestResult.StatusType.NA:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>NA</status>"
else:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>UNKNOWN</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
print >>xml, " </result>"
print >>xml, "</results>"
print >>xml, "</loganalysis>"
xml.close()
def main():
dirName = os.path.dirname(os.path.abspath(__file__))
# deal with command line arguments
parser = argparse.ArgumentParser(description='Analyze an APM Dataflash log for known issues')
parser.add_argument('logfile', type=argparse.FileType('r'), help='path to Dataflash log file (or - for stdin)')
parser.add_argument('-f', '--format', metavar='', type=str, action='store', choices=['bin','log','auto'], default='auto', help='log file format: \'bin\',\'log\' or \'auto\'')
parser.add_argument('-q', '--quiet', metavar='', action='store_const', const=True, help='quiet mode, do not print results')
parser.add_argument('-p', '--profile', metavar='', action='store_const', const=True, help='output performance profiling data')
parser.add_argument('-s', '--skip_bad', metavar='', action='store_const', const=True, help='skip over corrupt dataflash lines')
parser.add_argument('-e', '--empty', metavar='', action='store_const', const=True, help='run an initial check for an empty log')
parser.add_argument('-x', '--xml', type=str, metavar='XML file', nargs='?', const='', default='', help='write output to specified XML file (or - for stdout)')
parser.add_argument('-v', '--verbose', metavar='', action='store_const', const=True, help='verbose output')
args = parser.parse_args()
# load the log
startTime = time.time()
logdata = DataflashLog.DataflashLog(args.logfile.name, format=args.format, ignoreBadlines=args.skip_bad) # read log
endTime = time.time()
if args.profile:
print "Log file read time: %.2f seconds" % (endTime-startTime)
# check for empty log if requested
if args.empty:
emptyErr = DataflashLog.DataflashLogHelper.isLogEmpty(logdata)
if emptyErr:
sys.stderr.write("Empty log file: %s, %s" % (logdata.filename, emptyErr))
sys.exit(1)
    # run the tests and gather timings
testSuite = TestSuite()
startTime = time.time()
testSuite.run(logdata, args.verbose) # run tests
endTime = time.time()
if args.profile:
print "Test suite run time: %.2f seconds" % (endTime-startTime)
# deal with output
if not args.quiet:
testSuite.outputPlainText(args.profile)
if args.xml:
testSuite.outputXML(args.xml)
if not args.quiet:
print "XML output written to file: %s\n" % args.xml
if __name__ == "__main__":
main()
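# Example invocations (illustrative; flag names follow the argparse setup in main()):
#   python LogAnalyzer.py -v flight.log
#   python LogAnalyzer.py -s -p -x report.xml flight.log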
| gpl-3.0 |
ubic135/odoo-design | addons/crm_partner_assign/__openerp__.py | 114 | 2453 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partner Assignation & Geolocation',
'version': '1.0',
'category': 'Customer Relationship Management',
'description': """
This is the module used by OpenERP SA to redirect customers to its partners, based on geolocation.
======================================================================================================
This module lets you geolocate Leads, Opportunities and Partners based on their address.
Once the coordinates of the Lead/Opportunity is known, they can be automatically assigned
to an appropriate local partner, based on the distance and the weight that was assigned to the partner.
""",
'author': 'OpenERP SA',
'depends': ['base_geolocalize', 'crm', 'account', 'portal'],
'data': [
'security/ir.model.access.csv',
'res_partner_view.xml',
'wizard/crm_forward_to_partner_view.xml',
'wizard/crm_channel_interested_view.xml',
'crm_lead_view.xml',
'crm_partner_assign_data.xml',
'crm_portal_view.xml',
'portal_data.xml',
'report/crm_lead_report_view.xml',
'report/crm_partner_report_view.xml',
],
'demo': [
'res_partner_demo.xml',
'crm_lead_demo.xml'
],
'test': ['test/partner_assign.yml'],
'installable': True,
'auto_install': False,
'images': ['images/partner_geo_localization.jpeg','images/partner_grade.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jviada/QuantEcon.py | quantecon/tests/test_lqcontrol.py | 7 | 2299 | """
Author: Chase Coleman
Filename: test_lqcontrol
Tests for lqcontrol.py file
"""
import sys
import os
import unittest
import numpy as np
from scipy.linalg import LinAlgError
from numpy.testing import assert_allclose
from quantecon.lqcontrol import LQ
class TestLQControl(unittest.TestCase):
def setUp(self):
# Initial Values
q = 1.
r = 1.
rf = 1.
a = .95
b = -1.
c = .05
beta = .95
T = 1
self.lq_scalar = LQ(q, r, a, b, C=c, beta=beta, T=T, Rf=rf)
Q = np.array([[0., 0.], [0., 1]])
R = np.array([[1., 0.], [0., 0]])
RF = np.eye(2) * 100
A = np.ones((2, 2)) * .95
B = np.ones((2, 2)) * -1
self.lq_mat = LQ(Q, R, A, B, beta=beta, T=T, Rf=RF)
def tearDown(self):
del self.lq_scalar
del self.lq_mat
def test_scalar_sequences(self):
lq_scalar = self.lq_scalar
x0 = 2
x_seq, u_seq, w_seq = lq_scalar.compute_sequence(x0)
# Solution found by hand
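        # For this one-period problem the control u minimizes
        #     Q*u**2 + beta*Rf*(A*x0 + B*u)**2
        # (the x0 cost term is dropped as it does not depend on u); the
        # first-order condition
        #     2*Q*u + 2*beta*Rf*B*(A*x0 + B*u) = 0
        # rearranges to the closed form evaluated below.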
u_0 = (-2*lq_scalar.A*lq_scalar.B*lq_scalar.beta*lq_scalar.Rf) / \
(2*lq_scalar.Q+lq_scalar.beta*lq_scalar.Rf*2*lq_scalar.B**2) \
* x0
x_1 = lq_scalar.A * x0 + lq_scalar.B * u_0 + w_seq[0, -1]
assert_allclose(u_0, u_seq, rtol=1e-4)
assert_allclose(x_1, x_seq[0, -1], rtol=1e-4)
def test_mat_sequences(self):
lq_mat = self.lq_mat
x0 = np.random.randn(2) * 25
x_seq, u_seq, w_seq = lq_mat.compute_sequence(x0)
assert_allclose(np.sum(u_seq), .95 * np.sum(x0), atol=1e-3)
assert_allclose(x_seq[:, -1], np.zeros_like(x0), atol=1e-3)
def test_stationary_mat(self):
x0 = np.random.randn(2) * 25
lq_mat = self.lq_mat
P, F, d = lq_mat.stationary_values()
f_answer = np.array([[-.95, -.95], [0., 0.]])
p_answer = np.array([[1., 0], [0., 0.]])
val_func_lq = np.dot(x0, P).dot(x0)
val_func_answer = x0[0]**2
assert_allclose(f_answer, F, atol=1e-3)
assert_allclose(val_func_lq, val_func_answer, atol=1e-3)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestLQControl)
unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite)
| bsd-3-clause |
4dn-dcic/fourfront | src/encoded/upgrade/file.py | 2 | 2394 | from snovault import (
upgrade_step,
)
@upgrade_step('file_fastq', '1', '2')
@upgrade_step('file_calibration', '1', '2')
@upgrade_step('file_microscopy', '1', '2')
@upgrade_step('file_processed', '1', '2')
@upgrade_step('file_reference', '1', '2')
def file_1_2(value, system):
file_format = value.get('file_format')
formats = system['registry']['collections']['FileFormat']
format_item = formats.get(file_format)
fuuid = None
try:
fuuid = str(format_item.uuid)
except AttributeError:
pass
if not fuuid:
other_format = formats.get('other')
fuuid = str(other_format.uuid)
note = value.get('notes', '')
note = note + ' FILE FORMAT: ' + file_format
value['notes'] = note
value['file_format'] = fuuid
# need to also check for extra files to upgrade_step
extras = value.get('extra_files')
if extras:
for i, extra in enumerate(extras):
eformat = extra.get('file_format')
eformat_item = formats.get(eformat)
efuuid = None
try:
efuuid = str(eformat_item.uuid)
except AttributeError:
pass
if not efuuid:
other_format = formats.get('other')
efuuid = str(other_format.uuid)
note = value.get('notes', '')
note = note + ' EXTRA FILE FORMAT: ' + str(i) + '-' + eformat
value['notes'] = note
value['extra_files'][i]['file_format'] = efuuid
@upgrade_step('file_processed', '2', '3')
@upgrade_step('file_vistrack', '1', '2')
def file_track_data_upgrade(value, system):
field_map = {
"dataset_type": "override_experiment_type",
"assay_info": "override_assay_info",
"replicate_identifiers": "override_replicate_info",
"biosource_name": "override_biosource_name",
"experiment_bucket": "override_experiment_bucket",
"project_lab": "override_lab_name"
}
for oldprop, newprop in field_map.items():
oldpropval = value.get(oldprop)
if oldpropval:
if oldprop == 'replicate_identifiers':
if len(oldpropval) > 1:
oldpropval = 'merged replicates'
else:
oldpropval = oldpropval[0]
value[newprop] = oldpropval
del value[oldprop]
| mit |
terrencepreilly/darglint | darglint/parse/grammars/sphinx_variables_section.py | 1 | 10525 | # Generated on 2020-07-25 08:37:49.972660
from darglint.parse.grammar import (
BaseGrammar,
P,
)
from darglint.errors import (
IndentError,
)
from darglint.token import (
TokenType,
)
from darglint.parse.identifiers import (
NoqaIdentifier,
)
from darglint.errors import (
EmptyDescriptionError,
)
class VariablesSectionGrammar(BaseGrammar):
productions = [
P("variables-section", ([], "vhead", "variables-section1", 0), ([], "vhead-no-follow", "newlines", 0), ([EmptyDescriptionError], "colon", "vhead-no-follow1", 0), ([EmptyDescriptionError], "colon", "vhead-no-follow4", 0)),
P("vhead", ([], "colon", "vhead0", 0), ([], "colon", "vhead2", 0)),
P("vhead-no-follow", ([EmptyDescriptionError], "colon", "vhead-no-follow1", 0), ([EmptyDescriptionError], "colon", "vhead-no-follow4", 0)),
P("variable-type-section", (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.INDENT, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("item-body", ([], "line", "item-body0", 2), ([], "line", "item-body1", 2), ([], "line", "item-body2", 2), ([], "word", "line", 2), ([], "word", "noqa-maybe", 2), ([NoqaIdentifier], "hash", "noqa", 2), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 2), (TokenType.INDENT, 2), (TokenType.COLON, 2), (TokenType.HASH, 2), (TokenType.LPAREN, 2), (TokenType.RPAREN, 2), (TokenType.WORD, 2), (TokenType.RAISES, 2), (TokenType.ARGUMENTS, 2), (TokenType.ARGUMENT_TYPE, 2), (TokenType.RETURNS, 2), (TokenType.RETURN_TYPE, 2), (TokenType.YIELDS, 2), (TokenType.YIELD_TYPE, 2), (TokenType.VARIABLES, 2), (TokenType.VARIABLE_TYPE, 2), (TokenType.NOQA, 2), (TokenType.OTHER, 2), (TokenType.RECEIVES, 2), (TokenType.WARNS, 2), (TokenType.SEE, 2), (TokenType.ALSO, 2), (TokenType.NOTES, 2), (TokenType.EXAMPLES, 2), (TokenType.REFERENCES, 2), (TokenType.HEADER, 2), ([IndentError], "line", "item-body7", 1)),
P("block-indented", ([], "paragraph-indented", "block-indented0", 0), ([], "paragraph-indented", "block-indented1", 0), ([], "indented", "paragraph-indented0", 0), ([], "indented", "line", 0)),
P("split-indented", ([], "newline", "split-indented0", 0), (TokenType.NEWLINE, 0)),
P("paragraph-indented", ([], "indented", "paragraph-indented0", 0), ([], "indented", "line", 0)),
P("indented", ([], "indent", "indents", 0), (TokenType.INDENT, 0)),
P("block", ([], "paragraph", "block0", 0), ([], "indents", "paragraph0", 0), ([], "indents", "line", 0), ([], "line", "paragraph2", 0), ([], "word", "line", 0), ([], "word", "noqa-maybe", 0), ([NoqaIdentifier], "hash", "noqa", 0), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 0), (TokenType.INDENT, 0), (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0), ([], "line", "paragraph1", 0)),
P("paragraph", ([], "indents", "paragraph0", 0), ([], "indents", "line", 0), ([], "line", "paragraph2", 0), ([], "word", "line", 0), ([], "word", "noqa-maybe", 0), ([NoqaIdentifier], "hash", "noqa", 0), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 0), (TokenType.INDENT, 0), (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0), ([], "line", "paragraph1", 0)),
P("line", ([], "word", "line", 0), ([], "word", "noqa-maybe", 0), ([NoqaIdentifier], "hash", "noqa", 0), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 0), (TokenType.INDENT, 0), (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("indents", ([], "indent", "indents", 0), (TokenType.INDENT, 0)),
P("split", ([], "newline", "split0", 0)),
P("newlines", ([], "newline", "newlines", 0), (TokenType.NEWLINE, 0)),
P("word", (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.INDENT, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("colon", (TokenType.COLON, 0)),
P("hash", (TokenType.HASH, 0)),
P("indent", (TokenType.INDENT, 0)),
P("newline", (TokenType.NEWLINE, 0)),
P("variables", (TokenType.VARIABLES, 0)),
P("noqa", (TokenType.NOQA, 0)),
P("noqa-maybe", ([NoqaIdentifier], "hash", "noqa", 0), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 0)),
P("noqa-head", ([], "hash", "noqa", 0)),
P("words", ([], "word", "words", 0), (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.INDENT, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("variables-section1", ([], "item-body", "newlines", 0), ([], "line", "item-body0", 2), ([], "line", "item-body1", 2), ([], "line", "item-body2", 2), ([], "word", "line", 2), ([], "word", "noqa-maybe", 2), ([NoqaIdentifier], "hash", "noqa", 2), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 2), (TokenType.INDENT, 2), (TokenType.COLON, 2), (TokenType.HASH, 2), (TokenType.LPAREN, 2), (TokenType.RPAREN, 2), (TokenType.WORD, 2), (TokenType.RAISES, 2), (TokenType.ARGUMENTS, 2), (TokenType.ARGUMENT_TYPE, 2), (TokenType.RETURNS, 2), (TokenType.RETURN_TYPE, 2), (TokenType.YIELDS, 2), (TokenType.YIELD_TYPE, 2), (TokenType.VARIABLES, 2), (TokenType.VARIABLE_TYPE, 2), (TokenType.NOQA, 2), (TokenType.OTHER, 2), (TokenType.RECEIVES, 2), (TokenType.WARNS, 2), (TokenType.SEE, 2), (TokenType.ALSO, 2), (TokenType.NOTES, 2), (TokenType.EXAMPLES, 2), (TokenType.REFERENCES, 2), (TokenType.HEADER, 2), ([IndentError], "line", "item-body7", 1)),
P("vhead0", ([], "variables", "vhead1", 0)),
P("vhead1", ([], "word", "colon", 0)),
P("vhead2", ([], "variables", "vhead3", 0)),
P("vhead3", ([], "variable-type-section", "vhead4", 0)),
P("vhead4", ([], "word", "colon", 0)),
P("vhead-no-follow1", ([], "variables", "vhead-no-follow2", 0)),
P("vhead-no-follow2", ([], "word", "colon", 0)),
P("vhead-no-follow4", ([], "variables", "vhead-no-follow5", 0)),
P("vhead-no-follow5", ([], "variable-type-section", "vhead-no-follow6", 0)),
P("vhead-no-follow6", ([], "word", "colon", 0)),
P("item-body0", ([], "newline", "block-indented", 0)),
P("item-body1", ([], "newlines", "block-indented", 0), ([], "paragraph-indented", "block-indented0", 0), ([], "paragraph-indented", "block-indented1", 0), ([], "indented", "paragraph-indented0", 0), ([], "indented", "line", 0)),
P("item-body2", ([], "newline", "item-body3", 0)),
P("item-body3", ([], "indent", "item-body4", 0)),
P("item-body4", ([], "newline", "item-body5", 0)),
P("item-body5", ([], "newlines", "block-indented", 0), ([], "paragraph-indented", "block-indented0", 0), ([], "paragraph-indented", "block-indented1", 0), ([], "indented", "paragraph-indented0", 0), ([], "indented", "line", 0)),
P("item-body7", ([], "newline", "block", 0)),
P("block-indented0", ([], "split", "block-indented", 0)),
P("block-indented1", ([], "split-indented", "block-indented", 0)),
P("split-indented0", ([], "indents", "newlines", 0), ([], "newline", "newlines", 0), (TokenType.NEWLINE, 0), ([], "indent", "indents", 0), (TokenType.INDENT, 0)),
P("paragraph-indented0", ([], "line", "paragraph-indented1", 0)),
P("paragraph-indented1", ([], "newline", "paragraph-indented", 0)),
P("block0", ([], "split", "block", 0)),
P("paragraph0", ([], "line", "paragraph1", 0)),
P("paragraph1", ([], "newline", "paragraph", 0)),
P("paragraph2", ([], "newline", "paragraph", 0)),
P("split0", ([], "newline", "newlines", 0), (TokenType.NEWLINE, 0)),
P("noqa-statement1", ([], "colon", "words", 0)),
]
start = "variables-section" | mit |
ContextLogic/luigi | test/contrib/bigquery_test.py | 5 | 6811 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Twitter Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
These are the unit tests for the BigQuery-luigi binding.
"""
import luigi
from luigi.contrib import bigquery
from helpers import unittest
from mock import MagicMock
PROJECT_ID = 'projectid'
DATASET_ID = 'dataset'
class TestRunQueryTask(bigquery.BigQueryRunQueryTask):
client = MagicMock()
query = ''' SELECT 'hello' as field1, 2 as field2 '''
table = luigi.Parameter()
def output(self):
return bigquery.BigQueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self.client)
class TestRunQueryTaskDontFlattenResults(TestRunQueryTask):
@property
def flatten_results(self):
return False
class TestRunQueryTaskWithRequires(bigquery.BigQueryRunQueryTask):
client = MagicMock()
table = luigi.Parameter()
def requires(self):
return TestRunQueryTask(table='table1')
@property
def query(self):
requires = self.requires().output().table
dataset = requires.dataset_id
table = requires.table_id
return 'SELECT * FROM [{dataset}.{table}]'.format(dataset=dataset, table=table)
def output(self):
return bigquery.BigQueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self.client)
class TestRunQueryTaskWithUdf(bigquery.BigqueryRunQueryTask):
client = MagicMock()
table = luigi.Parameter()
@property
def udf_resource_uris(self):
return ["gs://test/file1.js", "gs://test/file2.js"]
@property
def query(self):
return 'SELECT 1'
def output(self):
return bigquery.BigqueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self.client)
class TestRunQueryTaskWithoutLegacySql(bigquery.BigqueryRunQueryTask):
client = MagicMock()
table = luigi.Parameter()
@property
def use_legacy_sql(self):
return False
@property
def query(self):
return 'SELECT 1'
def output(self):
return bigquery.BigqueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self.client)
class TestExternalBigQueryTask(bigquery.ExternalBigQueryTask):
client = MagicMock()
def output(self):
return bigquery.BigQueryTarget(PROJECT_ID, DATASET_ID, 'table1', client=self.client)
class TestCreateViewTask(bigquery.BigQueryCreateViewTask):
client = MagicMock()
view = '''SELECT * FROM table LIMIT 10'''
def output(self):
return bigquery.BigQueryTarget(PROJECT_ID, DATASET_ID, 'view1', client=self.client)
class BigQueryTest(unittest.TestCase):
def test_bulk_complete(self):
parameters = ['table1', 'table2']
client = MagicMock()
client.dataset_exists.return_value = True
client.list_tables.return_value = ['table2', 'table3']
TestRunQueryTask.client = client
complete = list(TestRunQueryTask.bulk_complete(parameters))
self.assertEqual(complete, ['table2'])
def test_dataset_doesnt_exist(self):
client = MagicMock()
client.dataset_exists.return_value = False
TestRunQueryTask.client = client
complete = list(TestRunQueryTask.bulk_complete(['table1']))
self.assertEqual(complete, [])
def test_query_property(self):
task = TestRunQueryTask(table='table2')
task.client = MagicMock()
task.run()
(_, job), _ = task.client.run_job.call_args
query = job['configuration']['query']['query']
self.assertEqual(query, TestRunQueryTask.query)
def test_override_query_property(self):
task = TestRunQueryTaskWithRequires(table='table2')
task.client = MagicMock()
task.run()
(_, job), _ = task.client.run_job.call_args
query = job['configuration']['query']['query']
expected_table = '[' + DATASET_ID + '.' + task.requires().output().table.table_id + ']'
self.assertIn(expected_table, query)
self.assertEqual(query, task.query)
def test_query_udf(self):
task = TestRunQueryTaskWithUdf(table='table2')
task.client = MagicMock()
task.run()
(_, job), _ = task.client.run_job.call_args
udfs = [
{'resourceUri': 'gs://test/file1.js'},
{'resourceUri': 'gs://test/file2.js'},
]
self.assertEqual(job['configuration']['query']['userDefinedFunctionResources'], udfs)
def test_query_with_legacy_sql(self):
task = TestRunQueryTask(table='table2')
task.client = MagicMock()
task.run()
(_, job), _ = task.client.run_job.call_args
self.assertEqual(job['configuration']['query']['useLegacySql'], True)
def test_query_without_legacy_sql(self):
task = TestRunQueryTaskWithoutLegacySql(table='table2')
task.client = MagicMock()
task.run()
(_, job), _ = task.client.run_job.call_args
self.assertEqual(job['configuration']['query']['useLegacySql'], False)
def test_external_task(self):
task = TestExternalBigQueryTask()
self.assertIsInstance(task, luigi.ExternalTask)
self.assertIsInstance(task, bigquery.MixinBigQueryBulkComplete)
def test_create_view(self):
task = TestCreateViewTask()
task.client.get_view.return_value = None
self.assertFalse(task.complete())
task.run()
(table, view), _ = task.client.update_view.call_args
self.assertEqual(task.output().table, table)
self.assertEqual(task.view, view)
def test_update_view(self):
task = TestCreateViewTask()
task.client.get_view.return_value = 'some other query'
self.assertFalse(task.complete())
task.run()
(table, view), _ = task.client.update_view.call_args
self.assertEqual(task.output().table, table)
self.assertEqual(task.view, view)
def test_view_completed(self):
task = TestCreateViewTask()
task.client.get_view.return_value = task.view
self.assertTrue(task.complete())
def test_flatten_results(self):
task = TestRunQueryTask(table='table3')
self.assertTrue(task.flatten_results)
def test_dont_flatten_results(self):
task = TestRunQueryTaskDontFlattenResults(table='table3')
self.assertFalse(task.flatten_results)
| apache-2.0 |
aricchen/openHR | openerp/addons/portal_project/tests/__init__.py | 170 | 1124 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
checks = [
test_access_rights,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
public-ink/public-ink | server/appengine/lib/numpy/core/tests/test_function_base.py | 16 | 11429 | from __future__ import division, absolute_import, print_function
from numpy import (logspace, linspace, geomspace, dtype, array, sctypes,
arange, isnan, ndarray, sqrt, nextafter)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_allclose, suppress_warnings
)
class PhysicalQuantity(float):
def __new__(cls, value):
return float.__new__(cls, value)
def __add__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) + float(self))
__radd__ = __add__
def __sub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(self) - float(x))
def __rsub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) - float(self))
def __mul__(self, x):
return PhysicalQuantity(float(x) * float(self))
__rmul__ = __mul__
def __div__(self, x):
return PhysicalQuantity(float(self) / float(x))
def __rdiv__(self, x):
return PhysicalQuantity(float(x) / float(self))
class PhysicalQuantity2(ndarray):
__array_priority__ = 10
class TestLogspace(TestCase):
def test_basic(self):
y = logspace(0, 6)
assert_(len(y) == 50)
y = logspace(0, 6, num=100)
assert_(y[-1] == 10 ** 6)
y = logspace(0, 6, endpoint=0)
assert_(y[-1] < 10 ** 6)
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = logspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = logspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(logspace(a, b), logspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
ls = logspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0))
ls = logspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0, 1))
class TestGeomspace(TestCase):
def test_basic(self):
y = geomspace(1, 1e6)
assert_(len(y) == 50)
y = geomspace(1, 1e6, num=100)
assert_(y[-1] == 10 ** 6)
y = geomspace(1, 1e6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = geomspace(1, 1e6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
y = geomspace(8, 2, num=3)
assert_allclose(y, [8, 4, 2])
assert_array_equal(y.imag, 0)
y = geomspace(-1, -100, num=3)
assert_array_equal(y, [-1, -10, -100])
assert_array_equal(y.imag, 0)
y = geomspace(-100, -1, num=3)
assert_array_equal(y, [-100, -10, -1])
assert_array_equal(y.imag, 0)
def test_complex(self):
# Purely imaginary
y = geomspace(1j, 16j, num=5)
assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
assert_array_equal(y.real, 0)
y = geomspace(-4j, -324j, num=5)
assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
assert_array_equal(y.real, 0)
y = geomspace(1+1j, 1000+1000j, num=4)
assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
y = geomspace(-1+1j, -1000+1000j, num=4)
assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
# Logarithmic spirals
y = geomspace(-1, 1, num=3, dtype=complex)
assert_allclose(y, [-1, 1j, +1])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(0+3j, 3+0j, 3)
assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
y = geomspace(-3+0j, 0-3j, 3)
assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(-2-3j, 5+7j, 7)
assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
2.08885354-4.34146838j, 4.58345529-3.16355218j,
6.41401745-0.55233457j, 6.75707386+3.11795092j,
5+7j])
# Type promotion should prevent the -5 from becoming a NaN
y = geomspace(3j, -5, 2)
assert_allclose(y, [3j, -5])
y = geomspace(-5, 3j, 2)
assert_allclose(y, [-5, 3j])
def test_dtype(self):
y = geomspace(1, 1e6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = geomspace(1, 1e6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = geomspace(1, 1e6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
# Native types
y = geomspace(1, 1e6, dtype=float)
assert_equal(y.dtype, dtype('float_'))
y = geomspace(1, 1e6, dtype=complex)
assert_equal(y.dtype, dtype('complex'))
def test_array_scalar(self):
lim1 = array([120, 100], dtype="int8")
lim2 = array([-120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = geomspace(lim1[0], lim1[1], 5)
t2 = geomspace(lim2[0], lim2[1], 5)
t3 = geomspace(lim3[0], lim3[1], 5)
t4 = geomspace(120.0, 100.0, 5)
t5 = geomspace(-120.0, -100.0, 5)
t6 = geomspace(1200.0, 1000.0, 5)
# t3 uses float32, t6 uses float64
assert_allclose(t1, t4, rtol=1e-2)
assert_allclose(t2, t5, rtol=1e-2)
assert_allclose(t3, t6, rtol=1e-5)
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
gs = geomspace(a, b)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0))
gs = geomspace(a, b, 1)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0, 1))
def test_bounds(self):
assert_raises(ValueError, geomspace, 0, 10)
assert_raises(ValueError, geomspace, 10, 0)
assert_raises(ValueError, geomspace, 0, 0)
class TestLinspace(TestCase):
def test_basic(self):
y = linspace(0, 10)
assert_(len(y) == 50)
y = linspace(2, 10, num=100)
assert_(y[-1] == 10)
y = linspace(2, 10, endpoint=0)
assert_(y[-1] < 10)
assert_raises(ValueError, linspace, 0, 10, num=-1)
def test_corner(self):
y = list(linspace(0, 1, 1))
assert_(y == [0.0], y)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*safely interpreted as an integer")
y = list(linspace(0, 1, 2.5))
assert_(y == [0.0, 1.0])
def test_type(self):
t1 = linspace(0, 1, 0).dtype
t2 = linspace(0, 1, 1).dtype
t3 = linspace(0, 1, 2).dtype
assert_equal(t1, t2)
assert_equal(t2, t3)
def test_dtype(self):
y = linspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = linspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = linspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_array_scalar(self):
lim1 = array([-120, 100], dtype="int8")
lim2 = array([120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = linspace(lim1[0], lim1[1], 5)
t2 = linspace(lim2[0], lim2[1], 5)
t3 = linspace(lim3[0], lim3[1], 5)
t4 = linspace(-120.0, 100.0, 5)
t5 = linspace(120.0, -100.0, 5)
t6 = linspace(1200.0, 1000.0, 5)
assert_equal(t1, t4)
assert_equal(t2, t5)
assert_equal(t3, t6)
def test_complex(self):
lim1 = linspace(1 + 2j, 3 + 4j, 5)
t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
lim2 = linspace(1j, 10, 5)
t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j])
assert_equal(lim1, t1)
assert_equal(lim2, t2)
def test_physical_quantities(self):
a = PhysicalQuantity(0.0)
b = PhysicalQuantity(1.0)
assert_equal(linspace(a, b), linspace(0.0, 1.0))
def test_subclass(self):
a = array(0).view(PhysicalQuantity2)
b = array(1).view(PhysicalQuantity2)
ls = linspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, linspace(0.0, 1.0))
ls = linspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, linspace(0.0, 1.0, 1))
def test_array_interface(self):
# Regression test for https://github.com/numpy/numpy/pull/6659
# Ensure that start/stop can be objects that implement
# __array_interface__ and are convertible to numeric scalars
class Arrayish(object):
"""
A generic object that supports the __array_interface__ and hence
can in principle be converted to a numeric scalar, but is not
otherwise recognized as numeric, but also happens to support
multiplication by floats.
Data should be an object that implements the buffer interface,
and contains at least 4 bytes.
"""
def __init__(self, data):
self._data = data
@property
def __array_interface__(self):
# Ideally should be `'shape': ()` but the current interface
# does not allow that
return {'shape': (1,), 'typestr': '<i4', 'data': self._data,
'version': 3}
def __mul__(self, other):
# For the purposes of this test any multiplication is an
# identity operation :)
return self
one = Arrayish(array(1, dtype='<i4'))
five = Arrayish(array(5, dtype='<i4'))
assert_equal(linspace(one, five), linspace(1, 5))
def test_denormal_numbers(self):
# Regression test for gh-5437. Will probably fail when compiled
# with ICC, which flushes denormals to zero
for ftype in sctypes['float']:
stop = nextafter(ftype(0), ftype(1)) * 5 # A denormal number
assert_(any(linspace(0, stop, 10, endpoint=False, dtype=ftype)))
def test_equivalent_to_arange(self):
for j in range(1000):
assert_equal(linspace(0, j, j+1, dtype=int),
arange(j+1, dtype=int))
def test_retstep(self):
y = linspace(0, 1, 2, retstep=True)
assert_(isinstance(y, tuple) and len(y) == 2)
for num in (0, 1):
for ept in (False, True):
y = linspace(0, 1, num, endpoint=ept, retstep=True)
assert_(isinstance(y, tuple) and len(y) == 2 and
len(y[0]) == num and isnan(y[1]),
'num={0}, endpoint={1}'.format(num, ept))
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
ismael-liceras/php-invaders | gameengine.py | 1 | 12699 | import pygame
from pygame.locals import *
from player import Player
from stagegenerator import StageGenerator
from gamestatus import GameStatus
from gameconfig import GameConfig
class GameEngine():
def __init__(self, modes=None):
pygame.init()
self.screen = pygame.display.set_mode((800, 600))
#Modes
self.cheater_mode = False
if modes is not None and 'cheater' in modes and modes['cheater'] is True:
self.cheater_mode = True
# Stage Generator
self.stage_generator = StageGenerator()
# Game Status
self.game_status = GameStatus()
pygame.display.set_caption('PHP Invaders')
pygame.mouse.set_visible(0)
self.stage_quantity = GameConfig.get_stage_quantity()
self.clock = pygame.time.Clock()
self.enemy_box = None
#Screens
self.screen_type_data = {"mainmenu": 1, "playing": 2, "pause": 3, "waiting4stage": 4, "gameover": 5}
self.screen_type = None
#Sprites
self.sprites = {
'friendly_fire': pygame.sprite.RenderPlain(),
'enemy_fire': pygame.sprite.RenderPlain(),
'enemies': pygame.sprite.RenderPlain(),
'friends': pygame.sprite.RenderPlain(),
'menu_options': pygame.sprite.RenderPlain(),
'special_items': pygame.sprite.RenderPlain(),
'prisoners': pygame.sprite.RenderPlain(),
'others': pygame.sprite.RenderPlain(),
}
self.player = Player()
self.player.add(self.sprites['friends'])
#Others
self.show_main_stage()
self.screen.blit(self.stage_generator.get_background(), (0, 0))
pygame.display.flip()
def is_current_screen(self, key):
return self.screen_type_data[key] == self.screen_type
def set_current_screen(self, key):
self.screen_type = self.screen_type_data[key]
def clock_tick(self):
self.clock.tick(60)
def reset_from_gameover(self):
self.sprites['enemies'].empty()
self.sprites['enemy_fire'].empty()
self.sprites['prisoners'].empty()
self.player.reset()
self.game_status.reset()
self.stage_generator.reset()
def show_main_stage(self):
self.screen_type = self.screen_type_data["mainmenu"]
self.sprites['menu_options'] = self.stage_generator.show_main_stage()
#Game Status Bar
def refresh_status_bar(self):
screen = pygame.display.get_surface()
offset_y = screen.get_height()-20
pygame.draw.rect(self.stage_generator.get_background(), (0, 0, 0), pygame.Rect(0, screen.get_height() - 25, screen.get_width(), 25), 0);
if pygame.font:
font = pygame.font.Font(None, 20)
fontcolor = (250, 250, 250)
status_data = [
("ver. " + GameConfig.get_version() + " by Ismael Liceras", 600),
("STAGE " + str(self.game_status.get_stage()), 10),
("LIVES " + str(self.game_status.get_lives()), 110),
("TIME " + str(self.game_status.get_time()), 210),
("SCORE " + str(self.game_status.get_score()), 310)
]
for data_piece in status_data:
text = font.render(data_piece[0], 1, fontcolor)
self.stage_generator.get_background().blit(text, (data_piece[1], offset_y))
def handle_lifecycle_events(self, event):
if event.type == QUIT:
return -1
elif event.type == KEYDOWN and event.key == K_ESCAPE:
return -1
elif event.type == KEYDOWN and event.key == K_HOME:
self.reset_from_gameover()
self.show_main_stage()
elif event.type == KEYDOWN and event.key == K_PAUSE:
if self.is_current_screen("pause"):
self.set_current_screen("playing")
self.stage_generator.refresh_background()
elif self.is_current_screen("playing"):
self.set_current_screen("pause")
self.stage_generator.show_pause_banner()
def handle_playerops_events(self, event):
if event.type == KEYDOWN and not self.is_current_screen('pause'):
if event.key == K_LEFT:
self.player.go_left()
elif event.key == K_RIGHT:
self.player.go_right()
elif event.key == K_SPACE and not self.is_current_screen('gameover'):
shoot = self.player.do_shoot()
shoot.add(self.sprites['friendly_fire'])
elif event.type == KEYUP and \
((event.key == K_LEFT and self.player.get_direction() == 'left') or
(event.key == K_RIGHT and self.player.get_direction() == 'right')):
self.player.stop_flying()
def handle_timer_events(self, event):
#Game's time
if event.type == USEREVENT + 1:
if self.is_current_screen("playing"):
time = self.game_status.run_1_sec()
if time == 0:
self.go_to_gameover()
#Ready's screen
elif event.type == USEREVENT + 2:
self.sprites['enemies'], self.sprites['prisoners'],\
self.sprites['enemy_fire'], self.enemy_box = self.stage_generator.start_next_stage()
pygame.time.set_timer(USEREVENT+1, 1000)
self.set_current_screen('playing')
self.stage_generator.refresh_background()
def handle_cheat_mode_events(self, event):
if event.type == KEYDOWN and self.is_current_screen('playing'):
if event.key == K_n:
print "Next Stage!"
for dead_enemy in self.sprites['enemies']:
dead_enemy.add(self.sprites['others'])
dead_enemy.remove(self.sprites['enemies'])
dead_enemy.kill_enemy()
def handle_events(self):
for event in pygame.event.get():
# Exit and pause
if self.handle_lifecycle_events(event) == -1:
return -1
# Player's ops
self.handle_playerops_events(event)
# Timer's events
self.handle_timer_events(event)
# Cheat mode events
if self.cheater_mode is True:
self.handle_cheat_mode_events(event)
def check_player2enemies_collision(self):
collision = pygame.sprite.groupcollide(self.sprites['friendly_fire'], self.sprites['enemies'], True, False)
if len(collision) > 0:
key, value = collision.popitem()
dead_enemy = value[0]
dead_enemy.add(self.sprites['others'])
dead_enemy.remove(self.sprites['enemies'])
dead_enemy.kill_enemy()
self.game_status.add_score(dead_enemy.get_score())
special_item = dead_enemy.drop_special_item()
if special_item is not None:
special_item.add(self.sprites['special_items'])
def check_enemies2player_collision(self):
collision = pygame.sprite.groupcollide(self.sprites['enemy_fire'], self.sprites['friends'], True, False)
if len(collision) > 0:
if self.player.is_invincible() is not True:
self.hit_player()
def hit_player(self):
if self.game_status.remove_life() == 0:
self.go_to_gameover()
else:
self.player.shocked()
self.game_status.set_stage_invictus(False)
def check_menu_collision(self):
collision = \
pygame.sprite.groupcollide(self.sprites['friendly_fire'], self.sprites['menu_options'], True, True)
if len(collision) > 0:
key, value = collision.popitem()
option_choosen = value[0]
self.sprites['menu_options'].empty()
if option_choosen.get_type_id() == "play":
self.set_current_screen('waiting4stage')
self.go_to_next_stage()
elif option_choosen.get_type_id() == "about":
self.stage_generator.refresh_background()
self.sprites['menu_options'] = self.stage_generator.show_about_stage()
elif option_choosen.get_type_id() == "rules":
self.sprites['menu_options'] = self.stage_generator.show_rules_stage()
elif option_choosen.get_type_id() == "back":
self.stage_generator.refresh_background()
self.sprites['menu_options'] = self.stage_generator.show_main_stage()
elif option_choosen.get_type_id() == "exit":
exit()
def check_player2specialitem(self):
collision = pygame.sprite.groupcollide(self.sprites['friends'], self.sprites['special_items'], False, True)
if len(collision) > 0:
            #Player picks up a special item!
key, value = collision.popitem()
item = value[0]
item.do_action(self)
def check_player2prisoners_collision(self):
collision = pygame.sprite.groupcollide(self.sprites['friendly_fire'], self.sprites['prisoners'], True, True)
if len(collision) > 0:
self.hit_player()
def check_collisions(self):
if self.is_current_screen('playing'):
# Player shoots enemies
self.check_player2enemies_collision()
# Player shoots prisoners
self.check_player2prisoners_collision()
# Enemies shoot player
self.check_enemies2player_collision()
elif self.is_current_screen('mainmenu'):
self.check_menu_collision()
if self.is_current_screen('playing') or self.is_current_screen('waiting4stage'):
self.check_player2specialitem()
def go_to_next_stage(self, bonus=None):
self.set_current_screen("waiting4stage")
self.stage_generator.get_ready_to_next_stage(self.game_status.get_score(), self.game_status.get_stage_score(), bonus)
self.game_status.reset_to_next_stage(self.stage_generator.get_current_stage())
def check_stage_clear(self):
if self.is_current_screen('playing') and len(self.sprites['enemies']) == 0:
bonus = self.add_stage_bonus()
if self.stage_generator.get_current_stage() >= self.stage_quantity:
self.goto_to_victory()
else:
self.go_to_next_stage(bonus)
def add_stage_bonus(self):
bonus = {}
bonus['time'] = self.game_status.add_bonus_time()
if self.game_status.get_stage_invictus():
bonus['invictus'] = self.game_status.add_bonus_invictus()
if len(self.sprites['prisoners']) > 0:
bonus['prisoners'] = self.game_status.add_bonus_prisoners(len(self.sprites['prisoners']))
return bonus
# GameEngine's main method
def do_play(self):
self.clock_tick()
self.check_collisions()
if self.is_current_screen('playing'):
self.check_stage_clear()
self.update_sprites()
self.draw_everything()
def draw_everything(self):
if not self.is_current_screen('mainmenu'):
self.refresh_status_bar()
self.screen.blit(self.stage_generator.get_background(), (0, 0))
self.sprites['friendly_fire'].draw(self.screen)
self.sprites['enemy_fire'].draw(self.screen)
self.sprites['enemies'].draw(self.screen)
self.sprites['friends'].draw(self.screen)
self.sprites['menu_options'].draw(self.screen)
self.sprites['special_items'].draw(self.screen)
self.sprites['prisoners'].draw(self.screen)
self.sprites['others'].draw(self.screen)
pygame.display.flip()
def update_sprites(self):
if not self.is_current_screen('pause'):
self.update_enemies()
self.sprites['friendly_fire'].update()
self.sprites['enemy_fire'].update()
self.sprites['enemies'].update()
self.sprites['friends'].update()
self.sprites['special_items'].update()
self.sprites['prisoners'].update()
self.sprites['others'].update()
def update_enemies(self):
if self.enemy_box is not None:
self.enemy_box.update()
def go_to_gameover(self):
self.player.kill_player()
self.set_current_screen("gameover")
self.stage_generator.refresh_background()
self.stage_generator.show_gameover_banner(self.game_status.score, self.game_status.stage)
def goto_to_victory(self):
self.player.make_winner()
self.set_current_screen("gameover")
self.stage_generator.refresh_background()
self.stage_generator.show_victory_banner(self.game_status.score) | mit |
AutorestCI/azure-sdk-for-python | azure-mgmt-sql/azure/mgmt/sql/models/location_capabilities.py | 2 | 1723 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LocationCapabilities(Model):
"""The capabilities for a location.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: The location name.
:vartype name: str
:ivar status: Azure SQL Database's status for the location. Possible
values include: 'Visible', 'Available', 'Default', 'Disabled'
:vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus
:ivar supported_server_versions: The list of supported server versions.
:vartype supported_server_versions:
list[~azure.mgmt.sql.models.ServerVersionCapability]
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'supported_server_versions': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'CapabilityStatus'},
'supported_server_versions': {'key': 'supportedServerVersions', 'type': '[ServerVersionCapability]'},
}
def __init__(self):
super(LocationCapabilities, self).__init__()
self.name = None
self.status = None
self.supported_server_versions = None
| mit |
nguyentran/openviber | third_party/libjingle/talk/third_party/gtest/test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
Forage/Gramps | gramps/plugins/docgen/rtfdoc.py | 1 | 21285 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2011 Adam Stein <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from __future__ import print_function
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
import logging
LOG = logging.getLogger(".rtfdoc")
#------------------------------------------------------------------------
#
# Load the base BaseDoc class
#
#------------------------------------------------------------------------
from gramps.gen.plug.docgen import (BaseDoc, TextDoc, FONT_SERIF, PARA_ALIGN_RIGHT,
PARA_ALIGN_CENTER, PARA_ALIGN_JUSTIFY,
URL_PATTERN)
from gramps.gen.utils.image import image_size, image_actual_size, resize_to_jpeg_buffer
from gramps.gen.errors import ReportError
from gramps.gen.constfunc import cuni
#------------------------------------------------------------------------
#
# Set up to make links clickable
#
#------------------------------------------------------------------------
_CLICKABLE = r'''{\\field{\\*\\fldinst HYPERLINK "\1"}{\\fldrslt \1}}'''
#------------------------------------------------------------------------
#
# RTF uses a unit called "twips" for its measurements. According to the
# RTF specification, 1 point is 20 twips. This routine converts
# centimeters to twips.
#
# 2.54 cm/inch 72pts/inch, 20twips/pt
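# (Illustrative check, not in the original comment: twips(2.54), i.e. one
# inch, evaluates to int(72.5)*20 == 1440 twips.)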
#
#------------------------------------------------------------------------
def twips(cm):
return int(((cm/2.54)*72)+0.5)*20
#------------------------------------------------------------------------
#
# Rich Text Format Document interface. The current interface does not
# use style sheets. Instead it writes raw formatting.
#
#------------------------------------------------------------------------
class RTFDoc(BaseDoc,TextDoc):
#--------------------------------------------------------------------
#
# Opens the file, and writes the header. Builds the color and font
# tables. Fonts are chosen using the MS TrueType fonts, since it
# is assumed that if you are generating RTF, you are probably
# targeting Word. This generator assumes a Western Europe character
# set.
#
#--------------------------------------------------------------------
def open(self,filename):
if filename[-4:] != ".rtf":
self.filename = filename + ".rtf"
else:
self.filename = filename
try:
self.f = open(self.filename,"w")
except IOError as msg:
errmsg = "%s\n%s" % (_("Could not create %s") % self.filename, msg)
raise ReportError(errmsg)
except:
raise ReportError(_("Could not create %s") % self.filename)
style_sheet = self.get_style_sheet()
self.f.write(
'{\\rtf1\\ansi\\ansicpg1252\\deff0\n'
'{\\fonttbl\n'
'{\\f0\\froman\\fcharset0\\fprq0 Times New Roman;}\n'
'{\\f1\\fswiss\\fcharset0\\fprq0 Arial;}}\n'
            '{\\colortbl\n'
)
self.color_map = {}
index = 1
self.color_map[(0,0,0)] = 0
self.f.write('\\red0\\green0\\blue0;')
for style_name in style_sheet.get_paragraph_style_names():
style = style_sheet.get_paragraph_style(style_name)
fgcolor = style.get_font().get_color()
bgcolor = style.get_background_color()
if fgcolor not in self.color_map:
self.color_map[fgcolor] = index
self.f.write('\\red%d\\green%d\\blue%d;' % fgcolor)
index += 1
if bgcolor not in self.color_map:
self.f.write('\\red%d\\green%d\\blue%d;' % bgcolor)
self.color_map[bgcolor] = index
index += 1
self.f.write('}\n')
self.f.write(
'\\kerning0\\cf0\\viewkind1' +
'\\paperw%d' % twips(self.paper.get_size().get_width()) +
'\\paperh%d' % twips(self.paper.get_size().get_height()) +
'\\margl%d' % twips(self.paper.get_left_margin()) +
'\\margr%d' % twips(self.paper.get_right_margin()) +
'\\margt%d' % twips(self.paper.get_top_margin()) +
'\\margb%d' % twips(self.paper.get_bottom_margin()) +
'\\widowctl\n'
)
self.in_table = 0
self.text = ""
#--------------------------------------------------------------------
#
# Write the closing brace, and close the file.
#
#--------------------------------------------------------------------
def close(self):
self.f.write('}\n')
self.f.close()
#--------------------------------------------------------------------
#
# Force a section page break
#
#--------------------------------------------------------------------
def end_page(self):
self.f.write('\\sbkpage\n')
#--------------------------------------------------------------------
#
# Starts a paragraph. Instead of using a style sheet, generate the
    # style for each paragraph on the fly. Not ideal, but it
# does work.
#
#--------------------------------------------------------------------
def start_paragraph(self,style_name,leader=None):
self.opened = 0
style_sheet = self.get_style_sheet()
p = style_sheet.get_paragraph_style(style_name)
# build font information
f = p.get_font()
size = f.get_size()*2
bgindex = self.color_map[p.get_background_color()]
fgindex = self.color_map[f.get_color()]
if f.get_type_face() == FONT_SERIF:
self.font_type = '\\f0'
else:
self.font_type = '\\f1'
self.font_type += '\\fs%d\\cf%d\\cb%d' % (size,fgindex,bgindex)
if f.get_bold():
self.font_type += "\\b"
if f.get_underline():
self.font_type += "\\ul"
if f.get_italic():
self.font_type += "\\i"
# build paragraph information
if not self.in_table:
self.f.write('\\pard')
if p.get_alignment() == PARA_ALIGN_RIGHT:
self.f.write('\\qr')
elif p.get_alignment() == PARA_ALIGN_CENTER:
self.f.write('\\qc')
self.f.write(
'\\ri%d' % twips(p.get_right_margin()) +
'\\li%d' % twips(p.get_left_margin()) +
'\\fi%d' % twips(p.get_first_indent())
)
if p.get_alignment() == PARA_ALIGN_JUSTIFY:
self.f.write('\\qj')
if p.get_padding():
self.f.write('\\sa%d' % twips(p.get_padding()/2.0))
if p.get_top_border():
self.f.write('\\brdrt\\brdrs')
if p.get_bottom_border():
self.f.write('\\brdrb\\brdrs')
if p.get_left_border():
self.f.write('\\brdrl\\brdrs')
if p.get_right_border():
self.f.write('\\brdrr\\brdrs')
if p.get_first_indent():
self.f.write('\\fi%d' % twips(p.get_first_indent()))
if p.get_left_margin():
self.f.write('\\li%d' % twips(p.get_left_margin()))
if p.get_right_margin():
self.f.write('\\ri%d' % twips(p.get_right_margin()))
if leader:
self.opened = 1
self.f.write('\\tx%d' % twips(p.get_left_margin()))
self.f.write('{%s ' % self.font_type)
self.write_text(leader)
self.f.write(self.text)
self.text = ""
self.f.write('\\tab}')
self.opened = 0
#--------------------------------------------------------------------
#
# Ends a paragraph. Care has to be taken to make sure that the
# braces are closed properly. The self.opened flag is used to indicate
# if braces are currently open. If the last write was the end of
# a bold-faced phrase, braces may already be closed.
#
#--------------------------------------------------------------------
def end_paragraph(self):
# FIXME: I don't understand why no end paragraph marker is output when
# we are inside a table. Since at least version 3.2.2, this seems to mean that
# there is no new paragraph after the first line of a table entry.
# For example in the birth cell, the first paragraph should be the
# description (21 Jan 1900 in London); if there is a note following this,
# there is no newline between the description and the note.
if not self.in_table:
self.f.write(self.text)
LOG.debug("end_paragraph: opened: %d write: %s" %
(self.opened,
self.text + '}' if self.opened else "" + "newline"))
if self.opened:
self.f.write('}')
self.opened = 0
self.f.write('\n\\par')
self.text = ""
else:
if self.text == "":
self.write_text(" ")
self.text += '}'
#--------------------------------------------------------------------
#
# Inserts a manual page break
#
#--------------------------------------------------------------------
def page_break(self):
self.f.write('\\page\n')
#--------------------------------------------------------------------
#
# Starts boldfaced text, enclosed the braces
#
#--------------------------------------------------------------------
def start_bold(self):
LOG.debug("start_bold: opened: %d saved text: %s" %
(self.opened,
'}' if self.opened else "" + '{%s\\b ' % self.font_type))
if self.opened:
self.text += '}'
self.text += '{%s\\b ' % self.font_type
self.opened = 1
#--------------------------------------------------------------------
#
# Ends boldfaced text, closing the braces
#
#--------------------------------------------------------------------
def end_bold(self):
LOG.debug("end_bold: opened: %d saved text: %s" %
(self.opened,
self.text + '}'))
if not self.opened == 1:
print(self.opened)
raise RuntimeError
self.opened = 0
self.text += '}'
def start_superscript(self):
self.text += '{{\\*\\updnprop5801}\\up10 '
def end_superscript(self):
self.text += '}'
#--------------------------------------------------------------------
#
# Start a table. Grab the table style, and store it. Keep a flag to
# indicate that we are in a table. This helps us deal with paragraphs
# internal to a table. RTF does not require anything to start a
# table, since a table is treated as a bunch of rows.
#
#--------------------------------------------------------------------
def start_table(self, name,style_name):
self.in_table = 1
styles = self.get_style_sheet()
self.tbl_style = styles.get_table_style(style_name)
#--------------------------------------------------------------------
#
# End a table. Turn off the table flag
#
#--------------------------------------------------------------------
def end_table(self):
self.in_table = 0
#--------------------------------------------------------------------
#
# Start a row. RTF uses the \trowd to start a row. RTF also specifies
# all the cell data after it has specified the cell definitions for
# the row. Therefore it is necessary to keep a list of cell contents
# that is to be written after all the cells are defined.
#
#--------------------------------------------------------------------
def start_row(self):
self.contents = []
self.cell = 0
self.prev = 0
self.cell_percent = 0.0
self.f.write('\\trowd\n')
#--------------------------------------------------------------------
#
# End a row. Write the cell contents, separated by the \cell marker,
# then terminate the row
#
#--------------------------------------------------------------------
def end_row(self):
self.f.write('{')
for line in self.contents:
self.f.write(line)
self.f.write('\\cell ')
self.f.write('}\\pard\\intbl\\row\n')
#--------------------------------------------------------------------
#
# Start a cell. Dump out the cell specifics, such as borders. Cell
# widths are kind of interesting. RTF doesn't specify how wide a cell
    # is, but rather where its right edge is in relationship to the
    # left margin. This means that each cell is the cumulative width of
    # the previous cells plus its own width.
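    #
    # For example (illustrative figures, not from the original comments):
    # with a usable width of 15 cm and column widths of 30% and 70%, the
    # first cell's right edge is written as \cellx at twips(4.5) and the
    # second at twips(15.0), both measured from the left margin.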
#
#--------------------------------------------------------------------
def start_cell(self,style_name,span=1):
styles = self.get_style_sheet()
s = styles.get_cell_style(style_name)
self.remain = span -1
if s.get_top_border():
self.f.write('\\clbrdrt\\brdrs\\brdrw10\n')
if s.get_bottom_border():
self.f.write('\\clbrdrb\\brdrs\\brdrw10\n')
if s.get_left_border():
self.f.write('\\clbrdrl\\brdrs\\brdrw10\n')
if s.get_right_border():
self.f.write('\\clbrdrr\\brdrs\\brdrw10\n')
table_width = float(self.paper.get_usable_width())
for cell in range(self.cell,self.cell+span):
self.cell_percent += float(self.tbl_style.get_column_width(cell))
cell_width = twips((table_width * self.cell_percent)/100.0)
        self.f.write('\\cellx%d\\pard\\intbl\n' % cell_width)
self.cell += 1
#--------------------------------------------------------------------
#
# End a cell. Save the current text in the content lists, since data
# must be saved until all cells are defined.
#
#--------------------------------------------------------------------
def end_cell(self):
self.contents.append(self.text)
self.text = ""
#--------------------------------------------------------------------
#
# Add a photo. Embed the photo in the document. Use the Python
# imaging library to load and scale the photo. The image is converted
# to JPEG, since it is smaller, and supported by RTF. The data is
# dumped as a string of HEX numbers.
#
#--------------------------------------------------------------------
def add_media_object(self, name, pos, x_cm, y_cm, alt='', style_name=None, crop=None):
nx, ny = image_size(name)
if (nx, ny) == (0,0):
return
(act_width, act_height) = image_actual_size(x_cm, y_cm, nx, ny)
act_width = twips(act_width)
act_height = twips(act_height)
size = [act_width, act_height]
buf = resize_to_jpeg_buffer(name, size, crop=crop)
act_width = size[0] # In case it changed because of cropping or keeping the ratio
act_height = size[1]
        self.f.write('{\\*\\shppict{\\pict\\jpegblip')
self.f.write('\\picwgoal%d\\pichgoal%d\n' % (act_width,act_height))
index = 1
for i in buf:
self.f.write('%02x' % ord(i))
if index%32==0:
self.f.write('\n')
index = index+1
self.f.write('}}\\par\n')
if len(alt):
self.f.write('%s\n\\par\n' % '\\par'.join(alt))
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
Convenience function to write a styledtext to the RTF doc.
styledtext : assumed a StyledText object to write
format : = 0 : Flowed, = 1 : Preformatted
style_name : name of the style to use for default presentation
contains_html: bool, the backend should not check if html is present.
If contains_html=True, then the textdoc is free to handle that in
some way. Eg, a textdoc could remove all tags, or could make sure
a link is clickable. RTFDoc prints the html without handling it
links: bool, make URLs clickable if True
"""
text = str(styledtext)
self.start_paragraph(style_name)
linenb = 1
for line in text.split('\n'):
[line, sigcount] = process_spaces(line, format)
if sigcount == 0:
if self.in_table:
# # Add LF when in table as in indiv_complete report
self.write_text('\n')
self.end_paragraph()
self.start_paragraph(style_name)
linenb = 1
else:
if ( linenb > 1 ):
self.write_text('\\line ')
self.write_text(line, links=links)
linenb += 1
# FIXME: I don't understand why these newlines are necessary.
# It may be related to the behaviour of end_paragraph inside tables, and
# write_text converting \n to end paragraph.
# This code prevents the whole document going wrong, but seems to produce an extra
# paragraph mark at the end of each table cell.
if self.in_table:
# # Add LF when in table as in indiv_complete report
self.write_text('\n')
self.end_paragraph()
#--------------------------------------------------------------------
#
# Writes text. If braces are not currently open, open them. Loop
# character by character (terribly inefficient, but it works). If a
# character is 8 bit (>127), convert it to a hex representation in
# the form of \`XX. Make sure to escape braces.
#
#--------------------------------------------------------------------
def write_text(self, text, mark=None, links=False):
# Convert to unicode, just in case it's not. Fix of bug 2449.
text = cuni(text)
text = text.replace('\n','\n\\par ')
LOG.debug("write_text: opened: %d input text: %s" %
(self.opened,
text))
if self.opened == 0:
self.opened = 1
self.text += '{%s ' % self.font_type
for i in text:
if ord(i) > 127:
if ord(i) < 256:
self.text += '\\\'%2x' % ord(i)
else:
# If (uni)code with more than 8 bits:
                    # RTF requires values in decimal, not hex.
self.text += '\\uc1\\u%d\\uc0' % ord(i)
elif i == '{' or i == '}' :
self.text += '\\%s' % i
else:
self.text += i
if links == True:
import re
self.text = re.sub(URL_PATTERN, _CLICKABLE, self.text)
LOG.debug("write_text, exit: opened: %d saved text: %s" %
(self.opened,
self.text))
def process_spaces(line, format):
"""
Function to process spaces in text lines for flowed and pre-formatted notes.
line : text to process
format : = 0 : Flowed, = 1 : Preformatted
If the text is flowed (format==0), then leading spaces
are removed, and multiple spaces are reduced to one.
    If the text is pre-formatted (format==1), then all spaces are preserved.
Note that xml is just treated like any other text,
because it will be from the original note, and it is just printed, not interpreted.
Returns the processed text, and the number of significant (i.e. non-white-space) chars.
"""
txt = ""
xml = False
space = False
sigcount = 0
# we loop through every character, which is very inefficient, but an attempt to use
# a regex replace didn't always work.
for char in line:
if char == " " or char == "\t":
if format == 1:
txt += char
elif format == 0 and sigcount == 0:
pass
elif format == 0 and space == False:
space = True
txt += char
elif format == 0 and space == True:
pass
else:
sigcount += 1
space = False
txt += char
return [txt, sigcount]
| gpl-2.0 |
taedla01/MissionPlanner | Lib/dumbdbm.py | 63 | 9070 | """A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
import os as _os
import __builtin__
import UserDict
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database(UserDict.DictMixin):
# The on-disk directory and data files can remain in mutually
# inconsistent states for an arbitrarily long time (see comments
# at the end of __setitem__). This is only repaired when _commit()
# gets called. One place _commit() gets called is from __del__(),
# and if that occurs at program shutdown time, module globals may
# already have gotten rebound to None. Since it's crucial that
# _commit() finish successfully, we can't ignore shutdown races
# here, and _commit() must not reference any globals.
_os = _os # for _commit()
_open = _open # for _commit()
def __init__(self, filebasename, mode):
self._mode = mode
# The directory file is a text file. Each line looks like
# "%r, (%d, %d)\n" % (key, pos, siz)
# where key is the string key, pos is the offset into the dat
# file of the associated value's first byte, and siz is the number
# of bytes in the associated value.
self._dirfile = filebasename + _os.extsep + 'dir'
# The data file is a binary file pointed into by the directory
# file, and holds the values associated with keys. Each value
# begins at a _BLOCKSIZE-aligned byte offset, and is a raw
# binary 8-bit string value.
self._datfile = filebasename + _os.extsep + 'dat'
self._bakfile = filebasename + _os.extsep + 'bak'
# The index is an in-memory dict, mirroring the directory file.
self._index = None # maps keys to (pos, siz) pairs
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
self._chmod(self._datfile)
f.close()
self._update()
# Read directory file into the in-memory index dict.
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
for line in f:
line = line.rstrip()
key, pos_and_siz_pair = eval(line)
self._index[key] = pos_and_siz_pair
f.close()
# Write the index dict to the directory file. The original directory
# file (if any) is renamed with a .bak extension first. If a .bak
# file currently exists, it's deleted.
def _commit(self):
# CAUTION: It's vital that _commit() succeed, and _commit() can
# be called from __del__(). Therefore we must never reference a
# global in this routine.
if self._index is None:
return # nothing to do
try:
self._os.unlink(self._bakfile)
except self._os.error:
pass
try:
self._os.rename(self._dirfile, self._bakfile)
except self._os.error:
pass
f = self._open(self._dirfile, 'w')
self._chmod(self._dirfile)
for key, pos_and_siz_pair in self._index.iteritems():
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
sync = _commit
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
# Append val to the data file, starting at a _BLOCKSIZE-aligned
# offset. The data file is first padded with NUL bytes (if needed)
# to get to an aligned offset. Return pair
# (starting offset of val, len(val))
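    # For example (illustrative, not from the original comments): with
    # _BLOCKSIZE = 512 and a data file currently 600 bytes long, the file is
    # padded to 1024 bytes and the new value is written at offset 1024.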
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = int(f.tell())
npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
f.close()
return (pos, len(val))
# Write val to the data file, starting at offset pos. The caller
# is responsible for ensuring that there's enough room starting at
# pos to hold val, without overwriting some other value. Return
# pair (pos, len(val)).
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
# key is a new key whose associated value starts in the data file
# at offset pos and with length siz. Add an index record to
# the in-memory index dict, and append one to the directory file.
def _addkey(self, key, pos_and_siz_pair):
self._index[key] = pos_and_siz_pair
f = _open(self._dirfile, 'a')
self._chmod(self._dirfile)
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
raise TypeError, "keys and values must be strings"
if key not in self._index:
self._addkey(key, self._addval(val))
else:
# See whether the new value is small enough to fit in the
# (padded) space currently occupied by the old value.
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
if newblocks <= oldblocks:
self._index[key] = self._setval(pos, val)
else:
# The new value doesn't fit in the (padded) space used
# by the old value. The blocks used by the old value are
# forever lost.
self._index[key] = self._addval(val)
# Note that _index may be out of synch with the directory
# file now: _setval() and _addval() don't update the directory
# file. This also means that the on-disk directory and data
# files are in a mutually inconsistent state, and they'll
# remain that way until _commit() is called. Note that this
# is a disaster (for the database) if the program crashes
# (so that _commit() never gets called).
def __delitem__(self, key):
# The blocks used by the associated value are lost.
del self._index[key]
# XXX It's unclear why we do a _commit() here (the code always
# XXX has, so I'm not changing it). _setitem__ doesn't try to
# XXX keep the directory file in synch. Why should we? Or
# XXX why shouldn't __setitem__?
self._commit()
def keys(self):
return self._index.keys()
def has_key(self, key):
return key in self._index
def __contains__(self, key):
return key in self._index
def iterkeys(self):
return self._index.iterkeys()
__iter__ = iterkeys
def __len__(self):
return len(self._index)
def close(self):
self._commit()
self._index = self._datfile = self._dirfile = self._bakfile = None
__del__ = close
def _chmod (self, file):
if hasattr(self._os, 'chmod'):
self._os.chmod(file, self._mode)
def open(file, flag=None, mode=0666):
"""Open the database file, filename, and return corresponding object.
The flag argument, used to control how the database is opened in the
other DBM implementations, is ignored in the dumbdbm module; the
database is always opened for update, and will be created if it does
not exist.
The optional mode argument is the UNIX mode of the file, used only when
the database has to be created. It defaults to octal code 0666 (and
will be modified by the prevailing umask).
"""
# flag argument is currently ignored
# Modify mode depending on the umask
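    # (Illustrative: with a umask of 0022 the default mode 0666 becomes 0644.)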
try:
um = _os.umask(0)
_os.umask(um)
except AttributeError:
pass
else:
# Turn off any bits that are set in the umask
mode = mode & (~um)
return _Database(file, mode)
| gpl-3.0 |
cmcdowell/weatherpy | weatherpy/wind.py | 1 | 1807 |
class Wind(object):
"""
Current forecast information about the wind.
Attributes:
chill: Wind chill in degrees (integer). If a value for wind chill is not
found, chill will be None.
direction: Wind direction in degrees (integer). If a value for wind
direction is not found, direction will be None.
speed: Wind speed in units specified in the speed attribute of the
        Units class (float). If a value for wind speed is not found, speed
will be None.
"""
def __init__(self, wind):
try:
self.chill = int(wind['chill'])
except ValueError:
self.chill = None
try:
self.direction = int(wind['direction'])
except ValueError:
self.direction = None
try:
self.speed = float(wind['speed'])
except ValueError:
self.speed = None
def cardinal_direction(self):
"""
Returns the cardinal direction of the
wind as a string. Possible returned values are N, E, S, W, and None.
315 degrees to 45 degrees exclusive -> N
45 degrees to 135 degrees exclusive -> E
135 degrees to 225 degrees exclusive -> S
225 degrees to 315 degrees exclusive -> W
None if no direction found.
"""
if self.direction is None:
return None
if self.direction > 360 or self.direction < 0:
raise Exception('Direction out of range')
        if 315 <= self.direction <= 360 or 0 <= self.direction < 45:
return 'N'
elif 45 <= self.direction < 135:
return 'E'
elif 135 <= self.direction < 225:
return 'S'
elif 225 <= self.direction < 315:
return 'W'
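# Minimal usage sketch (hypothetical values in a Yahoo-weather-style dict;
# not part of the original module):
#
#   wind = Wind({'chill': '10', 'direction': '90', 'speed': '15'})
#   wind.cardinal_direction()  # -> 'E'
#   Wind({'chill': '', 'direction': '', 'speed': ''}).direction  # -> None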
| mit |
WURFL/wurfl-cloud-client-python | setup.py | 1 | 1722 | from setuptools import setup
import os.path
__license__ = """
Copyright (c) 2015 ScientiaMobile Inc.
The WURFL Cloud Client is intended to be used in both open-source and
commercial environments. To allow its use in as many situations as possible,
the WURFL Cloud Client is dual-licensed. You may choose to use the WURFL
Cloud Client under either the GNU GENERAL PUBLIC LICENSE, Version 2.0, or
the MIT License.
Refer to the COPYING.txt file distributed with this package.
"""
__copyright__ = "2015 ScientiaMobile Incorporated, All Rights Reserved"
__version__ = "1.1.1"
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
#doc = __doc__.strip()
setup (name="wurfl-cloud",
version=__version__,
author="ScientiaMobile",
author_email="[email protected]",
license=__license__,
packages=['wurfl_cloud', 'wurfl_cloud.cache'],
#description=doc,
#long_description=read('doc/README'),
platforms="All",
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Telecommunications Industry',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database :: Front-Ends',
'Topic :: Internet :: WAP',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
])
| gpl-2.0 |
40223123/finaltest2 | static/Brython3.1.3-20150514-095342/Lib/zipfile.py | 620 | 66368 | """
Read and write ZIP files.
XXX references to utf-8 need further investigation.
"""
import io
import os
import re
import imp
import sys
import time
import stat
import shutil
import struct
import binascii
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
try:
import bz2 # We may need its compression method
except ImportError:
bz2 = None
try:
import lzma # We may need its compression method
except ImportError:
lzma = None
__all__ = ["BadZipFile", "BadZipfile", "error",
"ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
pass
class LargeZipFile(Exception):
"""
Raised when writing a zipfile, the zipfile requires ZIP64 extensions
and those extensions are disabled.
"""
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported
DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
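# For reference, "<4s4H2LH" packs to 22 bytes (4 + 4*2 + 2*4 + 2), which is
# the minimum tail that _EndRecData() below reads for a comment-less archive.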
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except IOError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except IOError:
pass
return result
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except IOError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
if len(data) != sizeEndCentDir64Locator:
return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
if len(data) != sizeEndCentDir64:
return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except IOError:
return None
data = fpin.read()
if (len(data) == sizeEndCentDir and
data[0:4] == stringEndArchive and
data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
if len(recData) != sizeEndCentDir:
# Zip file is corrupted.
return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return None
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
if date_time[0] < 1980:
raise ValueError('ZIP does not support timestamps before 1980')
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = DEFAULT_VERSION # Version which created ZIP archive
self.extract_version = DEFAULT_VERSION # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def FileHeader(self, zip64=None):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
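        # Worked example (illustrative only): a date_time of
        # (2015, 5, 14, 9, 53, 42) packs to
        # dosdate = (2015-1980)<<9 | 5<<5 | 14 = 18094 and
        # dostime = 9<<11 | 53<<5 | 42//2 = 20149 (seconds keep 2s resolution).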
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while extra:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError("Corrupt extra field %s"%(ln,))
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffff, 0xffffffff):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFF:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffff:
old = self.header_offset
self.header_offset = counts[idx]
idx+=1
extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ch) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32((self.key1 >> 24) & 255, self.key2)
def __call__(self, c):
"""Decrypt a single character."""
assert isinstance(c, int)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
self._UpdateKeys(c)
return c
class LZMACompressor:
def __init__(self):
self._comp = None
def _init(self):
props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
])
return struct.pack('<BBH', 9, 4, len(props)) + props
def compress(self, data):
if self._comp is None:
return self._init() + self._comp.compress(data)
return self._comp.compress(data)
def flush(self):
if self._comp is None:
return self._init() + self._comp.flush()
return self._comp.flush()
class LZMADecompressor:
def __init__(self):
self._decomp = None
self._unconsumed = b''
self.eof = False
def decompress(self, data):
if self._decomp is None:
self._unconsumed += data
if len(self._unconsumed) <= 4:
return b''
psize, = struct.unpack('<H', self._unconsumed[2:4])
if len(self._unconsumed) <= 4 + psize:
return b''
self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1,
self._unconsumed[4:4 + psize])
])
data = self._unconsumed[4 + psize:]
del self._unconsumed
result = self._decomp.decompress(data)
self.eof = self._decomp.eof
return result
compressor_names = {
0: 'store',
1: 'shrink',
2: 'reduce',
3: 'reduce',
4: 'reduce',
5: 'reduce',
6: 'implode',
7: 'tokenize',
8: 'deflate',
9: 'deflate64',
10: 'implode',
12: 'bzip2',
14: 'lzma',
18: 'terse',
19: 'lz77',
97: 'wavpack',
98: 'ppmd',
}
def _check_compression(compression):
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
elif compression == ZIP_BZIP2:
if not bz2:
raise RuntimeError(
"Compression requires the (missing) bz2 module")
elif compression == ZIP_LZMA:
if not lzma:
raise RuntimeError(
"Compression requires the (missing) lzma module")
else:
raise RuntimeError("That compression method is not supported")
def _get_compressor(compress_type):
if compress_type == ZIP_DEFLATED:
return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Compressor()
elif compress_type == ZIP_LZMA:
return LZMACompressor()
else:
return None
def _get_decompressor(compress_type):
if compress_type == ZIP_STORED:
return None
elif compress_type == ZIP_DEFLATED:
return zlib.decompressobj(-15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Decompressor()
elif compress_type == ZIP_LZMA:
return LZMADecompressor()
else:
descr = compressor_names.get(compress_type)
if descr:
raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
else:
raise NotImplementedError("compression type %d" % (compress_type,))
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._decrypter = decrypter
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
# Adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information.
if self._decrypter is not None:
self._compress_left -= 12
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'') & 0xffffffff
else:
self._expected_crc = None
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
if self._eof and self._running_crc != self._expected_crc:
raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
## Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if self._decrypter is not None:
data = bytes(map(self._decrypter, data))
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = io.open(file, modeDict[mode])
except IOError:
if mode == 'a':
mode = key = 'w'
self.fp = io.open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
try:
if key == 'r':
self._RealGetContents()
elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
elif key == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
else:
raise RuntimeError('Mode must be "r", "w" or "a"')
except:
fp = self.fp
self.fp = None
if not self._filePassed:
fp.close()
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except IOError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) >= ZIP_MAX_COMMENT:
if self.debug:
print('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Only open a new file for instances where we were not
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
else:
zef_file = io.open(self.filename, 'rb')
try:
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
zef_file.seek(zinfo.header_offset, 0)
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError("compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %s is encrypted, password "
"required for extraction" % name)
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
header = zef_file.read(12)
h = list(map(zd, header[0:12]))
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if h[11] != check_byte:
raise RuntimeError("Bad password for file", name)
return ZipExtFile(zef_file, mode, zinfo, zd,
close_fileobj=not self._filePassed)
except:
if not self._filePassed:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
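        # e.g. 'aux:data?.txt.' becomes 'aux_data_.txt': the illegal ':' and
        # '?' are replaced with '_' and the trailing dot is stripped.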
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
if self.debug: # Warning for duplicate names
print("Duplicate name:", zinfo.filename)
if self.mode not in ("w", "a"):
raise RuntimeError('write() requires mode "w" or "a"')
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if zinfo.file_size > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
if zinfo.header_offset > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile(
"Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
return
cmpr = _get_compressor(zinfo.compress_type)
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError('File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError('Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset, 0)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo.external_attr = 0o600 << 16
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
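                        # e.g. when both file_size and compress_size exceed
                        # the limit, this packs struct.pack('<HHQQ', 1, 16,
                        # file_size, compress_size): header ID 0x0001, a
                        # 16-byte data length, then the two 64-bit values.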
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
finally:
fp = self.fp
self.fp = None
if not self._filePassed:
fp.close()
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def __init__(self, file, mode="r", compression=ZIP_STORED,
allowZip64=False, optimize=-1):
ZipFile.__init__(self, file, mode=mode, compression=compression,
allowZip64=allowZip64)
self._optimize = optimize
def writepy(self, pathname, basename=""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as err:
print(err.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
pycache_pyc = imp.cache_from_source(file_py, True)
pycache_pyo = imp.cache_from_source(file_py, False)
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyo) and
os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyo file.
arcname = fname = file_pyo
elif (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_pyc) and
os.stat(pycache_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_pyc
arcname = file_pyc
elif (os.path.isfile(pycache_pyo) and
os.stat(pycache_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyo file, but write it to the legacy pyo
# file name in the archive.
fname = pycache_pyo
arcname = file_pyo
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
fname = (pycache_pyc if __debug__ else pycache_pyo)
arcname = (file_pyc if __debug__ else file_pyo)
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_pyc
arcname = file_pyc
else:
fname = pycache_pyo
arcname = file_pyo
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
def main(args = None):
import textwrap
USAGE=textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print(USAGE)
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
zf.printdir()
elif args[0] == '-t':
if len(args) != 2:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print("Done testing")
elif args[0] == '-e':
if len(args) != 3:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
out = args[2]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
with open(tgt, 'wb') as fp:
fp.write(zf.read(path))
elif args[0] == '-c':
if len(args) < 3:
print(USAGE)
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
with ZipFile(args[1], 'w', allowZip64=True) as zf:
for src in args[2:]:
addToZip(zf, src, os.path.basename(src))
if __name__ == "__main__":
main()
| gpl-3.0 |
richardnpaul/FWL-Website | lib/python2.7/site-packages/django/contrib/sitemaps/tests/urls/http.py | 97 | 1691 | from datetime import datetime
from django.conf.urls import patterns, url
from django.contrib.sitemaps import Sitemap, GenericSitemap, FlatPageSitemap, views
from django.contrib.auth.models import User
from django.views.decorators.cache import cache_page
from django.contrib.sitemaps.tests.base import TestModel
class SimpleSitemap(Sitemap):
changefreq = "never"
priority = 0.5
location = '/location/'
lastmod = datetime.now()
def items(self):
return [object()]
simple_sitemaps = {
'simple': SimpleSitemap,
}
generic_sitemaps = {
'generic': GenericSitemap({'queryset': TestModel.objects.all()}),
}
flatpage_sitemaps = {
'flatpages': FlatPageSitemap,
}
urlpatterns = patterns('django.contrib.sitemaps.views',
(r'^simple/index\.xml$', 'index', {'sitemaps': simple_sitemaps}),
(r'^simple/custom-index\.xml$', 'index',
{'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap_index.xml'}),
(r'^simple/sitemap-(?P<section>.+)\.xml$', 'sitemap',
{'sitemaps': simple_sitemaps}),
(r'^simple/sitemap\.xml$', 'sitemap', {'sitemaps': simple_sitemaps}),
(r'^simple/custom-sitemap\.xml$', 'sitemap',
{'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap.xml'}),
(r'^generic/sitemap\.xml$', 'sitemap', {'sitemaps': generic_sitemaps}),
(r'^flatpages/sitemap\.xml$', 'sitemap', {'sitemaps': flatpage_sitemaps}),
url(r'^cached/index\.xml$', cache_page(1)(views.index),
{'sitemaps': simple_sitemaps, 'sitemap_url_name': 'cached_sitemap'}),
url(r'^cached/sitemap-(?P<section>.+)\.xml', cache_page(1)(views.sitemap),
{'sitemaps': simple_sitemaps}, name='cached_sitemap')
)
| gpl-3.0 |
dstftw/youtube-dl | youtube_dl/extractor/imgur.py | 20 | 5082 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
js_to_json,
mimetype2ext,
ExtractorError,
)
class ImgurIE(InfoExtractor):
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|(?:t(?:opic)?|r)/[^/]+)/)(?P<id>[a-zA-Z0-9]+)'
_TESTS = [{
'url': 'https://i.imgur.com/A61SaA1.gifv',
'info_dict': {
'id': 'A61SaA1',
'ext': 'mp4',
'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
},
}, {
'url': 'https://imgur.com/A61SaA1',
'only_matching': True,
}, {
'url': 'https://i.imgur.com/crGpqCV.mp4',
'only_matching': True,
}, {
# no title
'url': 'https://i.imgur.com/jxBXAMC.gifv',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id)
width = int_or_none(self._og_search_property(
'video:width', webpage, default=None))
height = int_or_none(self._og_search_property(
'video:height', webpage, default=None))
video_elements = self._search_regex(
r'(?s)<div class="video-elements">(.*?)</div>',
webpage, 'video elements', default=None)
if not video_elements:
raise ExtractorError(
'No sources found for video %s. Maybe an image?' % video_id,
expected=True)
formats = []
for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements):
formats.append({
'format_id': m.group('type').partition('/')[2],
'url': self._proto_relative_url(m.group('src')),
'ext': mimetype2ext(m.group('type')),
'width': width,
'height': height,
'http_headers': {
'User-Agent': 'youtube-dl (like wget)',
},
})
gif_json = self._search_regex(
r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
webpage, 'GIF code', fatal=False)
if gif_json:
gifd = self._parse_json(
gif_json, video_id, transform_source=js_to_json)
formats.append({
'format_id': 'gif',
'preference': -10,
'width': width,
'height': height,
'ext': 'gif',
'acodec': 'none',
'vcodec': 'gif',
'container': 'gif',
'url': self._proto_relative_url(gifd['gifUrl']),
'filesize': gifd.get('size'),
'http_headers': {
'User-Agent': 'youtube-dl (like wget)',
},
})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': self._og_search_title(webpage, default=video_id),
}
class ImgurGalleryIE(InfoExtractor):
IE_NAME = 'imgur:gallery'
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:gallery|(?:t(?:opic)?|r)/[^/]+)/(?P<id>[a-zA-Z0-9]+)'
_TESTS = [{
'url': 'http://imgur.com/gallery/Q95ko',
'info_dict': {
'id': 'Q95ko',
'title': 'Adding faces make every GIF better',
},
'playlist_count': 25,
}, {
'url': 'http://imgur.com/topic/Aww/ll5Vk',
'only_matching': True,
}, {
'url': 'https://imgur.com/gallery/YcAQlkx',
'info_dict': {
'id': 'YcAQlkx',
'ext': 'mp4',
'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',
}
}, {
'url': 'http://imgur.com/topic/Funny/N8rOudd',
'only_matching': True,
}, {
'url': 'http://imgur.com/r/aww/VQcQPhM',
'only_matching': True,
}]
def _real_extract(self, url):
gallery_id = self._match_id(url)
data = self._download_json(
'https://imgur.com/gallery/%s.json' % gallery_id,
gallery_id)['data']['image']
if data.get('is_album'):
entries = [
self.url_result('http://imgur.com/%s' % image['hash'], ImgurIE.ie_key(), image['hash'])
for image in data['album_images']['images'] if image.get('hash')]
return self.playlist_result(entries, gallery_id, data.get('title'), data.get('description'))
return self.url_result('http://imgur.com/%s' % gallery_id, ImgurIE.ie_key(), gallery_id)
class ImgurAlbumIE(ImgurGalleryIE):
IE_NAME = 'imgur:album'
_VALID_URL = r'https?://(?:i\.)?imgur\.com/a/(?P<id>[a-zA-Z0-9]+)'
_TESTS = [{
'url': 'http://imgur.com/a/j6Orj',
'info_dict': {
'id': 'j6Orj',
'title': 'A Literary Analysis of "Star Wars: The Force Awakens"',
},
'playlist_count': 12,
}]
| unlicense |
franky88/emperioanimesta | env/Lib/site-packages/wheel/egg2wheel.py | 471 | 2633 | #!/usr/bin/env python
import os.path
import re
import sys
import tempfile
import zipfile
import wheel.bdist_wheel
import shutil
import distutils.dist
from distutils.archive_util import make_archive
from argparse import ArgumentParser
from glob import iglob
egg_info_re = re.compile(r'''(?P<name>.+?)-(?P<ver>.+?)
(-(?P<pyver>.+?))?(-(?P<arch>.+?))?.egg''', re.VERBOSE)
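# As a rough illustration of the pattern above (values are expectations, not
# verified output): an egg named 'sample-1.0-py2.7-linux-x86_64.egg' should
# split into name='sample', ver='1.0', pyver='py2.7', arch='linux-x86_64',
# which egg2wheel() below would then turn into a wheel file named
# 'sample-1.0-cp27-none-linux_x86_64.whl'.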
def egg2wheel(egg_path, dest_dir):
egg_info = egg_info_re.match(os.path.basename(egg_path)).groupdict()
dir = tempfile.mkdtemp(suffix="_e2w")
if os.path.isfile(egg_path):
# assume we have a bdist_egg otherwise
egg = zipfile.ZipFile(egg_path)
egg.extractall(dir)
else:
# support buildout-style installed eggs directories
for pth in os.listdir(egg_path):
src = os.path.join(egg_path, pth)
if os.path.isfile(src):
shutil.copy2(src, dir)
else:
shutil.copytree(src, os.path.join(dir, pth))
dist_info = "%s-%s" % (egg_info['name'], egg_info['ver'])
abi = 'none'
pyver = egg_info['pyver'].replace('.', '')
arch = (egg_info['arch'] or 'any').replace('.', '_').replace('-', '_')
if arch != 'any':
# assume all binary eggs are for CPython
pyver = 'cp' + pyver[2:]
wheel_name = '-'.join((
dist_info,
pyver,
abi,
arch
))
bw = wheel.bdist_wheel.bdist_wheel(distutils.dist.Distribution())
bw.root_is_purelib = egg_info['arch'] is None
dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info)
bw.egg2dist(os.path.join(dir, 'EGG-INFO'),
dist_info_dir)
bw.write_wheelfile(dist_info_dir, generator='egg2wheel')
bw.write_record(dir, dist_info_dir)
filename = make_archive(os.path.join(dest_dir, wheel_name), 'zip', root_dir=dir)
os.rename(filename, filename[:-3] + 'whl')
shutil.rmtree(dir)
def main():
parser = ArgumentParser()
parser.add_argument('eggs', nargs='*', help="Eggs to convert")
parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
help="Directory to store wheels (default %(default)s)")
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
for pat in args.eggs:
for egg in iglob(pat):
if args.verbose:
sys.stdout.write("{0}... ".format(egg))
egg2wheel(egg, args.dest_dir)
if args.verbose:
sys.stdout.write("OK\n")
if __name__ == "__main__":
main()
| gpl-3.0 |
h2oai/h2o-2 | py/testdir_single_jvm/test_parse_small_many_fvec.py | 9 | 1652 | import unittest, re, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i
def writeRows(csvPathname,row,eol,repeat):
f = open(csvPathname, 'w')
for r in range(repeat):
f.write(row + eol)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
# SEED = h2o.setup_random_seed()
SEED = 6204672511291494176
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_small_many_fvec(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
# can try the other two possibilities also
eol = "\n"
row = "a,b,c,d,e,f,g"
# need unique key name for upload and for parse, each time
# maybe just upload it once?
timeoutSecs = 10
node = h2o.nodes[0]
# fail rate is one in 200?
# need at least two rows (parser)
for sizeTrial in range(10):
size = random.randint(2,129)
print "\nparsing with rows:", size
csvFilename = "p" + "_" + str(size)
csvPathname = SYNDATASETS_DIR + "/" + csvFilename
writeRows(csvPathname,row,eol,size)
src_key = csvFilename
for trial in range(5):
hex_key = csvFilename + "_" + str(trial) + ".hex"
parseResult = h2i.import_parse(path=csvPathname, schema='put', src_key=src_key, hex_key=hex_key)
sys.stdout.write('.')
sys.stdout.flush()
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
MrLoick/python-for-android | python-modules/twisted/twisted/internet/endpoints.py | 49 | 33917 | # -*- test-case-name: twisted.internet.test.test_endpoints -*-
# Copyright (c) 2007-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementations of L{IStreamServerEndpoint} and L{IStreamClientEndpoint} that
wrap the L{IReactorTCP}, L{IReactorSSL}, and L{IReactorUNIX} interfaces.
This also implements an extensible mini-language for describing endpoints,
parsed by the L{clientFromString} and L{serverFromString} functions.
@since: 10.1
"""
from zope.interface import implements, directlyProvides
import warnings
from twisted.internet import interfaces, defer, error
from twisted.internet.protocol import ClientFactory, Protocol
from twisted.plugin import getPlugins
from twisted.internet.interfaces import IStreamServerEndpointStringParser
from twisted.internet.interfaces import IStreamClientEndpointStringParser
from twisted.python.filepath import FilePath
__all__ = ["clientFromString", "serverFromString",
"TCP4ServerEndpoint", "TCP4ClientEndpoint",
"UNIXServerEndpoint", "UNIXClientEndpoint",
"SSL4ServerEndpoint", "SSL4ClientEndpoint"]
class _WrappingProtocol(Protocol):
"""
Wrap another protocol in order to notify my user when a connection has
been made.
@ivar _connectedDeferred: The L{Deferred} that will callback
with the C{wrappedProtocol} when it is connected.
@ivar _wrappedProtocol: An L{IProtocol} provider that will be
connected.
"""
def __init__(self, connectedDeferred, wrappedProtocol):
"""
@param connectedDeferred: The L{Deferred} that will callback
with the C{wrappedProtocol} when it is connected.
@param wrappedProtocol: An L{IProtocol} provider that will be
connected.
"""
self._connectedDeferred = connectedDeferred
self._wrappedProtocol = wrappedProtocol
if interfaces.IHalfCloseableProtocol.providedBy(
self._wrappedProtocol):
directlyProvides(self, interfaces.IHalfCloseableProtocol)
def connectionMade(self):
"""
Connect the C{self._wrappedProtocol} to our C{self.transport} and
callback C{self._connectedDeferred} with the C{self._wrappedProtocol}
"""
self._wrappedProtocol.makeConnection(self.transport)
self._connectedDeferred.callback(self._wrappedProtocol)
def dataReceived(self, data):
"""
Proxy C{dataReceived} calls to our C{self._wrappedProtocol}
"""
return self._wrappedProtocol.dataReceived(data)
def connectionLost(self, reason):
"""
Proxy C{connectionLost} calls to our C{self._wrappedProtocol}
"""
return self._wrappedProtocol.connectionLost(reason)
def readConnectionLost(self):
"""
Proxy L{IHalfCloseableProtocol.readConnectionLost} to our
C{self._wrappedProtocol}
"""
self._wrappedProtocol.readConnectionLost()
def writeConnectionLost(self):
"""
Proxy L{IHalfCloseableProtocol.writeConnectionLost} to our
C{self._wrappedProtocol}
"""
self._wrappedProtocol.writeConnectionLost()
class _WrappingFactory(ClientFactory):
"""
Wrap a factory in order to wrap the protocols it builds.
@ivar _wrappedFactory: A provider of I{IProtocolFactory} whose
buildProtocol method will be called and whose resulting protocol
will be wrapped.
@ivar _onConnection: An L{Deferred} that fires when the protocol is
connected
"""
protocol = _WrappingProtocol
def __init__(self, wrappedFactory, canceller):
"""
@param wrappedFactory: A provider of I{IProtocolFactory} whose
buildProtocol method will be called and whose resulting protocol
will be wrapped.
@param canceller: An object that will be called to cancel the
L{self._onConnection} L{Deferred}
"""
self._wrappedFactory = wrappedFactory
self._onConnection = defer.Deferred(canceller=canceller)
def buildProtocol(self, addr):
"""
Proxy C{buildProtocol} to our C{self._wrappedFactory} or errback
the C{self._onConnection} L{Deferred}.
@return: An instance of L{_WrappingProtocol} or C{None}
"""
try:
proto = self._wrappedFactory.buildProtocol(addr)
except:
self._onConnection.errback()
else:
return self.protocol(self._onConnection, proto)
def clientConnectionFailed(self, connector, reason):
"""
Errback the C{self._onConnection} L{Deferred} when the
client connection fails.
"""
self._onConnection.errback(reason)
class TCP4ServerEndpoint(object):
"""
TCP server endpoint with an IPv4 configuration
@ivar _reactor: An L{IReactorTCP} provider.
@type _port: int
@ivar _port: The port number on which to listen for incoming connections.
@type _backlog: int
@ivar _backlog: size of the listen queue
@type _interface: str
@ivar _interface: the hostname to bind to, defaults to '' (all)
"""
implements(interfaces.IStreamServerEndpoint)
def __init__(self, reactor, port, backlog=50, interface=''):
"""
@param reactor: An L{IReactorTCP} provider.
@param port: The port number used listening
@param backlog: size of the listen queue
@param interface: the hostname to bind to, defaults to '' (all)
"""
self._reactor = reactor
self._port = port
self._listenArgs = dict(backlog=50, interface='')
self._backlog = backlog
self._interface = interface
def listen(self, protocolFactory):
"""
Implement L{IStreamServerEndpoint.listen} to listen on a TCP socket
"""
return defer.execute(self._reactor.listenTCP,
self._port,
protocolFactory,
backlog=self._backlog,
interface=self._interface)
class TCP4ClientEndpoint(object):
"""
TCP client endpoint with an IPv4 configuration.
@ivar _reactor: An L{IReactorTCP} provider.
@type _host: str
@ivar _host: The hostname to connect to as a C{str}
@type _port: int
@ivar _port: The port to connect to as C{int}
@type _timeout: int
@ivar _timeout: number of seconds to wait before assuming the
connection has failed.
@type _bindAddress: tuple
    @ivar _bindAddress: a (host, port) tuple of local address to bind
to, or None.
"""
implements(interfaces.IStreamClientEndpoint)
def __init__(self, reactor, host, port, timeout=30, bindAddress=None):
"""
@param reactor: An L{IReactorTCP} provider
@param host: A hostname, used when connecting
@param port: The port number, used when connecting
@param timeout: number of seconds to wait before assuming the
connection has failed.
        @param bindAddress: a (host, port) tuple of local address to bind to,
or None.
"""
self._reactor = reactor
self._host = host
self._port = port
self._timeout = timeout
self._bindAddress = bindAddress
def connect(self, protocolFactory):
"""
Implement L{IStreamClientEndpoint.connect} to connect via TCP.
"""
def _canceller(deferred):
connector.stopConnecting()
deferred.errback(
error.ConnectingCancelledError(connector.getDestination()))
try:
wf = _WrappingFactory(protocolFactory, _canceller)
connector = self._reactor.connectTCP(
self._host, self._port, wf,
timeout=self._timeout, bindAddress=self._bindAddress)
return wf._onConnection
except:
return defer.fail()
class SSL4ServerEndpoint(object):
"""
SSL secured TCP server endpoint with an IPv4 configuration.
@ivar _reactor: An L{IReactorSSL} provider.
@type _host: str
@ivar _host: The hostname to connect to as a C{str}
@type _port: int
@ivar _port: The port to connect to as C{int}
@type _sslContextFactory: L{OpenSSLCertificateOptions}
    @ivar _sslContextFactory: SSL Configuration information as an
L{OpenSSLCertificateOptions}
@type _backlog: int
@ivar _backlog: size of the listen queue
@type _interface: str
@ivar _interface: the hostname to bind to, defaults to '' (all)
"""
implements(interfaces.IStreamServerEndpoint)
def __init__(self, reactor, port, sslContextFactory,
backlog=50, interface=''):
"""
@param reactor: An L{IReactorSSL} provider.
@param port: The port number used listening
@param sslContextFactory: An instance of
L{twisted.internet._sslverify.OpenSSLCertificateOptions}.
        @param backlog: size of the listen queue
        @param interface: the hostname to bind to, defaults to '' (all)
"""
self._reactor = reactor
self._port = port
self._sslContextFactory = sslContextFactory
self._backlog = backlog
self._interface = interface
def listen(self, protocolFactory):
"""
Implement L{IStreamServerEndpoint.listen} to listen for SSL on a
TCP socket.
"""
return defer.execute(self._reactor.listenSSL, self._port,
protocolFactory,
contextFactory=self._sslContextFactory,
backlog=self._backlog,
interface=self._interface)
class SSL4ClientEndpoint(object):
"""
SSL secured TCP client endpoint with an IPv4 configuration
@ivar _reactor: An L{IReactorSSL} provider.
@type _host: str
@ivar _host: The hostname to connect to as a C{str}
@type _port: int
@ivar _port: The port to connect to as C{int}
@type _sslContextFactory: L{OpenSSLCertificateOptions}
    @ivar _sslContextFactory: SSL Configuration information as an
L{OpenSSLCertificateOptions}
@type _timeout: int
@ivar _timeout: number of seconds to wait before assuming the
connection has failed.
@type _bindAddress: tuple
@ivar _bindAddress: a (host, port) tuple of local address to bind
to, or None.
"""
implements(interfaces.IStreamClientEndpoint)
def __init__(self, reactor, host, port, sslContextFactory,
timeout=30, bindAddress=None):
"""
@param reactor: An L{IReactorSSL} provider.
@param host: A hostname, used when connecting
@param port: The port number, used when connecting
@param sslContextFactory: SSL Configuration information as An instance
of L{OpenSSLCertificateOptions}.
@param timeout: number of seconds to wait before assuming the
connection has failed.
        @param bindAddress: a (host, port) tuple of local address to bind to,
or None.
"""
self._reactor = reactor
self._host = host
self._port = port
self._sslContextFactory = sslContextFactory
self._timeout = timeout
self._bindAddress = bindAddress
def connect(self, protocolFactory):
"""
Implement L{IStreamClientEndpoint.connect} to connect with SSL over
TCP.
"""
def _canceller(deferred):
connector.stopConnecting()
deferred.errback(
error.ConnectingCancelledError(connector.getDestination()))
try:
wf = _WrappingFactory(protocolFactory, _canceller)
connector = self._reactor.connectSSL(
self._host, self._port, wf, self._sslContextFactory,
timeout=self._timeout, bindAddress=self._bindAddress)
return wf._onConnection
except:
return defer.fail()
class UNIXServerEndpoint(object):
"""
UnixSocket server endpoint.
@type path: str
@ivar path: a path to a unix socket on the filesystem.
@type _listenArgs: dict
@ivar _listenArgs: A C{dict} of keyword args that will be passed
to L{IReactorUNIX.listenUNIX}
    @ivar _reactor: An L{IReactorUNIX} provider.
"""
implements(interfaces.IStreamServerEndpoint)
def __init__(self, reactor, address, backlog=50, mode=0666, wantPID=0):
"""
@param reactor: An L{IReactorUNIX} provider.
@param address: The path to the Unix socket file, used when listening
@param listenArgs: An optional dict of keyword args that will be
passed to L{IReactorUNIX.listenUNIX}
@param backlog: number of connections to allow in backlog.
@param mode: mode to set on the unix socket. This parameter is
deprecated. Permissions should be set on the directory which
contains the UNIX socket.
@param wantPID: if True, create a pidfile for the socket.
"""
self._reactor = reactor
self._address = address
self._backlog = backlog
self._mode = mode
self._wantPID = wantPID
def listen(self, protocolFactory):
"""
Implement L{IStreamServerEndpoint.listen} to listen on a UNIX socket.
"""
return defer.execute(self._reactor.listenUNIX, self._address,
protocolFactory,
backlog=self._backlog,
mode=self._mode,
wantPID=self._wantPID)
class UNIXClientEndpoint(object):
"""
UnixSocket client endpoint.
@type _path: str
@ivar _path: a path to a unix socket on the filesystem.
@type _timeout: int
@ivar _timeout: number of seconds to wait before assuming the connection
has failed.
@type _checkPID: bool
@ivar _checkPID: if True, check for a pid file to verify that a server
is listening.
    @ivar _reactor: An L{IReactorUNIX} provider.
"""
implements(interfaces.IStreamClientEndpoint)
def __init__(self, reactor, path, timeout=30, checkPID=0):
"""
@param reactor: An L{IReactorUNIX} provider.
@param path: The path to the Unix socket file, used when connecting
@param timeout: number of seconds to wait before assuming the
connection has failed.
@param checkPID: if True, check for a pid file to verify that a server
is listening.
"""
self._reactor = reactor
self._path = path
self._timeout = timeout
self._checkPID = checkPID
def connect(self, protocolFactory):
"""
Implement L{IStreamClientEndpoint.connect} to connect via a
UNIX Socket
"""
def _canceller(deferred):
connector.stopConnecting()
deferred.errback(
error.ConnectingCancelledError(connector.getDestination()))
try:
wf = _WrappingFactory(protocolFactory, _canceller)
connector = self._reactor.connectUNIX(
self._path, wf,
timeout=self._timeout,
checkPID=self._checkPID)
return wf._onConnection
except:
return defer.fail()
def _parseTCP(factory, port, interface="", backlog=50):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a TCP(IPv4) stream endpoint into the structured arguments.
@param factory: the protocol factory being parsed, or C{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or C{NoneType}
@param port: the integer port number to bind
@type port: C{str}
@param interface: the interface IP to listen on
@param backlog: the length of the listen queue
@type backlog: C{str}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{IReactorTCP.listenTCP} (or, modulo argument 2, the factory, arguments
        to L{TCP4ServerEndpoint}).
"""
return (int(port), factory), {'interface': interface,
'backlog': int(backlog)}
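    # For instance (illustrative call, factory left as None as in strports use):
    # _parseTCP(None, "80", interface="127.0.0.1") should evaluate to
    # ((80, None), {'interface': '127.0.0.1', 'backlog': 50}).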
def _parseUNIX(factory, address, mode='666', backlog=50, lockfile=True):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a UNIX (AF_UNIX/SOCK_STREAM) stream endpoint into the
structured arguments.
@param factory: the protocol factory being parsed, or C{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or C{NoneType}
@param address: the pathname of the unix socket
@type address: C{str}
@param backlog: the length of the listen queue
@type backlog: C{str}
@param lockfile: A string '0' or '1', mapping to True and False
respectively. See the C{wantPID} argument to C{listenUNIX}
@return: a 2-tuple of (args, kwargs), describing the parameters to
        L{IReactorUNIX.listenUNIX} (or, modulo argument 2, the factory,
        arguments to L{UNIXServerEndpoint}).
"""
return (
(address, factory),
{'mode': int(mode, 8), 'backlog': int(backlog),
'wantPID': bool(int(lockfile))})
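    # For instance (illustrative call): _parseUNIX(None, "/var/run/finger",
    # mode='660') should evaluate to
    # (('/var/run/finger', None), {'mode': 0660, 'backlog': 50, 'wantPID': True}).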
def _parseSSL(factory, port, privateKey="server.pem", certKey=None,
sslmethod=None, interface='', backlog=50):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for an SSL (over TCP/IPv4) stream endpoint into the structured
arguments.
@param factory: the protocol factory being parsed, or C{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or C{NoneType}
@param port: the integer port number to bind
@type port: C{str}
@param interface: the interface IP to listen on
@param backlog: the length of the listen queue
@type backlog: C{str}
@param privateKey: The file name of a PEM format private key file.
@type privateKey: C{str}
@param certKey: The file name of a PEM format certificate file.
@type certKey: C{str}
@param sslmethod: The string name of an SSL method, based on the name of a
constant in C{OpenSSL.SSL}. Must be one of: "SSLv23_METHOD",
"SSLv2_METHOD", "SSLv3_METHOD", "TLSv1_METHOD".
@type sslmethod: C{str}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{IReactorSSL.listenSSL} (or, modulo argument 2, the factory, arguments
        to L{SSL4ServerEndpoint}).
"""
from twisted.internet import ssl
if certKey is None:
certKey = privateKey
kw = {}
if sslmethod is not None:
kw['sslmethod'] = getattr(ssl.SSL, sslmethod)
cf = ssl.DefaultOpenSSLContextFactory(privateKey, certKey, **kw)
return ((int(port), factory, cf),
{'interface': interface, 'backlog': int(backlog)})
_serverParsers = {"tcp": _parseTCP,
"unix": _parseUNIX,
"ssl": _parseSSL}
_OP, _STRING = range(2)
def _tokenize(description):
"""
Tokenize a strports string and yield each token.
@param description: a string as described by L{serverFromString} or
L{clientFromString}.
@return: an iterable of 2-tuples of (L{_OP} or L{_STRING}, string). Tuples
starting with L{_OP} will contain a second element of either ':' (i.e.
'next parameter') or '=' (i.e. 'assign parameter value'). For example,
the string 'hello:greet\=ing=world' would result in a generator
yielding these values::
_STRING, 'hello'
_OP, ':'
_STRING, 'greet=ing'
_OP, '='
_STRING, 'world'
"""
current = ''
ops = ':='
nextOps = {':': ':=', '=': ':'}
description = iter(description)
for n in description:
if n in ops:
yield _STRING, current
yield _OP, n
current = ''
ops = nextOps[n]
elif n == '\\':
current += description.next()
else:
current += n
yield _STRING, current
def _parse(description):
"""
Convert a description string into a list of positional and keyword
parameters, using logic vaguely like what Python does.
@param description: a string as described by L{serverFromString} or
L{clientFromString}.
@return: a 2-tuple of C{(args, kwargs)}, where 'args' is a list of all
':'-separated C{str}s not containing an '=' and 'kwargs' is a map of
all C{str}s which do contain an '='. For example, the result of
C{_parse('a:b:d=1:c')} would be C{(['a', 'b', 'c'], {'d': '1'})}.
"""
args, kw = [], {}
def add(sofar):
if len(sofar) == 1:
args.append(sofar[0])
else:
kw[sofar[0]] = sofar[1]
sofar = ()
for (type, value) in _tokenize(description):
if type is _STRING:
sofar += (value,)
elif value == ':':
add(sofar)
sofar = ()
add(sofar)
return args, kw
# Mappings from description "names" to endpoint constructors.
_endpointServerFactories = {
'TCP': TCP4ServerEndpoint,
'SSL': SSL4ServerEndpoint,
'UNIX': UNIXServerEndpoint,
}
_endpointClientFactories = {
'TCP': TCP4ClientEndpoint,
'SSL': SSL4ClientEndpoint,
'UNIX': UNIXClientEndpoint,
}
_NO_DEFAULT = object()
def _parseServer(description, factory, default=None):
"""
    Parse a strports description into a 3-tuple of name (or plugin), arguments,
    and keyword values.
@param description: A description in the format explained by
L{serverFromString}.
@type description: C{str}
@param factory: A 'factory' argument; this is left-over from
twisted.application.strports, it's not really used.
@type factory: L{IProtocolFactory} or L{None}
@param default: Deprecated argument, specifying the default parser mode to
use for unqualified description strings (those which do not have a ':'
and prefix).
@type default: C{str} or C{NoneType}
@return: a 3-tuple of (plugin or name, arguments, keyword arguments)
"""
args, kw = _parse(description)
if not args or (len(args) == 1 and not kw):
deprecationMessage = (
"Unqualified strport description passed to 'service'."
"Use qualified endpoint descriptions; for example, 'tcp:%s'."
% (description,))
if default is None:
default = 'tcp'
warnings.warn(
deprecationMessage, category=DeprecationWarning, stacklevel=4)
elif default is _NO_DEFAULT:
raise ValueError(deprecationMessage)
# If the default has been otherwise specified, the user has already
# been warned.
args[0:0] = [default]
endpointType = args[0]
parser = _serverParsers.get(endpointType)
if parser is None:
for plugin in getPlugins(IStreamServerEndpointStringParser):
if plugin.prefix == endpointType:
return (plugin, args[1:], kw)
raise ValueError("Unknown endpoint type: '%s'" % (endpointType,))
return (endpointType.upper(),) + parser(factory, *args[1:], **kw)
def _serverFromStringLegacy(reactor, description, default):
"""
Underlying implementation of L{serverFromString} which avoids exposing the
deprecated 'default' argument to anything but L{strports.service}.
"""
nameOrPlugin, args, kw = _parseServer(description, None, default)
if type(nameOrPlugin) is not str:
plugin = nameOrPlugin
return plugin.parseStreamServer(reactor, *args, **kw)
else:
name = nameOrPlugin
# Chop out the factory.
args = args[:1] + args[2:]
return _endpointServerFactories[name](reactor, *args, **kw)
def serverFromString(reactor, description):
"""
Construct a stream server endpoint from an endpoint description string.
The format for server endpoint descriptions is a simple string. It is a
prefix naming the type of endpoint, then a colon, then the arguments for
that endpoint.
For example, you can call it like this to create an endpoint that will
listen on TCP port 80::
serverFromString(reactor, "tcp:80")
Additional arguments may be specified as keywords, separated with colons.
For example, you can specify the interface for a TCP server endpoint to
bind to like this::
serverFromString(reactor, "tcp:80:interface=127.0.0.1")
SSL server endpoints may be specified with the 'ssl' prefix, and the
private key and certificate files may be specified by the C{privateKey} and
C{certKey} arguments::
serverFromString(reactor, "ssl:443:privateKey=key.pem:certKey=crt.pem")
If a private key file name (C{privateKey}) isn't provided, a "server.pem"
file is assumed to exist which contains the private key. If the certificate
file name (C{certKey}) isn't provided, the private key file is assumed to
contain the certificate as well.
You may escape colons in arguments with a backslash, which you will need to
use if you want to specify a full pathname argument on Windows::
serverFromString(reactor,
"ssl:443:privateKey=C\\:/key.pem:certKey=C\\:/cert.pem")
    Finally, the 'unix' prefix may be used to specify a filesystem UNIX socket,
optionally with a 'mode' argument to specify the mode of the socket file
created by C{listen}::
serverFromString(reactor, "unix:/var/run/finger")
serverFromString(reactor, "unix:/var/run/finger:mode=660")
This function is also extensible; new endpoint types may be registered as
L{IStreamServerEndpointStringParser} plugins. See that interface for more
information.
@param reactor: The server endpoint will be constructed with this reactor.
@param description: The strports description to parse.
@return: A new endpoint which can be used to listen with the parameters
        given by C{description}.
@rtype: L{IStreamServerEndpoint<twisted.internet.interfaces.IStreamServerEndpoint>}
@raise ValueError: when the 'description' string cannot be parsed.
@since: 10.2
"""
return _serverFromStringLegacy(reactor, description, _NO_DEFAULT)
def quoteStringArgument(argument):
"""
Quote an argument to L{serverFromString} and L{clientFromString}. Since
arguments are separated with colons and colons are escaped with
    backslashes, some care is necessary. If, for example, you have a pathname,
    you may be tempted to interpolate it into a string like this::
serverFromString("ssl:443:privateKey=%s" % (myPathName,))
This may appear to work, but will have portability issues (Windows
pathnames, for example). Usually you should just construct the appropriate
endpoint type rather than interpolating strings, which in this case would
be L{SSL4ServerEndpoint}. There are some use-cases where you may need to
generate such a string, though; for example, a tool to manipulate a
configuration file which has strports descriptions in it. To be correct in
those cases, do this instead::
serverFromString("ssl:443:privateKey=%s" %
(quoteStringArgument(myPathName),))
@param argument: The part of the endpoint description string you want to
pass through.
@type argument: C{str}
@return: The quoted argument.
@rtype: C{str}
"""
return argument.replace('\\', '\\\\').replace(':', '\\:')
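    # For instance, quoteStringArgument("C:\\temp") should return "C\\:\\\\temp":
    # the backslash is doubled first and the colon is then escaped, so the value
    # survives _tokenize()/_parse() unchanged.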
def _parseClientTCP(**kwargs):
"""
Perform any argument value coercion necessary for TCP client parameters.
Valid keyword arguments to this function are all L{IReactorTCP.connectTCP}
arguments.
@return: The coerced values as a C{dict}.
"""
kwargs['port'] = int(kwargs['port'])
try:
kwargs['timeout'] = int(kwargs['timeout'])
except KeyError:
pass
return kwargs
def _loadCAsFromDir(directoryPath):
"""
Load certificate-authority certificate objects in a given directory.
@param directoryPath: a L{FilePath} pointing at a directory to load .pem
files from.
@return: a C{list} of L{OpenSSL.crypto.X509} objects.
"""
from twisted.internet import ssl
caCerts = {}
for child in directoryPath.children():
if not child.basename().split('.')[-1].lower() == 'pem':
continue
try:
data = child.getContent()
except IOError:
# Permission denied, corrupt disk, we don't care.
continue
try:
theCert = ssl.Certificate.loadPEM(data)
except ssl.SSL.Error:
# Duplicate certificate, invalid certificate, etc. We don't care.
pass
else:
caCerts[theCert.digest()] = theCert.original
return caCerts.values()
def _parseClientSSL(**kwargs):
"""
Perform any argument value coercion necessary for SSL client parameters.
Valid keyword arguments to this function are all L{IReactorSSL.connectSSL}
arguments except for C{contextFactory}. Instead, C{certKey} (the path name
    of the certificate file) and C{privateKey} (the path name of the private key
associated with the certificate) are accepted and used to construct a
context factory.
@param caCertsDir: The one parameter which is not part of
L{IReactorSSL.connectSSL}'s signature, this is a path name used to
construct a list of certificate authority certificates. The directory
will be scanned for files ending in C{.pem}, all of which will be
considered valid certificate authorities for this connection.
@type caCertsDir: C{str}
@return: The coerced values as a C{dict}.
"""
from twisted.internet import ssl
kwargs = _parseClientTCP(**kwargs)
certKey = kwargs.pop('certKey', None)
privateKey = kwargs.pop('privateKey', None)
caCertsDir = kwargs.pop('caCertsDir', None)
if certKey is not None:
certx509 = ssl.Certificate.loadPEM(
FilePath(certKey).getContent()).original
else:
certx509 = None
if privateKey is not None:
privateKey = ssl.PrivateCertificate.loadPEM(
FilePath(privateKey).getContent()).privateKey.original
else:
privateKey = None
if caCertsDir is not None:
verify = True
caCerts = _loadCAsFromDir(FilePath(caCertsDir))
else:
verify = False
caCerts = None
kwargs['sslContextFactory'] = ssl.CertificateOptions(
method=ssl.SSL.SSLv23_METHOD,
certificate=certx509,
privateKey=privateKey,
verify=verify,
caCerts=caCerts
)
return kwargs
def _parseClientUNIX(**kwargs):
"""
Perform any argument value coercion necessary for UNIX client parameters.
Valid keyword arguments to this function are all L{IReactorUNIX.connectUNIX}
arguments except for C{checkPID}. Instead, C{lockfile} is accepted and has
the same meaning.
@return: The coerced values as a C{dict}.
"""
try:
kwargs['checkPID'] = bool(int(kwargs.pop('lockfile')))
except KeyError:
pass
try:
kwargs['timeout'] = int(kwargs['timeout'])
except KeyError:
pass
return kwargs
_clientParsers = {
'TCP': _parseClientTCP,
'SSL': _parseClientSSL,
'UNIX': _parseClientUNIX,
}
def clientFromString(reactor, description):
"""
Construct a client endpoint from a description string.
Client description strings are much like server description strings,
although they take all of their arguments as keywords, since even the
simplest client endpoint (plain TCP) requires at least 2 arguments (host
and port) to construct.
You can create a TCP client endpoint with the 'host' and 'port' arguments,
like so::
clientFromString(reactor, "tcp:host=www.example.com:port=80")
or an SSL client endpoint with those arguments, plus the arguments used by
the server SSL, for a client certificate::
clientFromString(reactor, "ssl:host=web.example.com:port=443:"
"privateKey=foo.pem:certKey=foo.pem")
    To specify your certificate trust roots, you can identify a directory with
PEM files in it with the C{caCertsDir} argument::
clientFromString(reactor, "ssl:host=web.example.com:port=443:"
"caCertsDir=/etc/ssl/certs")
This function is also extensible; new endpoint types may be registered as
L{IStreamClientEndpointStringParser} plugins. See that interface for more
information.
@param reactor: The client endpoint will be constructed with this reactor.
@param description: The strports description to parse.
@return: A new endpoint which can be used to connect with the parameters
        given by C{description}.
@rtype: L{IStreamClientEndpoint<twisted.internet.interfaces.IStreamClientEndpoint>}
@since: 10.2
"""
args, kwargs = _parse(description)
aname = args.pop(0)
name = aname.upper()
for plugin in getPlugins(IStreamClientEndpointStringParser):
if plugin.prefix.upper() == name:
return plugin.parseStreamClient(*args, **kwargs)
if name not in _clientParsers:
raise ValueError("Unknown endpoint type: %r" % (aname,))
kwargs = _clientParsers[name](*args, **kwargs)
return _endpointClientFactories[name](reactor, **kwargs)
| apache-2.0 |
gotcha/Selenium2Library | demo/demoapp/server.py | 48 | 2648 | #!/usr/bin/env python
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple HTTP server requiring only Python and no other preconditions.
Server is started by running this script with argument 'start' and
optional port number (default port 7272). Server root is the same
directory where this script is situated. The server can be stopped either by
using Ctrl-C or by running this script with argument 'stop' and the same port
number as when it was started.
"""
import os
import sys
import httplib
import BaseHTTPServer
import SimpleHTTPServer
DEFAULT_PORT = 7272
DEFAULT_HOST = 'localhost'
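# Typical invocations, assuming the script is saved as server.py (the file name
# here is illustrative):
#   python server.py start         # serve on DEFAULT_PORT (7272)
#   python server.py stop 7272     # ask the server listening on 7272 to quit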
class StoppableHttpServer(BaseHTTPServer.HTTPServer):
def serve_forever(self):
self.stop = False
while not self.stop:
try:
self.handle_request()
except KeyboardInterrupt:
break
class StoppableHttpRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_QUIT(self):
self.send_response(200)
self.end_headers()
self.server.stop = True
def do_POST(self):
        # We could also process parameters here using something like below.
# length = self.headers['Content-Length']
# print self.rfile.read(int(length))
self.do_GET()
def start_server(host=DEFAULT_HOST, port=DEFAULT_PORT):
print "Demo application starting on port %s" % port
root = os.path.dirname(os.path.abspath(__file__))
os.chdir(root)
server = StoppableHttpServer((host, int(port)), StoppableHttpRequestHandler)
server.serve_forever()
def stop_server(host=DEFAULT_HOST, port=DEFAULT_PORT):
print "Demo application on port %s stopping" % port
conn = httplib.HTTPConnection("%s:%s" % (host, port))
conn.request("QUIT", "/")
conn.getresponse()
def print_help():
print __doc__
if __name__ == '__main__':
try:
{'start': start_server,
'stop': stop_server,
'help': print_help}[sys.argv[1]](*sys.argv[2:])
except (IndexError, KeyError, TypeError):
print 'Usage: %s start|stop|help [port]' % os.path.basename(sys.argv[0])
| apache-2.0 |
jymannob/CouchPotatoServer | couchpotato/core/notifications/notifymywp.py | 25 | 2136 | from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from pynmwp import PyNMWP
import six
log = CPLog(__name__)
autoload = 'NotifyMyWP'
class NotifyMyWP(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
keys = splitString(self.conf('api_key'))
p = PyNMWP(keys, self.conf('dev_key'))
response = p.push(application = self.default_title, event = message, description = message, priority = self.conf('priority'), batch_mode = len(keys) > 1)
for key in keys:
if not response[key]['Code'] == six.u('200'):
log.error('Could not send notification to NotifyMyWindowsPhone (%s). %s', (key, response[key]['message']))
return False
return response
config = [{
'name': 'notifymywp',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'notifymywp',
'label': 'Windows Phone',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'api_key',
                    'description': 'Multiple keys separated by a comma. Maximum of 5.'
},
{
'name': 'dev_key',
'advanced': True,
},
{
'name': 'priority',
'default': 0,
'type': 'dropdown',
'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)],
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
| gpl-3.0 |
ampax/edx-platform-backup | common/test/acceptance/pages/lms/edxnotes.py | 19 | 15856 | from bok_choy.page_object import PageObject, PageLoadError, unguarded
from bok_choy.promise import BrokenPromise
from .course_page import CoursePage
from ...tests.helpers import disable_animations
from selenium.webdriver.common.action_chains import ActionChains
class NoteChild(PageObject):
url = None
BODY_SELECTOR = None
def __init__(self, browser, item_id):
super(NoteChild, self).__init__(browser)
self.item_id = item_id
def is_browser_on_page(self):
return self.q(css="{}#{}".format(self.BODY_SELECTOR, self.item_id)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `NoteChild` context
"""
return "{}#{} {}".format(
self.BODY_SELECTOR,
self.item_id,
selector,
)
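        # For instance, an EdxNotesPageGroup (BODY_SELECTOR ".note-group") with
        # an illustrative item_id of "group-1" should turn ".note-section" into
        # ".note-group#group-1 .note-section".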
def _get_element_text(self, selector):
element = self.q(css=self._bounded_selector(selector)).first
if element:
return element.text[0]
else:
return None
class EdxNotesPageGroup(NoteChild):
"""
Helper class that works with note groups on Note page of the course.
"""
BODY_SELECTOR = ".note-group"
@property
def title(self):
return self._get_element_text(".course-title")
@property
def subtitles(self):
return [section.title for section in self.children]
@property
def children(self):
children = self.q(css=self._bounded_selector('.note-section'))
return [EdxNotesPageSection(self.browser, child.get_attribute("id")) for child in children]
class EdxNotesPageSection(NoteChild):
"""
Helper class that works with note sections on Note page of the course.
"""
BODY_SELECTOR = ".note-section"
@property
def title(self):
return self._get_element_text(".course-subtitle")
@property
def children(self):
children = self.q(css=self._bounded_selector('.note'))
return [EdxNotesPageItem(self.browser, child.get_attribute("id")) for child in children]
@property
def notes(self):
return [section.text for section in self.children]
class EdxNotesPageItem(NoteChild):
"""
Helper class that works with note items on Note page of the course.
"""
BODY_SELECTOR = ".note"
UNIT_LINK_SELECTOR = "a.reference-unit-link"
def go_to_unit(self, unit_page=None):
self.q(css=self._bounded_selector(self.UNIT_LINK_SELECTOR)).click()
if unit_page is not None:
unit_page.wait_for_page()
@property
def unit_name(self):
return self._get_element_text(self.UNIT_LINK_SELECTOR)
@property
def text(self):
return self._get_element_text(".note-comment-p")
@property
def quote(self):
return self._get_element_text(".note-excerpt")
@property
def time_updated(self):
return self._get_element_text(".reference-updated-date")
class EdxNotesPageView(PageObject):
"""
Base class for EdxNotes views: Recent Activity, Location in Course, Search Results.
"""
url = None
BODY_SELECTOR = ".tab-panel"
TAB_SELECTOR = ".tab"
CHILD_SELECTOR = ".note"
CHILD_CLASS = EdxNotesPageItem
@unguarded
def visit(self):
"""
Open the page containing this page object in the browser.
Raises:
PageLoadError: The page did not load successfully.
Returns:
PageObject
"""
self.q(css=self.TAB_SELECTOR).first.click()
try:
return self.wait_for_page()
except (BrokenPromise):
raise PageLoadError("Timed out waiting to load page '{!r}'".format(self))
def is_browser_on_page(self):
return all([
self.q(css="{}".format(self.BODY_SELECTOR)).present,
self.q(css="{}.is-active".format(self.TAB_SELECTOR)).present,
not self.q(css=".ui-loading").visible,
])
@property
def is_closable(self):
"""
Indicates if tab is closable or not.
"""
return self.q(css="{} .action-close".format(self.TAB_SELECTOR)).present
def close(self):
"""
Closes the tab.
"""
self.q(css="{} .action-close".format(self.TAB_SELECTOR)).first.click()
@property
def children(self):
"""
Returns all notes on the page.
"""
children = self.q(css=self.CHILD_SELECTOR)
return [self.CHILD_CLASS(self.browser, child.get_attribute("id")) for child in children]
class RecentActivityView(EdxNotesPageView):
"""
Helper class for Recent Activity view.
"""
BODY_SELECTOR = "#recent-panel"
TAB_SELECTOR = ".tab#view-recent-activity"
class CourseStructureView(EdxNotesPageView):
"""
Helper class for Location in Course view.
"""
BODY_SELECTOR = "#structure-panel"
TAB_SELECTOR = ".tab#view-course-structure"
CHILD_SELECTOR = ".note-group"
CHILD_CLASS = EdxNotesPageGroup
class SearchResultsView(EdxNotesPageView):
"""
Helper class for Search Results view.
"""
BODY_SELECTOR = "#search-results-panel"
TAB_SELECTOR = ".tab#view-search-results"
class EdxNotesPage(CoursePage):
"""
EdxNotes page.
"""
url_path = "edxnotes/"
MAPPING = {
"recent": RecentActivityView,
"structure": CourseStructureView,
"search": SearchResultsView,
}
def __init__(self, *args, **kwargs):
super(EdxNotesPage, self).__init__(*args, **kwargs)
self.current_view = self.MAPPING["recent"](self.browser)
def is_browser_on_page(self):
return self.q(css=".wrapper-student-notes").present
def switch_to_tab(self, tab_name):
"""
Switches to the appropriate tab `tab_name(str)`.
"""
self.current_view = self.MAPPING[tab_name](self.browser)
self.current_view.visit()
def close_tab(self, tab_name):
"""
Closes the tab `tab_name(str)`.
"""
self.current_view.close()
self.current_view = self.MAPPING["recent"](self.browser)
def search(self, text):
"""
Runs search with `text(str)` query.
"""
self.q(css="#search-notes-form #search-notes-input").first.fill(text)
self.q(css='#search-notes-form .search-notes-submit').first.click()
# Frontend will automatically switch to Search results tab when search
# is running, so the view also needs to be changed.
self.current_view = self.MAPPING["search"](self.browser)
if text.strip():
self.current_view.wait_for_page()
@property
def tabs(self):
"""
Returns all tabs on the page.
"""
tabs = self.q(css=".tabs .tab-label")
if tabs:
return map(lambda x: x.replace("Current tab\n", ""), tabs.text)
else:
return None
@property
def is_error_visible(self):
"""
Indicates whether error message is visible or not.
"""
return self.q(css=".inline-error").visible
@property
def error_text(self):
"""
Returns error message.
"""
element = self.q(css=".inline-error").first
if element and self.is_error_visible:
return element.text[0]
else:
return None
@property
def notes(self):
"""
Returns all notes on the page.
"""
children = self.q(css='.note')
return [EdxNotesPageItem(self.browser, child.get_attribute("id")) for child in children]
@property
def groups(self):
"""
Returns all groups on the page.
"""
children = self.q(css='.note-group')
return [EdxNotesPageGroup(self.browser, child.get_attribute("id")) for child in children]
@property
def sections(self):
"""
Returns all sections on the page.
"""
children = self.q(css='.note-section')
return [EdxNotesPageSection(self.browser, child.get_attribute("id")) for child in children]
@property
def no_content_text(self):
"""
Returns no content message.
"""
element = self.q(css=".is-empty").first
if element:
return element.text[0]
else:
return None
class EdxNotesUnitPage(CoursePage):
"""
Page for the Unit with EdxNotes.
"""
url_path = "courseware/"
def is_browser_on_page(self):
return self.q(css="body.courseware .edx-notes-wrapper").present
def move_mouse_to(self, selector):
"""
Moves mouse to the element that matches `selector(str)`.
"""
body = self.q(css=selector)[0]
ActionChains(self.browser).move_to_element(body).release().perform()
return self
def click(self, selector):
"""
Clicks on the element that matches `selector(str)`.
"""
self.q(css=selector).first.click()
return self
def toggle_visibility(self):
"""
Clicks on the "Show notes" checkbox.
"""
self.q(css=".action-toggle-notes").first.click()
return self
@property
def components(self):
"""
Returns a list of annotatable components.
"""
components = self.q(css=".edx-notes-wrapper")
return [AnnotatableComponent(self.browser, component.get_attribute("id")) for component in components]
@property
def notes(self):
"""
Returns a list of notes for the page.
"""
notes = []
for component in self.components:
notes.extend(component.notes)
return notes
def refresh(self):
"""
Refreshes the page and returns a list of annotatable components.
"""
self.browser.refresh()
return self.components
class AnnotatableComponent(NoteChild):
"""
Helper class that works with annotatable components.
"""
BODY_SELECTOR = ".edx-notes-wrapper"
@property
def notes(self):
"""
Returns a list of notes for the component.
"""
notes = self.q(css=self._bounded_selector(".annotator-hl"))
return [EdxNoteHighlight(self.browser, note, self.item_id) for note in notes]
def create_note(self, selector=".annotate-id"):
"""
Create the note by the selector, return a context manager that will
show and save the note popup.
"""
for element in self.q(css=self._bounded_selector(selector)):
note = EdxNoteHighlight(self.browser, element, self.item_id)
note.select_and_click_adder()
yield note
note.save()
def edit_note(self, selector=".annotator-hl"):
"""
Edit the note by the selector, return a context manager that will
show and save the note popup.
"""
for element in self.q(css=self._bounded_selector(selector)):
note = EdxNoteHighlight(self.browser, element, self.item_id)
note.show().edit()
yield note
note.save()
def remove_note(self, selector=".annotator-hl"):
"""
Removes the note by the selector.
"""
for element in self.q(css=self._bounded_selector(selector)):
note = EdxNoteHighlight(self.browser, element, self.item_id)
note.show().remove()
class EdxNoteHighlight(NoteChild):
"""
Helper class that works with notes.
"""
BODY_SELECTOR = ""
ADDER_SELECTOR = ".annotator-adder"
VIEWER_SELECTOR = ".annotator-viewer"
EDITOR_SELECTOR = ".annotator-editor"
def __init__(self, browser, element, parent_id):
super(EdxNoteHighlight, self).__init__(browser, parent_id)
self.element = element
self.item_id = parent_id
disable_animations(self)
@property
def is_visible(self):
"""
Returns True if the note is visible.
"""
viewer_is_visible = self.q(css=self._bounded_selector(self.VIEWER_SELECTOR)).visible
editor_is_visible = self.q(css=self._bounded_selector(self.EDITOR_SELECTOR)).visible
return viewer_is_visible or editor_is_visible
def wait_for_adder_visibility(self):
"""
Waiting for visibility of note adder button.
"""
self.wait_for_element_visibility(
self._bounded_selector(self.ADDER_SELECTOR), "Adder is visible."
)
def wait_for_viewer_visibility(self):
"""
Waiting for visibility of note viewer.
"""
self.wait_for_element_visibility(
self._bounded_selector(self.VIEWER_SELECTOR), "Note Viewer is visible."
)
def wait_for_editor_visibility(self):
"""
Waiting for visibility of note editor.
"""
self.wait_for_element_visibility(
self._bounded_selector(self.EDITOR_SELECTOR), "Note Editor is visible."
)
def wait_for_notes_invisibility(self, text="Notes are hidden"):
"""
Waiting for invisibility of all notes.
"""
selector = self._bounded_selector(".annotator-outer")
self.wait_for_element_invisibility(selector, text)
def select_and_click_adder(self):
"""
Creates selection for the element and clicks `add note` button.
"""
ActionChains(self.browser).double_click(self.element).release().perform()
self.wait_for_adder_visibility()
self.q(css=self._bounded_selector(self.ADDER_SELECTOR)).first.click()
self.wait_for_editor_visibility()
return self
def click_on_highlight(self):
"""
Clicks on the highlighted text.
"""
ActionChains(self.browser).move_to_element(self.element).click().release().perform()
return self
def click_on_viewer(self):
"""
Clicks on the note viewer.
"""
self.q(css=self._bounded_selector(self.VIEWER_SELECTOR)).first.click()
return self
def show(self):
"""
Hover over highlighted text -> shows note.
"""
ActionChains(self.browser).move_to_element(self.element).release().perform()
self.wait_for_viewer_visibility()
return self
def cancel(self):
"""
Clicks cancel button.
"""
self.q(css=self._bounded_selector(".annotator-cancel")).first.click()
self.wait_for_notes_invisibility("Note is canceled.")
return self
def save(self):
"""
Clicks save button.
"""
self.q(css=self._bounded_selector(".annotator-save")).first.click()
self.wait_for_notes_invisibility("Note is saved.")
self.wait_for_ajax()
return self
def remove(self):
"""
Clicks delete button.
"""
self.q(css=self._bounded_selector(".annotator-delete")).first.click()
self.wait_for_notes_invisibility("Note is removed.")
self.wait_for_ajax()
return self
def edit(self):
"""
Clicks edit button.
"""
self.q(css=self._bounded_selector(".annotator-edit")).first.click()
self.wait_for_editor_visibility()
return self
@property
def text(self):
"""
Returns text of the note.
"""
self.show()
element = self.q(css=self._bounded_selector(".annotator-annotation > div"))
if element:
text = element.text[0].strip()
else:
text = None
self.q(css=("body")).first.click()
self.wait_for_notes_invisibility()
return text
@text.setter
def text(self, value):
"""
Sets text for the note.
"""
self.q(css=self._bounded_selector(".annotator-item textarea")).first.fill(value)
| agpl-3.0 |
redhat-openstack/nova | nova/api/openstack/compute/schemas/v3/flavor_access.py | 110 | 1710 | # Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
add_tenant_access = {
'type': 'object',
'properties': {
'addTenantAccess': {
'type': 'object',
'properties': {
'tenant': {
# defined from project_id in instance_type_projects table
'type': 'string', 'minLength': 1, 'maxLength': 255,
},
},
'required': ['tenant'],
'additionalProperties': False,
},
},
'required': ['addTenantAccess'],
'additionalProperties': False,
}
remove_tenant_access = {
'type': 'object',
'properties': {
'removeTenantAccess': {
'type': 'object',
'properties': {
'tenant': {
# defined from project_id in instance_type_projects table
'type': 'string', 'minLength': 1, 'maxLength': 255,
},
},
'required': ['tenant'],
'additionalProperties': False,
},
},
'required': ['removeTenantAccess'],
'additionalProperties': False,
}
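# For illustration, request bodies that should validate against the schemas
# above (the tenant value is a made-up example):
#   {"addTenantAccess": {"tenant": "example-project-id"}}
#   {"removeTenantAccess": {"tenant": "example-project-id"}}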
| apache-2.0 |
ericzhou2008/zulip | api/integrations/perforce/zulip_change-commit.py | 114 | 2744 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2012-2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''Zulip notification change-commit hook.
In Perforce, the "change-commit" trigger is fired after the changelist metadata
has been created, files have been transferred, and the changelist committed to
the depot database.
This specific trigger expects command-line arguments in the form:
%change% %changeroot%
For example:
1234 //depot/security/src/
'''
import os
import sys
import os.path
import git_p4
__version__ = "0.1"
sys.path.insert(0, os.path.dirname(__file__))
import zulip_perforce_config as config
if config.ZULIP_API_PATH is not None:
sys.path.append(config.ZULIP_API_PATH)
import zulip
client = zulip.Client(
email=config.ZULIP_USER,
site=config.ZULIP_SITE,
api_key=config.ZULIP_API_KEY,
client="ZulipPerforce/" + __version__)
try:
changelist = int(sys.argv[1])
changeroot = sys.argv[2]
except IndexError:
print >> sys.stderr, "Wrong number of arguments.\n\n",
print >> sys.stderr, __doc__
sys.exit(-1)
except ValueError:
print >> sys.stderr, "First argument must be an integer.\n\n",
print >> sys.stderr, __doc__
sys.exit(-1)
metadata = git_p4.p4_describe(changelist)
destination = config.commit_notice_destination(changeroot, changelist)
if destination is None:
# Don't forward the notice anywhere
sys.exit(0)
message = """**{0}** committed revision @{1} to `{2}`.
> {3}
""".format(metadata["user"], metadata["change"], changeroot, metadata["desc"])
message_data = {
"type": "stream",
"to": destination["stream"],
"subject": destination["subject"],
"content": message,
}
client.send_message(message_data)
| apache-2.0 |
redhat-openstack/neutron | neutron/plugins/mlnx/agent/eswitch_neutron_agent.py | 8 | 17266 | # Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo.config import cfg
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as q_constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils as q_utils
from neutron import context
from neutron.openstack.common.gettextutils import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants as p_const
from neutron.plugins.mlnx.agent import utils
from neutron.plugins.mlnx.common import config # noqa
from neutron.plugins.mlnx.common import exceptions
LOG = logging.getLogger(__name__)
class EswitchManager(object):
def __init__(self, interface_mappings, endpoint, timeout):
self.utils = utils.EswitchUtils(endpoint, timeout)
self.interface_mappings = interface_mappings
self.network_map = {}
self.utils.define_fabric_mappings(interface_mappings)
def get_port_id_by_mac(self, port_mac):
for network_id, data in self.network_map.iteritems():
for port in data['ports']:
if port['port_mac'] == port_mac:
return port['port_id']
err_msg = _("Agent cache inconsistency - port id "
"is not stored for %s") % port_mac
LOG.error(err_msg)
raise exceptions.MlnxException(err_msg=err_msg)
def get_vnics_mac(self):
return set(self.utils.get_attached_vnics().keys())
def vnic_port_exists(self, port_mac):
return port_mac in self.utils.get_attached_vnics()
def remove_network(self, network_id):
if network_id in self.network_map:
del self.network_map[network_id]
else:
LOG.debug(_("Network %s not defined on Agent."), network_id)
def port_down(self, network_id, physical_network, port_mac):
"""Sets port to down.
Check internal network map for port data.
If port exists set port to Down
"""
for network_id, data in self.network_map.iteritems():
for port in data['ports']:
if port['port_mac'] == port_mac:
self.utils.port_down(physical_network, port_mac)
return
LOG.info(_('Network %s is not available on this agent'), network_id)
def port_up(self, network_id, network_type,
physical_network, seg_id, port_id, port_mac):
"""Sets port to up.
Update internal network map with port data.
- Check if vnic defined
- configure eswitch vport
- set port to Up
"""
LOG.debug(_("Connecting port %s"), port_id)
if network_id not in self.network_map:
self.provision_network(port_id, port_mac,
network_id, network_type,
physical_network, seg_id)
net_map = self.network_map[network_id]
net_map['ports'].append({'port_id': port_id, 'port_mac': port_mac})
if network_type == p_const.TYPE_VLAN:
LOG.info(_('Binding Segmentation ID %(seg_id)s'
                       ' to eSwitch for vNIC mac_address %(mac)s'),
{'seg_id': seg_id,
'mac': port_mac})
self.utils.set_port_vlan_id(physical_network,
seg_id,
port_mac)
self.utils.port_up(physical_network, port_mac)
else:
LOG.error(_('Unsupported network type %s'), network_type)
def port_release(self, port_mac):
"""Clear port configuration from eSwitch."""
for network_id, net_data in self.network_map.iteritems():
for port in net_data['ports']:
if port['port_mac'] == port_mac:
self.utils.port_release(net_data['physical_network'],
port['port_mac'])
return
LOG.info(_('Port_mac %s is not available on this agent'), port_mac)
def provision_network(self, port_id, port_mac,
network_id, network_type,
physical_network, segmentation_id):
LOG.info(_("Provisioning network %s"), network_id)
if network_type == p_const.TYPE_VLAN:
LOG.debug(_("Creating VLAN Network"))
else:
LOG.error(_("Unknown network type %(network_type)s "
"for network %(network_id)s"),
{'network_type': network_type,
'network_id': network_id})
return
data = {
'physical_network': physical_network,
'network_type': network_type,
'ports': [],
'vlan_id': segmentation_id}
self.network_map[network_id] = data
class MlnxEswitchRpcCallbacks(n_rpc.RpcCallback,
sg_rpc.SecurityGroupAgentRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
RPC_API_VERSION = '1.1'
def __init__(self, context, agent):
super(MlnxEswitchRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.eswitch = agent.eswitch
self.sg_agent = agent
def network_delete(self, context, **kwargs):
LOG.debug(_("network_delete received"))
network_id = kwargs.get('network_id')
if not network_id:
LOG.warning(_("Invalid Network ID, cannot remove Network"))
else:
LOG.debug(_("Delete network %s"), network_id)
self.eswitch.remove_network(network_id)
def port_update(self, context, **kwargs):
port = kwargs.get('port')
self.agent.add_port_update(port['mac_address'])
LOG.debug("port_update message processed for port with mac %s",
port['mac_address'])
class MlnxEswitchPluginApi(agent_rpc.PluginApi,
sg_rpc.SecurityGroupServerRpcApiMixin):
pass
class MlnxEswitchNeutronAgent(sg_rpc.SecurityGroupAgentRpcMixin):
# Set RPC API version to 1.0 by default.
#RPC_API_VERSION = '1.0'
def __init__(self, interface_mapping):
self._polling_interval = cfg.CONF.AGENT.polling_interval
self._setup_eswitches(interface_mapping)
configurations = {'interface_mappings': interface_mapping}
self.agent_state = {
'binary': 'neutron-mlnx-agent',
'host': cfg.CONF.host,
'topic': q_constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': q_constants.AGENT_TYPE_MLNX,
'start_flag': True}
# Stores port update notifications for processing in main rpc loop
self.updated_ports = set()
self._setup_rpc()
self.init_firewall()
def _setup_eswitches(self, interface_mapping):
daemon = cfg.CONF.ESWITCH.daemon_endpoint
timeout = cfg.CONF.ESWITCH.request_timeout
self.eswitch = EswitchManager(interface_mapping, daemon, timeout)
def _report_state(self):
try:
devices = len(self.eswitch.get_vnics_mac())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_("Failed reporting state!"))
def _setup_rpc(self):
self.agent_id = 'mlnx-agent.%s' % socket.gethostname()
LOG.info(_("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.plugin_rpc = MlnxEswitchPluginApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
self.context = context.get_admin_context_without_session()
# Handle updates from service
self.endpoints = [MlnxEswitchRpcCallbacks(self.context, self)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def add_port_update(self, port):
self.updated_ports.add(port)
def scan_ports(self, previous, sync):
cur_ports = self.eswitch.get_vnics_mac()
port_info = {'current': cur_ports}
updated_ports = self.updated_ports
self.updated_ports = set()
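        # Take the pending update notifications atomically; anything arriving
        # while this iteration runs lands in the fresh set for the next loop.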
if sync:
# Either it's the first iteration or previous iteration had
# problems.
port_info['added'] = cur_ports
port_info['removed'] = ((previous['removed'] | previous['current'])
- cur_ports)
port_info['updated'] = ((previous['updated'] | updated_ports)
& cur_ports)
else:
# Shouldn't process updates for not existing ports
port_info['added'] = cur_ports - previous['current']
port_info['removed'] = previous['current'] - cur_ports
port_info['updated'] = updated_ports & cur_ports
return port_info
def process_network_ports(self, port_info):
resync_a = False
resync_b = False
device_added_updated = port_info['added'] | port_info['updated']
if device_added_updated:
resync_a = self.treat_devices_added_or_updated(
device_added_updated)
if port_info['removed']:
resync_b = self.treat_devices_removed(port_info['removed'])
        # If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_vif_port(self, port_id, port_mac,
network_id, network_type,
physical_network, segmentation_id,
admin_state_up):
if self.eswitch.vnic_port_exists(port_mac):
if admin_state_up:
self.eswitch.port_up(network_id,
network_type,
physical_network,
segmentation_id,
port_id,
port_mac)
else:
self.eswitch.port_down(network_id, physical_network, port_mac)
else:
LOG.debug(_("No port %s defined on agent."), port_id)
def treat_devices_added_or_updated(self, devices):
try:
devs_details_list = self.plugin_rpc.get_devices_details_list(
self.context,
devices,
self.agent_id)
except Exception as e:
LOG.debug("Unable to get device details for devices "
"with MAC address %(devices)s: due to %(exc)s",
{'devices': devices, 'exc': e})
# resync is needed
return True
for dev_details in devs_details_list:
device = dev_details['device']
LOG.info(_("Adding or updating port with mac %s"), device)
if 'port_id' in dev_details:
LOG.info(_("Port %s updated"), device)
LOG.debug("Device details %s", str(dev_details))
self.treat_vif_port(dev_details['port_id'],
dev_details['device'],
dev_details['network_id'],
dev_details['network_type'],
dev_details['physical_network'],
dev_details['segmentation_id'],
dev_details['admin_state_up'])
if dev_details.get('admin_state_up'):
LOG.debug("Setting status for %s to UP", device)
self.plugin_rpc.update_device_up(
self.context, device, self.agent_id)
else:
LOG.debug("Setting status for %s to DOWN", device)
self.plugin_rpc.update_device_down(
self.context, device, self.agent_id)
else:
LOG.debug("Device with mac_address %s not defined "
"on Neutron Plugin", device)
return False
def treat_devices_removed(self, devices):
resync = False
for device in devices:
LOG.info(_("Removing device with mac_address %s"), device)
try:
port_id = self.eswitch.get_port_id_by_mac(device)
dev_details = self.plugin_rpc.update_device_down(self.context,
port_id,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug(_("Removing port failed for device %(device)s "
"due to %(exc)s"), {'device': device, 'exc': e})
resync = True
continue
if dev_details['exists']:
LOG.info(_("Port %s updated."), device)
else:
LOG.debug(_("Device %s not defined on plugin"), device)
self.eswitch.port_release(device)
return resync
def _port_info_has_changes(self, port_info):
return (port_info['added'] or
port_info['removed'] or
port_info['updated'])
def daemon_loop(self):
LOG.info(_("eSwitch Agent Started!"))
sync = True
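        # Start in sync mode so the first iteration treats every attached vNIC
        # as newly added.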
port_info = {'current': set(),
'added': set(),
'removed': set(),
'updated': set()}
while True:
start = time.time()
try:
port_info = self.scan_ports(previous=port_info, sync=sync)
except exceptions.RequestTimeout:
LOG.exception(_("Request timeout in agent event loop "
"eSwitchD is not responding - exiting..."))
raise SystemExit(1)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
sync = False
if self._port_info_has_changes(port_info):
LOG.debug("Starting to process devices in:%s", port_info)
try:
sync = self.process_network_ports(port_info)
except Exception:
LOG.exception(_LE("Error in agent event loop"))
sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self._polling_interval):
time.sleep(self._polling_interval - elapsed)
else:
LOG.debug(_("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)"),
{'polling_interval': self._polling_interval,
'elapsed': elapsed})
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
interface_mappings = q_utils.parse_mappings(
cfg.CONF.ESWITCH.physical_interface_mappings)
except ValueError as e:
LOG.error(_("Parsing physical_interface_mappings failed: %s."
" Agent terminated!"), e)
sys.exit(1)
LOG.info(_("Interface mappings: %s"), interface_mappings)
try:
agent = MlnxEswitchNeutronAgent(interface_mappings)
except Exception as e:
LOG.error(_("Failed on Agent initialisation : %s."
" Agent terminated!"), e)
sys.exit(1)
# Start everything.
LOG.info(_("Agent initialised successfully, now running... "))
agent.daemon_loop()
sys.exit(0)
if __name__ == '__main__':
main()
| apache-2.0 |
boundary/boundary-plugin-aws-redshift | boundary_aws_plugin/boundary_plugin.py | 8 | 4162 | from __future__ import (absolute_import, division, print_function, unicode_literals)
import logging
import datetime
import time
import socket
import json
import multiprocessing
from contextlib import contextmanager
import sys
import os
HOSTNAME = socket.gethostname()
metric_log_file = None
plugin_params = None
keepalive_process = None
keepalive_lock = None
"""
If the plugin doesn't generate any output for 30 seconds (hard-coded), the
Boundary Relay thinks we're dead and kills us. Because we may not have any
data to output for much longer than that, we workaround this by outputting
a bogus metric every so often. This constant controls the delay between
bogus metrics; it should be significantly less than 30 seconds to prevent
any timing issues.
"""
KEEPALIVE_INTERVAL = 15
def log_metrics_to_file(filename):
"""
Logs all reported metrics to a file for debugging purposes.
@param filename File name to log to; specify None to disable logging.
"""
global metric_log_file
metric_log_file = filename
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.days * 86400 + delta.seconds + delta.microseconds / 1e6
def unix_time_millis(dt):
return unix_time(dt) * 1000.0
@contextmanager
def maybe_lock(lock):
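    # Acquires @lock around the block when one is given, so metric lines from
    # the main process and the keepalive subprocess don't interleave on stdout.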
if lock: lock.acquire()
yield
if lock: lock.release()
return
def boundary_report_metric(name, value, source=None, timestamp=None):
"""
Reports a metric to the Boundary relay.
@param name Metric name, as defined in the plugin's plugin.json file.
@param value Metric value, should be a number.
@param source Metric source. Defaults to the machine's hostname.
@param timestamp Timestamp of the metric as a Python datetime object. Defaults to none
(Boundary uses the current time in that case).
"""
with maybe_lock(keepalive_lock) as _:
source = source or HOSTNAME
if timestamp:
timestamp = unix_time_millis(timestamp)
out = "%s %s %s%s" % (name, value, source, (' %d' % timestamp) if timestamp else '')
print(out)
# Flush stdout before we release the lock so output doesn't get intermixed
sys.stdout.flush()
global metric_log_file
if metric_log_file:
with open(metric_log_file, 'a') as f:
f.write(out + "\n")
def report_alive():
"""
Reports a bogus metric just so the Boundary Relay doesn't think we're dead.
See notes on KEEPALIVE_INTERVAL for more information.
"""
boundary_report_metric('BOGUS_METRIC', 0)
def parse_params():
"""
Parses and returns the contents of the plugin's "param.json" file.
"""
global plugin_params
if not plugin_params:
with open('param.json') as f:
plugin_params = json.loads(f.read())
return plugin_params
def sleep_interval():
"""
Sleeps for the plugin's poll interval, as configured in the plugin's parameters.
"""
params = parse_params()
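    # "pollInterval" is expressed in milliseconds in param.json (default 1000, i.e. 1s).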
time.sleep(float(params.get("pollInterval", 1000) / 1000))
def __keepalive_process_main(parent_pid):
# Workaround: on Linux, the Boundary Relay's sends SIGTERM to kill the plugin, which kills the main process but
# doesn't kill the keepalive process. We work around this by identifying that our parent has died (and
# accordingly, our parent is now init) and killing ourselves.
# Note that os.getppid() doesn't exist on Windows, hence the getattr workaround.
while parent_pid == getattr(os, 'getppid', lambda: parent_pid)():
report_alive()
time.sleep(KEEPALIVE_INTERVAL)
def start_keepalive_subprocess():
"""
Starts the subprocess that keeps us alive by reporting bogus metrics.
This function should be called only once on plugin startup.
See notes on KEEPALIVE_INTERVAL for more information.
"""
global keepalive_lock, keepalive_process
assert not keepalive_lock and not keepalive_process
keepalive_lock = multiprocessing.Lock()
keepalive_process = multiprocessing.Process(target=__keepalive_process_main, args=(os.getpid(),))
keepalive_process.start()
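# Hypothetical plugin loop built on these helpers (metric name and data source
# are illustrative, not part of this module):
#     if __name__ == '__main__':
#         start_keepalive_subprocess()
#         while True:
#             boundary_report_metric('EXAMPLE_METRIC', read_current_value())
#             sleep_interval()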
| apache-2.0 |
musicrighter/CIS422-P2 | env/lib/python3.4/site-packages/pymongo/periodic_executor.py | 16 | 4679 | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Run a target function on a background thread."""
import atexit
import threading
import time
import weakref
from pymongo import thread_util
from pymongo.monotonic import time as _time
class PeriodicExecutor(object):
def __init__(self, condition_class, interval, min_interval, target):
""""Run a target function periodically on a background thread.
If the target's return value is false, the executor stops.
:Parameters:
- `condition_class`: A class like threading.Condition.
- `interval`: Seconds between calls to `target`.
- `min_interval`: Minimum seconds between calls if `wake` is
called very often.
- `target`: A function.
"""
self._event = thread_util.Event(condition_class)
self._interval = interval
self._min_interval = min_interval
self._target = target
self._stopped = False
self._thread = None
def open(self):
"""Start. Multiple calls have no effect.
Not safe to call from multiple threads at once.
"""
self._stopped = False
started = False
try:
started = self._thread and self._thread.is_alive()
except ReferenceError:
# Thread terminated.
pass
if not started:
thread = threading.Thread(target=self._run)
thread.daemon = True
self._thread = weakref.proxy(thread)
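            # Keep only a weak proxy to the thread; once the thread terminates
            # and is collected, touching it raises ReferenceError, which open()
            # and join() treat as "thread already gone".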
_register_executor(self)
thread.start()
def close(self, dummy=None):
"""Stop. To restart, call open().
The dummy parameter allows an executor's close method to be a weakref
callback; see monitor.py.
Since this can be called from a weakref callback during garbage
collection it must take no locks! That means it cannot call wake().
"""
self._stopped = True
def join(self, timeout=None):
if self._thread is not None:
try:
self._thread.join(timeout)
except ReferenceError:
# Thread already terminated.
pass
def wake(self):
"""Execute the target function soon."""
self._event.set()
def _run(self):
while not self._stopped:
try:
if not self._target():
self._stopped = True
break
except:
self._stopped = True
raise
deadline = _time() + self._interval
# Avoid running too frequently if wake() is called very often.
time.sleep(self._min_interval)
# Until the deadline, wake often to check if close() was called.
while not self._stopped and _time() < deadline:
# Our Event's wait returns True if set, else False.
if self._event.wait(0.1):
# Someone called wake().
break
self._event.clear()
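# Hypothetical usage sketch (illustrative only; PyMongo drives these executors
# internally from its monitors):
#     executor = PeriodicExecutor(threading.Condition, interval=10,
#                                 min_interval=0.5, target=check_server)
#     executor.open()    # start the daemon thread
#     executor.wake()    # run `target` soon instead of waiting out `interval`
#     executor.close()   # ask the thread to stop after its current iteration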
# _EXECUTORS has a weakref to each running PeriodicExecutor. Once started,
# an executor is kept alive by a strong reference from its thread and perhaps
# from other objects. When the thread dies and all other referrers are freed,
# the executor is freed and removed from _EXECUTORS. If any threads are
# running when the interpreter begins to shut down, we try to halt and join
# them to avoid spurious errors.
_EXECUTORS = set()
def _register_executor(executor):
ref = weakref.ref(executor, _on_executor_deleted)
_EXECUTORS.add(ref)
def _on_executor_deleted(ref):
_EXECUTORS.remove(ref)
def _shutdown_executors():
# Copy the set. Stopping threads has the side effect of removing executors.
executors = list(_EXECUTORS)
# First signal all executors to close...
for ref in executors:
executor = ref()
if executor:
executor.close()
# ...then try to join them.
for ref in executors:
executor = ref()
if executor:
executor.join(1)
executor = None
atexit.register(_shutdown_executors)
| artistic-2.0 |
nexiles/odoo | addons/l10n_be_coda/wizard/account_coda_import.py | 255 | 24127 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import tools
import logging
_logger = logging.getLogger(__name__)
class account_coda_import(osv.osv_memory):
_name = 'account.coda.import'
_description = 'Import CODA File'
_columns = {
'coda_data': fields.binary('CODA File', required=True),
'coda_fname': fields.char('CODA Filename', required=True),
'note': fields.text('Log'),
}
_defaults = {
'coda_fname': 'coda.txt',
}
def coda_parsing(self, cr, uid, ids, context=None, batch=False, codafile=None, codafilename=None):
if context is None:
context = {}
if batch:
codafile = str(codafile)
codafilename = codafilename
else:
data = self.browse(cr, uid, ids)[0]
try:
codafile = data.coda_data
codafilename = data.coda_fname
except:
raise osv.except_osv(_('Error'), _('Wizard in incorrect state. Please hit the Cancel button'))
return {}
recordlist = unicode(base64.decodestring(codafile), 'windows-1252', 'strict').split('\n')
statements = []
globalisation_comm = {}
for line in recordlist:
if not line:
pass
elif line[0] == '0':
#Begin of a new Bank statement
statement = {}
statements.append(statement)
statement['version'] = line[127]
if statement['version'] not in ['1', '2']:
raise osv.except_osv(_('Error') + ' R001', _('CODA V%s statements are not supported, please contact your bank') % statement['version'])
statement['globalisation_stack'] = []
statement['lines'] = []
statement['date'] = time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT, time.strptime(rmspaces(line[5:11]), '%d%m%y'))
statement['separateApplication'] = rmspaces(line[83:88])
elif line[0] == '1':
#Statement details
if statement['version'] == '1':
statement['acc_number'] = rmspaces(line[5:17])
statement['currency'] = rmspaces(line[18:21])
elif statement['version'] == '2':
if line[1] == '0': # Belgian bank account BBAN structure
statement['acc_number'] = rmspaces(line[5:17])
statement['currency'] = rmspaces(line[18:21])
elif line[1] == '1': # foreign bank account BBAN structure
raise osv.except_osv(_('Error') + ' R1001', _('Foreign bank accounts with BBAN structure are not supported '))
elif line[1] == '2': # Belgian bank account IBAN structure
statement['acc_number'] = rmspaces(line[5:21])
statement['currency'] = rmspaces(line[39:42])
elif line[1] == '3': # foreign bank account IBAN structure
raise osv.except_osv(_('Error') + ' R1002', _('Foreign bank accounts with IBAN structure are not supported '))
else: # Something else, not supported
raise osv.except_osv(_('Error') + ' R1003', _('Unsupported bank account structure '))
statement['journal_id'] = False
statement['bank_account'] = False
# Belgian Account Numbers are composed of 12 digits.
# In OpenERP, the user can fill the bank number in any format: With or without IBan code, with or without spaces, with or without '-'
# The two following sql requests handle those cases.
if len(statement['acc_number']) >= 12:
# If the Account Number is >= 12 digits, it is mostlikely a Belgian Account Number (With or without IBAN).
# The following request try to find the Account Number using a 'like' operator.
# So, if the Account Number is stored with IBAN code, it can be found thanks to this.
cr.execute("select id from res_partner_bank where replace(replace(acc_number,' ',''),'-','') like %s", ('%' + statement['acc_number'] + '%',))
else:
# This case is necessary to avoid cases like the Account Number in the CODA file is set to a single or few digits,
# and so a 'like' operator would return the first account number in the database which matches.
cr.execute("select id from res_partner_bank where replace(replace(acc_number,' ',''),'-','') = %s", (statement['acc_number'],))
bank_ids = [id[0] for id in cr.fetchall()]
# Filter bank accounts which are not allowed
bank_ids = self.pool.get('res.partner.bank').search(cr, uid, [('id', 'in', bank_ids)])
if bank_ids and len(bank_ids) > 0:
bank_accs = self.pool.get('res.partner.bank').browse(cr, uid, bank_ids)
for bank_acc in bank_accs:
if bank_acc.journal_id.id and ((bank_acc.journal_id.currency.id and bank_acc.journal_id.currency.name == statement['currency']) or (not bank_acc.journal_id.currency.id and bank_acc.journal_id.company_id.currency_id.name == statement['currency'])):
statement['journal_id'] = bank_acc.journal_id
statement['bank_account'] = bank_acc
break
if not statement['bank_account']:
raise osv.except_osv(_('Error') + ' R1004', _("No matching Bank Account (with Account Journal) found.\n\nPlease set-up a Bank Account with as Account Number '%s' and as Currency '%s' and an Account Journal.") % (statement['acc_number'], statement['currency']))
statement['description'] = rmspaces(line[90:125])
statement['balance_start'] = float(rmspaces(line[43:58])) / 1000
if line[42] == '1': #1 = Debit, the starting balance is negative
statement['balance_start'] = - statement['balance_start']
statement['balance_start_date'] = time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT, time.strptime(rmspaces(line[58:64]), '%d%m%y'))
statement['accountHolder'] = rmspaces(line[64:90])
statement['paperSeqNumber'] = rmspaces(line[2:5])
statement['codaSeqNumber'] = rmspaces(line[125:128])
elif line[0] == '2':
if line[1] == '1':
#New statement line
statementLine = {}
statementLine['ref'] = rmspaces(line[2:10])
statementLine['ref_move'] = rmspaces(line[2:6])
statementLine['ref_move_detail'] = rmspaces(line[6:10])
statementLine['sequence'] = len(statement['lines']) + 1
statementLine['transactionRef'] = rmspaces(line[10:31])
statementLine['debit'] = line[31] # 0 = Credit, 1 = Debit
statementLine['amount'] = float(rmspaces(line[32:47])) / 1000
if statementLine['debit'] == '1':
statementLine['amount'] = - statementLine['amount']
statementLine['transactionDate'] = time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT, time.strptime(rmspaces(line[47:53]), '%d%m%y'))
statementLine['transaction_family'] = rmspaces(line[54:56])
statementLine['transaction_code'] = rmspaces(line[56:58])
statementLine['transaction_category'] = rmspaces(line[58:61])
if line[61] == '1':
#Structured communication
statementLine['communication_struct'] = True
statementLine['communication_type'] = line[62:65]
statementLine['communication'] = '+++' + line[65:68] + '/' + line[68:72] + '/' + line[72:77] + '+++'
else:
#Non-structured communication
statementLine['communication_struct'] = False
statementLine['communication'] = rmspaces(line[62:115])
statementLine['entryDate'] = time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT, time.strptime(rmspaces(line[115:121]), '%d%m%y'))
statementLine['type'] = 'normal'
statementLine['globalisation'] = int(line[124])
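                    # A non-zero globalisation level marks grouped movements: the
                    # first record at a given level opens the group, the next one
                    # at the same level closes it (hence the stack push/pop below).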
if statementLine['globalisation'] > 0:
if statementLine['globalisation'] in statement['globalisation_stack']:
statement['globalisation_stack'].remove(statementLine['globalisation'])
else:
statementLine['type'] = 'globalisation'
statement['globalisation_stack'].append(statementLine['globalisation'])
globalisation_comm[statementLine['ref_move']] = statementLine['communication']
if not statementLine.get('communication'):
statementLine['communication'] = globalisation_comm.get(statementLine['ref_move'], '')
statement['lines'].append(statementLine)
elif line[1] == '2':
if statement['lines'][-1]['ref'][0:4] != line[2:6]:
raise osv.except_osv(_('Error') + 'R2004', _('CODA parsing error on movement data record 2.2, seq nr %s! Please report this issue via your Odoo support channel.') % line[2:10])
statement['lines'][-1]['communication'] += rmspaces(line[10:63])
statement['lines'][-1]['payment_reference'] = rmspaces(line[63:98])
statement['lines'][-1]['counterparty_bic'] = rmspaces(line[98:109])
elif line[1] == '3':
if statement['lines'][-1]['ref'][0:4] != line[2:6]:
raise osv.except_osv(_('Error') + 'R2005', _('CODA parsing error on movement data record 2.3, seq nr %s! Please report this issue via your Odoo support channel.') % line[2:10])
if statement['version'] == '1':
statement['lines'][-1]['counterpartyNumber'] = rmspaces(line[10:22])
statement['lines'][-1]['counterpartyName'] = rmspaces(line[47:73])
statement['lines'][-1]['counterpartyAddress'] = rmspaces(line[73:125])
statement['lines'][-1]['counterpartyCurrency'] = ''
else:
if line[22] == ' ':
statement['lines'][-1]['counterpartyNumber'] = rmspaces(line[10:22])
statement['lines'][-1]['counterpartyCurrency'] = rmspaces(line[23:26])
else:
statement['lines'][-1]['counterpartyNumber'] = rmspaces(line[10:44])
statement['lines'][-1]['counterpartyCurrency'] = rmspaces(line[44:47])
statement['lines'][-1]['counterpartyName'] = rmspaces(line[47:82])
statement['lines'][-1]['communication'] += rmspaces(line[82:125])
else:
# movement data record 2.x (x != 1,2,3)
raise osv.except_osv(_('Error') + 'R2006', _('\nMovement data records of type 2.%s are not supported ') % line[1])
elif line[0] == '3':
if line[1] == '1':
infoLine = {}
infoLine['entryDate'] = statement['lines'][-1]['entryDate']
infoLine['type'] = 'information'
infoLine['sequence'] = len(statement['lines']) + 1
infoLine['ref'] = rmspaces(line[2:10])
infoLine['transactionRef'] = rmspaces(line[10:31])
infoLine['transaction_family'] = rmspaces(line[32:34])
infoLine['transaction_code'] = rmspaces(line[34:36])
infoLine['transaction_category'] = rmspaces(line[36:39])
infoLine['communication'] = rmspaces(line[40:113])
statement['lines'].append(infoLine)
elif line[1] == '2':
if infoLine['ref'] != rmspaces(line[2:10]):
raise osv.except_osv(_('Error') + 'R3004', _('CODA parsing error on information data record 3.2, seq nr %s! Please report this issue via your Odoo support channel.') % line[2:10])
statement['lines'][-1]['communication'] += rmspaces(line[10:100])
elif line[1] == '3':
if infoLine['ref'] != rmspaces(line[2:10]):
raise osv.except_osv(_('Error') + 'R3005', _('CODA parsing error on information data record 3.3, seq nr %s! Please report this issue via your Odoo support channel.') % line[2:10])
statement['lines'][-1]['communication'] += rmspaces(line[10:100])
elif line[0] == '4':
comm_line = {}
comm_line['type'] = 'communication'
comm_line['sequence'] = len(statement['lines']) + 1
comm_line['ref'] = rmspaces(line[2:10])
comm_line['communication'] = rmspaces(line[32:112])
statement['lines'].append(comm_line)
elif line[0] == '8':
# new balance record
statement['debit'] = line[41]
statement['paperSeqNumber'] = rmspaces(line[1:4])
statement['balance_end_real'] = float(rmspaces(line[42:57])) / 1000
statement['balance_end_realDate'] = time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT, time.strptime(rmspaces(line[57:63]), '%d%m%y'))
if statement['debit'] == '1': # 1=Debit
statement['balance_end_real'] = - statement['balance_end_real']
if statement['balance_end_realDate']:
period_id = self.pool.get('account.period').search(cr, uid, [('company_id', '=', statement['journal_id'].company_id.id), ('date_start', '<=', statement['balance_end_realDate']), ('date_stop', '>=', statement['balance_end_realDate'])])
else:
period_id = self.pool.get('account.period').search(cr, uid, [('company_id', '=', statement['journal_id'].company_id.id), ('date_start', '<=', statement['date']), ('date_stop', '>=', statement['date'])])
if not period_id and len(period_id) == 0:
raise osv.except_osv(_('Error') + 'R0002', _("The CODA Statement New Balance date doesn't fall within a defined Accounting Period! Please create the Accounting Period for date %s for the company %s.") % (statement['balance_end_realDate'], statement['journal_id'].company_id.name))
statement['period_id'] = period_id[0]
elif line[0] == '9':
statement['balanceMin'] = float(rmspaces(line[22:37])) / 1000
statement['balancePlus'] = float(rmspaces(line[37:52])) / 1000
if not statement.get('balance_end_real'):
statement['balance_end_real'] = statement['balance_start'] + statement['balancePlus'] - statement['balanceMin']
for i, statement in enumerate(statements):
statement['coda_note'] = ''
balance_start_check_date = (len(statement['lines']) > 0 and statement['lines'][0]['entryDate']) or statement['date']
cr.execute('SELECT balance_end_real \
FROM account_bank_statement \
WHERE journal_id = %s and date <= %s \
ORDER BY date DESC,id DESC LIMIT 1', (statement['journal_id'].id, balance_start_check_date))
res = cr.fetchone()
balance_start_check = res and res[0]
if balance_start_check == None:
if statement['journal_id'].default_debit_account_id and (statement['journal_id'].default_credit_account_id == statement['journal_id'].default_debit_account_id):
balance_start_check = statement['journal_id'].default_debit_account_id.balance
else:
raise osv.except_osv(_('Error'), _("Configuration Error in journal %s!\nPlease verify the Default Debit and Credit Account settings.") % statement['journal_id'].name)
if balance_start_check != statement['balance_start']:
statement['coda_note'] = _("The CODA Statement %s Starting Balance (%.2f) does not correspond with the previous Closing Balance (%.2f) in journal %s!") % (statement['description'] + ' #' + statement['paperSeqNumber'], statement['balance_start'], balance_start_check, statement['journal_id'].name)
if not(statement.get('period_id')):
raise osv.except_osv(_('Error') + ' R3006', _(' No transactions or no period in coda file !'))
data = {
'name': statement['paperSeqNumber'],
'date': statement['date'],
'journal_id': statement['journal_id'].id,
'period_id': statement['period_id'],
'balance_start': statement['balance_start'],
'balance_end_real': statement['balance_end_real'],
}
statement['id'] = self.pool.get('account.bank.statement').create(cr, uid, data, context=context)
for line in statement['lines']:
if line['type'] == 'information':
statement['coda_note'] = "\n".join([statement['coda_note'], line['type'].title() + ' with Ref. ' + str(line['ref']), 'Date: ' + str(line['entryDate']), 'Communication: ' + line['communication'], ''])
elif line['type'] == 'communication':
statement['coda_note'] = "\n".join([statement['coda_note'], line['type'].title() + ' with Ref. ' + str(line['ref']), 'Ref: ', 'Communication: ' + line['communication'], ''])
elif line['type'] == 'normal':
note = []
if 'counterpartyName' in line and line['counterpartyName'] != '':
note.append(_('Counter Party') + ': ' + line['counterpartyName'])
else:
line['counterpartyName'] = False
if 'counterpartyNumber' in line and line['counterpartyNumber'] != '':
try:
if int(line['counterpartyNumber']) == 0:
line['counterpartyNumber'] = False
except:
pass
if line['counterpartyNumber']:
note.append(_('Counter Party Account') + ': ' + line['counterpartyNumber'])
else:
line['counterpartyNumber'] = False
if 'counterpartyAddress' in line and line['counterpartyAddress'] != '':
note.append(_('Counter Party Address') + ': ' + line['counterpartyAddress'])
partner_id = None
structured_com = False
bank_account_id = False
if line['communication_struct'] and 'communication_type' in line and line['communication_type'] == '101':
structured_com = line['communication']
if 'counterpartyNumber' in line and line['counterpartyNumber']:
account = str(line['counterpartyNumber'])
domain = [('acc_number', '=', account)]
iban = account[0:2].isalpha()
if iban:
n = 4
space_separated_account = ' '.join(account[i:i + n] for i in range(0, len(account), n))
domain = ['|', ('acc_number', '=', space_separated_account)] + domain
ids = self.pool.get('res.partner.bank').search(cr, uid, domain)
if ids:
bank_account_id = ids[0]
bank_account = self.pool.get('res.partner.bank').browse(cr, uid, bank_account_id, context=context)
line['counterpartyNumber'] = bank_account.acc_number
partner_id = bank_account.partner_id.id
else:
#create the bank account, not linked to any partner. The reconciliation will link the partner manually
#chosen at the bank statement final confirmation time.
try:
type_model, type_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'bank_normal')
type_id = self.pool.get('res.partner.bank.type').browse(cr, uid, type_id, context=context)
bank_code = type_id.code
except ValueError:
bank_code = 'bank'
bank_account_id = self.pool.get('res.partner.bank').create(cr, uid, {'acc_number': str(line['counterpartyNumber']), 'state': bank_code}, context=context)
if line.get('communication', ''):
note.append(_('Communication') + ': ' + line['communication'])
data = {
'name': structured_com or (line.get('communication', '') != '' and line['communication'] or '/'),
'note': "\n".join(note),
'date': line['entryDate'],
'amount': line['amount'],
'partner_id': partner_id,
'partner_name': line['counterpartyName'],
'statement_id': statement['id'],
'ref': line['ref'],
'sequence': line['sequence'],
'bank_account_id': bank_account_id,
}
self.pool.get('account.bank.statement.line').create(cr, uid, data, context=context)
if statement['coda_note'] != '':
self.pool.get('account.bank.statement').write(cr, uid, [statement['id']], {'coda_note': statement['coda_note']}, context=context)
model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'action_bank_reconcile_bank_statements')
action = self.pool[model].browse(cr, uid, action_id, context=context)
statements_ids = [statement['id'] for statement in statements]
return {
'name': action.name,
'tag': action.tag,
'context': {'statement_ids': statements_ids},
'type': 'ir.actions.client',
}
def rmspaces(s):
return " ".join(s.split())
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JioCloud/tempest | tempest/api/network/test_routers_negative.py | 11 | 5926 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.network import base_routers as base
from tempest import config
from tempest import test
CONF = config.CONF
class RoutersNegativeTest(base.BaseRouterTest):
@classmethod
def skip_checks(cls):
super(RoutersNegativeTest, cls).skip_checks()
if not test.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(RoutersNegativeTest, cls).resource_setup()
cls.router = cls.create_router(data_utils.rand_name('router-'))
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.tenant_cidr = (CONF.network.tenant_network_cidr
if cls._ip_version == 4 else
CONF.network.tenant_network_v6_cidr)
@test.attr(type=['negative'])
@test.idempotent_id('37a94fc0-a834-45b9-bd23-9a81d2fd1e22')
def test_router_add_gateway_invalid_network_returns_404(self):
self.assertRaises(lib_exc.NotFound,
self.client.update_router,
self.router['id'],
external_gateway_info={
'network_id': self.router['id']})
@test.attr(type=['negative'])
@test.idempotent_id('11836a18-0b15-4327-a50b-f0d9dc66bddd')
def test_router_add_gateway_net_not_external_returns_400(self):
alt_network = self.create_network(
network_name=data_utils.rand_name('router-negative-'))
sub_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
self.create_subnet(alt_network, cidr=sub_cidr)
self.assertRaises(lib_exc.BadRequest,
self.client.update_router,
self.router['id'],
external_gateway_info={
'network_id': alt_network['id']})
@test.attr(type=['negative'])
@test.idempotent_id('957751a3-3c68-4fa2-93b6-eb52ea10db6e')
def test_add_router_interfaces_on_overlapping_subnets_returns_400(self):
network01 = self.create_network(
network_name=data_utils.rand_name('router-network01-'))
network02 = self.create_network(
network_name=data_utils.rand_name('router-network02-'))
subnet01 = self.create_subnet(network01)
subnet02 = self.create_subnet(network02)
self._add_router_interface_with_subnet_id(self.router['id'],
subnet01['id'])
self.assertRaises(lib_exc.BadRequest,
self._add_router_interface_with_subnet_id,
self.router['id'],
subnet02['id'])
@test.attr(type=['negative'])
@test.idempotent_id('04df80f9-224d-47f5-837a-bf23e33d1c20')
def test_router_remove_interface_in_use_returns_409(self):
self.client.add_router_interface_with_subnet_id(
self.router['id'], self.subnet['id'])
self.assertRaises(lib_exc.Conflict,
self.client.delete_router,
self.router['id'])
@test.attr(type=['negative'])
@test.idempotent_id('c2a70d72-8826-43a7-8208-0209e6360c47')
def test_show_non_existent_router_returns_404(self):
router = data_utils.rand_name('non_exist_router')
self.assertRaises(lib_exc.NotFound, self.client.show_router,
router)
@test.attr(type=['negative'])
@test.idempotent_id('b23d1569-8b0c-4169-8d4b-6abd34fad5c7')
def test_update_non_existent_router_returns_404(self):
router = data_utils.rand_name('non_exist_router')
self.assertRaises(lib_exc.NotFound, self.client.update_router,
router, name="new_name")
@test.attr(type=['negative'])
@test.idempotent_id('c7edc5ad-d09d-41e6-a344-5c0c31e2e3e4')
def test_delete_non_existent_router_returns_404(self):
router = data_utils.rand_name('non_exist_router')
self.assertRaises(lib_exc.NotFound, self.client.delete_router,
router)
class RoutersNegativeIpV6Test(RoutersNegativeTest):
_ip_version = 6
class DvrRoutersNegativeTest(base.BaseRouterTest):
@classmethod
def skip_checks(cls):
super(DvrRoutersNegativeTest, cls).skip_checks()
if not test.is_extension_enabled('dvr', 'network'):
msg = "DVR extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(DvrRoutersNegativeTest, cls).resource_setup()
cls.router = cls.create_router(data_utils.rand_name('router'))
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
@test.attr(type=['negative'])
@test.idempotent_id('4990b055-8fc7-48ab-bba7-aa28beaad0b9')
def test_router_create_tenant_distributed_returns_forbidden(self):
self.assertRaises(lib_exc.Forbidden,
self.create_router,
data_utils.rand_name('router'),
distributed=True)
| apache-2.0 |
vivsh/django-ginger | ginger/html/forms.py | 1 | 6313 | from ginger.dataset import GingerDataSet
from django.forms.widgets import CheckboxInput
import re
from collections import namedtuple
from django.middleware import csrf
from django.utils import six
from django.utils.encoding import force_text
from ginger import utils
from . import common
__all__ = ["Choice", "Link", "form_csrf_tag", "form_attrs", "form_css_class",
"field_choices", "field_name_range", "field_links", "iter_fields", "widget_css_class",
"render_widget", "register_layout", "render_field", "field_css_class", "field_range",
"render_page", "wrap_csrf_token", "is_selected_choice", "make_css_class"]
Choice = namedtuple("Choice", ["name", "value", "content", "selected"])
class Link(object):
def __init__(self, url, content, is_active=False, **kwargs):
self.url = url
self.content = content
self.is_active = is_active
for k in kwargs:
setattr(self, k, kwargs[k])
_layouts = {}
def make_css_class(obj, suffix=""):
name = utils.camel_to_hyphen(re.sub(r'(?i)widget|field|ginger|form|input', '', obj.__class__.__name__, 1))
if suffix:
name = "%s%s" % (name, suffix)
return name
def is_selected_choice(values, choice):
if not isinstance(values, (list, tuple)):
values = (values, )
text_choice = force_text(choice)
for v in values:
if v == choice or text_choice == force_text(v):
return True
return False
def field_choices(field):
form_field = field.field
field_value = field.value()
name = field.html_name
for code, label in form_field.choices:
is_active = is_selected_choice(field_value, code)
yield Choice(name, code, label, is_active)
def field_links(request, field):
url = request.get_full_path()
form_field = field.field
field_value = field.value()
if hasattr(form_field, 'build_links'):
for value in form_field.build_links(request, field):
yield value
else:
for code, label in form_field.choices:
is_active = is_selected_choice(field_value, code)
link_url = utils.get_url_with_modified_params(url, {field.name: code})
yield Link(link_url, label, is_active, value=code)
def form_attrs(form, **kwargs):
attrs = kwargs
attrs.setdefault("method", "post")
classes = attrs.pop("class", "")
if isinstance(classes, six.string_types):
classes = classes.split(" ")
classes.append(form_css_class(form))
attrs["class"] = classes
attrs['enctype']='multipart/form-data' if form.is_multipart() else 'application/x-www-form-urlencoded'
return common.html_attrs(attrs)
def form_csrf_tag(request):
csrf_token = csrf.get_token(request)
el = common.input(type_="hidden", name="csrfmiddlewaretoken", value=csrf_token)
return el.render()
def wrap_csrf_token(token):
el = common.input(type_="hidden", name="csrfmiddlewaretoken", value=token)
return el.render()
def field_range(form, start, end, step=None, hidden=True):
field_names = field_name_range(form, start, end, step)
return iter_fields(form, field_names, hidden=hidden)
def field_name_range(form, first, last, step=None, field_names=None):
if field_names is None:
field_names = list(form.fields.keys())
keys = field_names
if first is not None and isinstance(first, six.string_types):
try:
first = keys.index(first)
except ValueError:
raise KeyError("%r is not a field for form %r" % (first, form.__class__.__name__))
if last is not None and isinstance(last, six.string_types):
try:
last = keys.index(last)-1
except ValueError:
raise KeyError("%r is not a field for form %r" % (last, form.__class__.__name__))
return keys[first:last:step]
def iter_fields(form, names, hidden=True):
for name in names:
field = form[name]
if hidden or not field.hidden:
yield field
def render_field(field, layout=None, **kwargs):
if field.is_hidden:
return field.as_hidden()
layout = _layouts.get(layout, default_layout)
template = layout(field)
ctx = {
"field": field,
"label": field.label,
"label_tag": common.label(class_="form-label", for_=field.id_for_label)[field.label] if field.label else "",
"widget": render_widget(field),
"help": field.help_text,
"help_tag": common.div(class_="form-help")[field.help_text],
"errors": field.errors
}
content = template.format(**ctx)
classes = ["form-field", field_css_class(field)]
if field.errors:
classes.append("has-error")
return common.div(class_=classes,
data_field=field.name, **kwargs)[content]
def render_widget(field, **attrs):
el = common.div(**attrs)[str(field)]
el.attrib.update(class_=[widget_css_class(field), "form-widget"])
return el.render()
def register_layout(name, func):
_layouts[name] = func
def default_layout(field):
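    # Checkboxes render the widget before its label; every other field renders
    # label, widget, help text, then errors.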
if isinstance(field.field.widget, CheckboxInput):
return "{widget}{label_tag}{help}{errors}"
return "{label_tag}{widget}{help}{errors}"
def field_css_class(field):
return make_css_class(field.field, "-field")
def widget_css_class(field):
return make_css_class(field.field.widget, "-widget")
def form_css_class(form):
return make_css_class(form, "-form")
def render_page(request, page, previous="«", next="»", **kwargs):
if isinstance(page, GingerDataSet):
page = page.object_list
if page.paginator.num_pages <= 1:
return ""
H = common
nav = H.ul(class_="pagination", **kwargs)
if page.has_previous():
url = page.previous_link(request).url
previous_tag = H.li(aria_label="Previous")[H.a(href=url)[previous]]
nav.append(previous_tag)
for link in page.build_links(request):
if link.is_active:
el = H.li(class_="active")[H.span[link.content]]
else:
el = H.li[H.a(href=link.url)[link.content]]
nav.append(el)
if page.has_next():
url = page.next_link(request).url
next_tag = H.li(aria_label="Next")[H.a(href=url)[next]]
nav.append(next_tag)
return nav.render() | mit |
veroc/Bika-LIMS | bika/lims/exportimport/instruments/beckmancoulter/access/model2.py | 3 | 3313 | """ Beckman Coulter Access 2
"""
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from . import BeckmancoulterAccessCSVParser, BeckmancoulterAccessImporter
import json
import traceback
title = "Beckman Coulter Access 2"
def Import(context, request):
""" Beckman Coulter Access 2 analysis results
"""
infile = request.form['beckmancoulter_access_model2_file']
fileformat = request.form['beckmancoulter_access_model2_format']
artoapply = request.form['beckmancoulter_access_model2_artoapply']
override = request.form['beckmancoulter_access_model2_override']
sample = request.form.get('beckmancoulter_access_model2_sample',
'requestid')
instrument = request.form.get('beckmancoulter_access_model2_instrument', None)
errors = []
logs = []
warns = []
# Load the most suitable parser according to file extension/options/etc...
parser = None
if not hasattr(infile, 'filename'):
errors.append(_("No file selected"))
if fileformat == 'csv':
parser = BeckmancoulterAccess2CSVParser(infile)
else:
errors.append(t(_("Unrecognized file format ${fileformat}",
mapping={"fileformat": fileformat})))
if parser:
# Load the importer
status = ['sample_received', 'attachment_due', 'to_be_verified']
if artoapply == 'received':
status = ['sample_received']
elif artoapply == 'received_tobeverified':
status = ['sample_received', 'attachment_due', 'to_be_verified']
over = [False, False]
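        # over presumably maps to [override existing results, override even with
        # empty values], mirroring the nooverride/override/overrideempty options.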
if override == 'nooverride':
over = [False, False]
elif override == 'override':
over = [True, False]
elif override == 'overrideempty':
over = [True, True]
sam = ['getRequestID', 'getSampleID', 'getClientSampleID']
if sample == 'requestid':
sam = ['getRequestID']
if sample == 'sampleid':
sam = ['getSampleID']
elif sample == 'clientsid':
sam = ['getClientSampleID']
elif sample == 'sample_clientsid':
sam = ['getSampleID', 'getClientSampleID']
importer = BeckmancoulterAccess2Importer(parser=parser,
context=context,
idsearchcriteria=sam,
allowed_ar_states=status,
allowed_analysis_states=None,
override=over,
instrument_uid=instrument)
tbex = ''
try:
importer.process()
except:
tbex = traceback.format_exc()
errors = importer.errors
logs = importer.logs
warns = importer.warns
if tbex:
errors.append(tbex)
results = {'errors': errors, 'log': logs, 'warns': warns}
return json.dumps(results)
class BeckmancoulterAccess2CSVParser(BeckmancoulterAccessCSVParser):
def getAttachmentFileType(self):
return "Beckman Couter Access 2"
class BeckmancoulterAccess2Importer(BeckmancoulterAccessImporter):
def getKeywordsToBeExcluded(self):
return []
| agpl-3.0 |
uwdata/termite-data-server | web2py/gluon/contrib/login_methods/oneall_account.py | 33 | 4559 | #!/usr/bin/env python
# coding: utf8
"""
Oneall Authentication for web2py
Developed by Nathan Freeze (Copyright © 2013)
Email <[email protected]>
This file contains code to allow using onall.com
authentication services with web2py
"""
import os
import base64
from gluon import *
from gluon.storage import Storage
from gluon.contrib.simplejson import JSONDecodeError
from gluon.tools import fetch
import gluon.contrib.simplejson as json
class OneallAccount(object):
"""
from gluon.contrib.login_methods.oneall_account import OneallAccount
auth.settings.actions_disabled=['register','change_password',
'request_reset_password']
auth.settings.login_form = OneallAccount(request,
public_key="...",
private_key="...",
domain="...",
url = "http://localhost:8000/%s/default/user/login" % request.application)
"""
def __init__(self, request, public_key="", private_key="", domain="",
url=None, providers=None, on_login_failure=None):
self.request = request
self.public_key = public_key
self.private_key = private_key
self.url = url
self.domain = domain
self.profile = None
self.on_login_failure = on_login_failure
self.providers = providers or ["facebook", "google", "yahoo", "openid"]
self.mappings = Storage()
def defaultmapping(profile):
name = profile.get('name',{})
dname = name.get('formatted',profile.get('displayName'))
email=profile.get('emails', [{}])[0].get('value')
reg_id=profile.get('identity_token','')
username=profile.get('preferredUsername',email)
first_name=name.get('givenName', dname.split(' ')[0])
last_name=profile.get('familyName',dname.split(' ')[1])
return dict(registration_id=reg_id,username=username,email=email,
first_name=first_name,last_name=last_name)
self.mappings.default = defaultmapping
def get_user(self):
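        # Exchange the connection_token that OneAll posts back to callback_uri for
        # the user's social profile, authenticating with the site's key pair.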
request = self.request
user = None
if request.vars.connection_token:
auth_url = "https://%s.api.oneall.com/connections/%s.json" % \
(self.domain, request.vars.connection_token)
auth_pw = "%s:%s" % (self.public_key,self.private_key)
auth_pw = base64.b64encode(auth_pw)
headers = dict(Authorization="Basic %s" % auth_pw)
try:
auth_info_json = fetch(auth_url,headers=headers)
auth_info = json.loads(auth_info_json)
data = auth_info['response']['result']['data']
if data['plugin']['key'] == 'social_login':
if data['plugin']['data']['status'] == 'success':
userdata = data['user']
self.profile = userdata['identity']
source = self.profile['source']['key']
mapping = self.mappings.get(source,self.mappings['default'])
user = mapping(self.profile)
except (JSONDecodeError, KeyError):
pass
if user is None and self.on_login_failure:
redirect(self.on_login_failure)
return user
def login_form(self):
scheme = self.request.env.wsgi_url_scheme
oneall_url = scheme + "://%s.api.oneall.com/socialize/library.js" % self.domain
oneall_lib = SCRIPT(_src=oneall_url,_type='text/javascript')
container = DIV(_id="oa_social_login_container")
widget = SCRIPT('oneall.api.plugins.social_login.build("oa_social_login_container",',
'{providers : %s,' % self.providers,
'callback_uri: "%s"});' % self.url,
_type="text/javascript")
form = DIV(oneall_lib,container,widget)
return form
def use_oneall(auth, filename='private/oneall.key', **kwargs):
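    # Reads "domain:public_key:private_key" from the key file and, if present,
    # switches the auth login form to OneAll.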
path = os.path.join(current.request.folder, filename)
if os.path.exists(path):
request = current.request
domain, public_key, private_key = open(path, 'r').read().strip().split(':')
url = URL('default', 'user', args='login', scheme=True)
auth.settings.actions_disabled =\
['register', 'change_password', 'request_reset_password']
auth.settings.login_form = OneallAccount(
request, public_key=public_key,private_key=private_key,
domain=domain, url=url, **kwargs)
| bsd-3-clause |
nathanielvarona/airflow | tests/www/views/conftest.py | 1 | 6162 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
from typing import Any, Dict, Generator, List, NamedTuple
import flask
import jinja2
import pytest
from airflow import settings
from airflow.models import DagBag
from airflow.www.app import create_app
from tests.test_utils.api_connexion_utils import create_user, delete_roles
from tests.test_utils.decorators import dont_initialize_flask_app_submodules
from tests.test_utils.www import client_with_login
@pytest.fixture(autouse=True, scope="module")
def session():
settings.configure_orm()
yield settings.Session
@pytest.fixture(autouse=True, scope="module")
def examples_dag_bag(session):
DagBag(include_examples=True).sync_to_db()
dag_bag = DagBag(include_examples=True, read_dags_from_db=True)
session.commit()
yield dag_bag
@pytest.fixture(scope="module")
def app(examples_dag_bag):
@dont_initialize_flask_app_submodules(
skip_all_except=[
"init_api_connexion",
"init_appbuilder",
"init_appbuilder_links",
"init_appbuilder_views",
"init_flash_views",
"init_jinja_globals",
"init_plugins",
]
)
def factory():
return create_app(testing=True)
app = factory()
app.config["WTF_CSRF_ENABLED"] = False
app.dag_bag = examples_dag_bag
app.jinja_env.undefined = jinja2.StrictUndefined
security_manager = app.appbuilder.sm # pylint: disable=no-member
if not security_manager.find_user(username='test'):
security_manager.add_user(
username='test',
first_name='test',
last_name='test',
email='[email protected]',
role=security_manager.find_role('Admin'),
password='test',
)
if not security_manager.find_user(username='test_user'):
security_manager.add_user(
username='test_user',
first_name='test_user',
last_name='test_user',
email='[email protected]',
role=security_manager.find_role('User'),
password='test_user',
)
if not security_manager.find_user(username='test_viewer'):
security_manager.add_user(
username='test_viewer',
first_name='test_viewer',
last_name='test_viewer',
email='[email protected]',
role=security_manager.find_role('Viewer'),
password='test_viewer',
)
yield app
delete_roles(app)
@pytest.fixture()
def admin_client(app):
return client_with_login(app, username="test", password="test")
@pytest.fixture()
def viewer_client(app):
return client_with_login(app, username="test_viewer", password="test_viewer")
@pytest.fixture()
def user_client(app):
return client_with_login(app, username="test_user", password="test_user")
@pytest.fixture(scope="module")
def client_factory(app):
def factory(name, role_name, permissions):
create_user(app, name, role_name, permissions)
client = app.test_client()
resp = client.post("/login/", data={"username": name, "password": name})
assert resp.status_code == 302
return client
return factory
class _TemplateWithContext(NamedTuple):
template: jinja2.environment.Template
context: Dict[str, Any]
@property
def name(self):
return self.template.name
@property
def local_context(self):
"""Returns context without global arguments"""
result = self.context.copy()
keys_to_delete = [
# flask.templating._default_template_ctx_processor
'g',
'request',
'session',
# flask_wtf.csrf.CSRFProtect.init_app
'csrf_token',
# flask_login.utils._user_context_processor
'current_user',
# flask_appbuilder.baseviews.BaseView.render_template
'appbuilder',
'base_template',
# airflow.www.app.py.create_app (inner method - jinja_globals)
'server_timezone',
'default_ui_timezone',
'hostname',
'navbar_color',
'log_fetch_delay_sec',
'log_auto_tailing_offset',
'log_animation_speed',
'state_color_mapping',
'airflow_version',
'git_version',
'k8s_or_k8scelery_executor',
# airflow.www.static_config.configure_manifest_files
'url_for_asset',
# airflow.www.views.AirflowBaseView.render_template
'scheduler_job',
# airflow.www.views.AirflowBaseView.extra_args
'macros',
]
for key in keys_to_delete:
del result[key]
return result
@pytest.fixture(scope="module")
def capture_templates(app):
@contextmanager
def manager() -> Generator[List[_TemplateWithContext], None, None]:
recorded = []
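        # Hook Flask's template_rendered signal so that every template rendered
        # while the context manager is active is recorded with its context.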
def record(sender, template, context, **extra): # pylint: disable=unused-argument
recorded.append(_TemplateWithContext(template, context))
flask.template_rendered.connect(record, app) # type: ignore
try:
yield recorded
finally:
flask.template_rendered.disconnect(record, app) # type: ignore
assert recorded, "Failed to catch the templates"
return manager
| apache-2.0 |
nkrinner/nova | nova/filters.py | 21 | 3305 | # Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Filter support
"""
from nova import loadables
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class BaseFilter(object):
"""Base class for all filter classes."""
def _filter_one(self, obj, filter_properties):
"""Return True if it passes the filter, False otherwise.
Override this in a subclass.
"""
return True
def filter_all(self, filter_obj_list, filter_properties):
"""Yield objects that pass the filter.
Can be overridden in a subclass, if you need to base filtering
decisions on all objects. Otherwise, one can just override
_filter_one() to filter a single object.
"""
for obj in filter_obj_list:
if self._filter_one(obj, filter_properties):
yield obj
# Set to true in a subclass if a filter only needs to be run once
# for each request rather than for each instance
run_filter_once_per_request = False
def run_filter_for_index(self, index):
"""Return True if the filter needs to be run for the "index-th"
instance in a request. Only need to override this if a filter
needs anything other than "first only" or "all" behaviour.
"""
if self.run_filter_once_per_request and index > 0:
return False
else:
return True
class BaseFilterHandler(loadables.BaseLoader):
"""Base class to handle loading filter classes.
This class should be subclassed where one needs to use filters.
"""
def get_filtered_objects(self, filter_classes, objs,
filter_properties, index=0):
list_objs = list(objs)
LOG.debug(_("Starting with %d host(s)"), len(list_objs))
for filter_cls in filter_classes:
cls_name = filter_cls.__name__
filter = filter_cls()
if filter.run_filter_for_index(index):
objs = filter.filter_all(list_objs,
filter_properties)
if objs is None:
LOG.debug(_("Filter %(cls_name)s says to stop filtering"),
{'cls_name': cls_name})
return
list_objs = list(objs)
if not list_objs:
LOG.info(_("Filter %s returned 0 hosts"), cls_name)
break
LOG.debug(_("Filter %(cls_name)s returned "
"%(obj_len)d host(s)"),
{'cls_name': cls_name, 'obj_len': len(list_objs)})
return list_objs
| apache-2.0 |
pygeek/django | django/contrib/sessions/backends/file.py | 4 | 5255 | import errno
import os
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
class SessionStore(SessionBase):
"""
Implements a file based session store.
"""
def __init__(self, session_key=None):
self.storage_path = getattr(settings, "SESSION_FILE_PATH", None)
if not self.storage_path:
self.storage_path = tempfile.gettempdir()
# Make sure the storage path is valid.
if not os.path.isdir(self.storage_path):
raise ImproperlyConfigured(
"The session storage path %r doesn't exist. Please set your"
" SESSION_FILE_PATH setting to an existing directory in which"
" Django can store session data." % self.storage_path)
self.file_prefix = settings.SESSION_COOKIE_NAME
super(SessionStore, self).__init__(session_key)
VALID_KEY_CHARS = set("abcdef0123456789")
def _key_to_file(self, session_key=None):
"""
Get the file associated with this session key.
"""
if session_key is None:
session_key = self._get_or_create_session_key()
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
if not set(session_key).issubset(self.VALID_KEY_CHARS):
raise SuspiciousOperation(
"Invalid characters in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
def load(self):
session_data = {}
try:
with open(self._key_to_file(), "rb") as session_file:
file_data = session_file.read()
# Don't fail if there is no data in the session file.
# We may have opened the empty placeholder file.
if file_data:
try:
session_data = self.decode(file_data)
except (EOFError, SuspiciousOperation):
self.create()
except IOError:
self.create()
return session_data
def create(self):
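        # Keep generating session keys until save(must_create=True) succeeds,
        # i.e. the key does not collide with an existing session file.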
while True:
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
self._session_cache = {}
return
def save(self, must_create=False):
# Get the session data now, before we start messing
# with the file it is stored within.
session_data = self._get_session(no_load=must_create)
session_file_name = self._key_to_file()
try:
# Make sure the file exists. If it does not already exist, an
# empty placeholder file is created.
flags = os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0)
if must_create:
flags |= os.O_EXCL
fd = os.open(session_file_name, flags)
os.close(fd)
except OSError as e:
if must_create and e.errno == errno.EEXIST:
raise CreateError
raise
# Write the session file without interfering with other threads
# or processes. By writing to an atomically generated temporary
# file and then using the atomic os.rename() to make the complete
# file visible, we avoid having to lock the session file, while
# still maintaining its integrity.
#
# Note: Locking the session file was explored, but rejected in part
# because in order to be atomic and cross-platform, it required a
# long-lived lock file for each session, doubling the number of
# files in the session storage directory at any given time. This
# rename solution is cleaner and avoids any additional overhead
# when reading the session data, which is the more common case
# unless SESSION_SAVE_EVERY_REQUEST = True.
#
# See ticket #8616.
dir, prefix = os.path.split(session_file_name)
try:
output_file_fd, output_file_name = tempfile.mkstemp(dir=dir,
prefix=prefix + '_out_')
renamed = False
try:
try:
os.write(output_file_fd, self.encode(session_data).encode())
finally:
os.close(output_file_fd)
os.rename(output_file_name, session_file_name)
renamed = True
finally:
if not renamed:
os.unlink(output_file_name)
except (OSError, IOError, EOFError):
pass
def exists(self, session_key):
return os.path.exists(self._key_to_file(session_key))
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
os.unlink(self._key_to_file(session_key))
except OSError:
pass
def clean(self):
pass
| bsd-3-clause |
jesramirez/odoo | openerp/addons/test_limits/models.py | 435 | 1034 | # -*- coding: utf-8 -*-
import time
import openerp
class m(openerp.osv.osv.Model):
""" This model exposes a few methods that will consume between 'almost no
resource' and 'a lot of resource'.
"""
_name = 'test.limits.model'
def consume_nothing(self, cr, uid, context=None):
return True
def consume_memory(self, cr, uid, size, context=None):
l = [0] * size
return True
def leak_memory(self, cr, uid, size, context=None):
if not hasattr(self, 'l'):
self.l = []
self.l.append([0] * size)
return True
def consume_time(self, cr, uid, seconds, context=None):
time.sleep(seconds)
return True
def consume_cpu_time(self, cr, uid, seconds, context=None):
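        # Busy-loop until the requested amount of CPU time (per time.clock())
        # has been consumed, unlike consume_time() which merely sleeps.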
t0 = time.clock()
t1 = time.clock()
while t1 - t0 < seconds:
for i in xrange(10000000):
x = i * i
t1 = time.clock()
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Samuc/Proyecto-IV | lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/codingstatemachine.py | 2931 | 2318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
| gpl-2.0 |
ujjwal96/mitmproxy | mitmproxy/websocket.py | 3 | 6332 | import time
import queue
from typing import List, Optional
from wsproto.frame_protocol import CloseReason
from wsproto.frame_protocol import Opcode
from mitmproxy import flow
from mitmproxy.net import websockets
from mitmproxy.coretypes import serializable
from mitmproxy.utils import strutils, human
class WebSocketMessage(serializable.Serializable):
"""
A WebSocket message sent from one endpoint to the other.
"""
def __init__(
self, type: int, from_client: bool, content: bytes, timestamp: Optional[int]=None, killed: bool=False
) -> None:
self.type = Opcode(type) # type: ignore
"""indicates either TEXT or BINARY (from wsproto.frame_protocol.Opcode)."""
self.from_client = from_client
"""True if this messages was sent by the client."""
self.content = content
"""A byte-string representing the content of this message."""
self.timestamp: int = timestamp or int(time.time())
"""Timestamp of when this message was received or created."""
self.killed = killed
"""True if this messages was killed and should not be sent to the other endpoint."""
@classmethod
def from_state(cls, state):
return cls(*state)
def get_state(self):
return int(self.type), self.from_client, self.content, self.timestamp, self.killed
def set_state(self, state):
self.type, self.from_client, self.content, self.timestamp, self.killed = state
        self.type = Opcode(self.type)  # restore the Opcode enum from the bare int
def __repr__(self):
if self.type == Opcode.TEXT:
return "text message: {}".format(repr(self.content))
else:
return "binary message: {}".format(strutils.bytes_to_escaped_str(self.content))
def kill(self):
"""
Kill this message.
It will not be sent to the other endpoint. This has no effect in streaming mode.
"""
self.killed = True
class WebSocketFlow(flow.Flow):
"""
A WebSocketFlow is a simplified representation of a Websocket connection.
"""
def __init__(self, client_conn, server_conn, handshake_flow, live=None):
super().__init__("websocket", client_conn, server_conn, live)
self.messages: List[WebSocketMessage] = []
"""A list containing all WebSocketMessage's."""
self.close_sender = 'client'
"""'client' if the client initiated connection closing."""
self.close_code = CloseReason.NORMAL_CLOSURE
"""WebSocket close code."""
self.close_message = '(message missing)'
"""WebSocket close message."""
self.close_reason = 'unknown status code'
"""WebSocket close reason."""
self.stream = False
"""True of this connection is streaming directly to the other endpoint."""
self.handshake_flow = handshake_flow
"""The HTTP flow containing the initial WebSocket handshake."""
self.ended = False
"""True when the WebSocket connection has been closed."""
self._inject_messages_client = queue.Queue(maxsize=1)
self._inject_messages_server = queue.Queue(maxsize=1)
if handshake_flow:
self.client_key = websockets.get_client_key(handshake_flow.request.headers)
self.client_protocol = websockets.get_protocol(handshake_flow.request.headers)
self.client_extensions = websockets.get_extensions(handshake_flow.request.headers)
self.server_accept = websockets.get_server_accept(handshake_flow.response.headers)
self.server_protocol = websockets.get_protocol(handshake_flow.response.headers)
self.server_extensions = websockets.get_extensions(handshake_flow.response.headers)
else:
self.client_key = ''
self.client_protocol = ''
self.client_extensions = ''
self.server_accept = ''
self.server_protocol = ''
self.server_extensions = ''
_stateobject_attributes = flow.Flow._stateobject_attributes.copy()
# mypy doesn't support update with kwargs
_stateobject_attributes.update(dict(
messages=List[WebSocketMessage],
close_sender=str,
close_code=int,
close_message=str,
close_reason=str,
client_key=str,
client_protocol=str,
client_extensions=str,
server_accept=str,
server_protocol=str,
server_extensions=str,
# Do not include handshake_flow, to prevent recursive serialization!
# Since mitmproxy-console currently only displays HTTPFlows,
# dumping the handshake_flow will include the WebSocketFlow too.
))
def get_state(self):
d = super().get_state()
d['close_code'] = int(d['close_code']) # replace enum with bare int
return d
@classmethod
def from_state(cls, state):
f = cls(None, None, None)
f.set_state(state)
return f
def __repr__(self):
return "<WebSocketFlow ({} messages)>".format(len(self.messages))
def message_info(self, message: WebSocketMessage) -> str:
return "{client} {direction} WebSocket {type} message {direction} {server}{endpoint}".format(
type=message.type,
client=human.format_address(self.client_conn.address),
server=human.format_address(self.server_conn.address),
direction="->" if message.from_client else "<-",
endpoint=self.handshake_flow.request.path,
)
def inject_message(self, endpoint, payload):
"""
Inject and send a full WebSocket message to the remote endpoint.
This might corrupt your WebSocket connection! Be careful!
The endpoint needs to be either flow.client_conn or flow.server_conn.
        If ``payload`` is of type ``bytes``, the message is flagged as binary.
        If it is of type ``str``, it is encoded as UTF-8 and sent as a text
        message.
:param payload: The message body to send.
:type payload: ``bytes`` or ``str``
"""
if endpoint == self.client_conn:
self._inject_messages_client.put(payload)
elif endpoint == self.server_conn:
self._inject_messages_server.put(payload)
else:
raise ValueError('Invalid endpoint')
| mit |
PandaWei/tp-qemu | qemu/tests/win_virtio_serial_data_transfer_reboot.py | 6 | 4731 | import os
import logging
from autotest.client import utils
from autotest.client.shared import error
from virttest import data_dir
from virttest import qemu_virtio_port
# This decorator makes the test function aware of context strings
@error.context_aware
def run(test, params, env):
"""
QEMU 'Windows virtio-serial data transfer' test
1) Start guest with one virtio-serial-pci and two virtio-serial-port.
2) Make sure vioser.sys verifier enabled in guest.
3) Transfering data from host to guest via virtio-serial-port in a loop.
4) Reboot guest.
5) Repeat step 3.
6) Reboot guest by system_reset qmp command.
:param test: QEMU test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
def get_virtio_port_host_file(vm, port_name):
"""
        Return the host-side file backing the virtserialport named @port_name.
        :param vm: VM object
        :param port_name: name of the virtserialport to look up
        :return: host file of the matching port, or None if no port matches
"""
for port in vm.virtio_ports:
if isinstance(port, qemu_virtio_port.VirtioSerial):
if port.name == port_name:
return port.hostfile
def receive_data(session, serial_receive_cmd, data_file):
output = session.cmd_output(serial_receive_cmd, timeout=30)
ori_data = file(data_file, "r").read()
if ori_data.strip() != output.strip():
err = "Data lost during transfer. Origin data is:\n%s" % ori_data
err += "Guest receive data:\n%s" % output
raise error.TestFail(err)
def transfer_data(session, receive_cmd, send_cmd, data_file, n_time):
txt = "Transfer data betwwen guest and host for %s times" % n_time
error.context(txt, logging.info)
for num in xrange(n_time):
logging.info("Data transfer repeat %s/%s." % (num + 1, n_time))
try:
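                # Start the receiver in the guest on a background thread, then
                # push the data from the host; wait briefly for the guest side.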
args = (session, receive_cmd, data_file)
guest_receive = utils.InterruptedThread(receive_data, args)
guest_receive.start()
utils.system(send_cmd, timeout=30)
finally:
if guest_receive:
guest_receive.join(10)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
check_cmd = params.get("check_vioser_status_cmd",
"verifier /querysettings")
output = session.cmd(check_cmd, timeout=360)
error.context("Make sure vioser.sys verifier enabled in guest.",
logging.info)
if "vioser.sys" not in output:
verify_cmd = params.get("vioser_verify_cmd",
"verifier.exe /standard /driver vioser.sys")
session.cmd(verify_cmd, timeout=360)
session = vm.reboot(session=session, timeout=timeout)
output = session.cmd(check_cmd, timeout=360)
if "vioser.sys" not in output:
error.TestError("Fail to veirfy vioser.sys driver.")
guest_scripts = params["guest_scripts"]
guest_path = params.get("guest_script_folder", "C:\\")
error.context("Copy test scripts to guest.", logging.info)
for script in guest_scripts.split(";"):
link = os.path.join(data_dir.get_deps_dir("win_serial"), script)
vm.copy_files_to(link, guest_path, timeout=60)
port_name = params["virtio_ports"].split()[0]
host_file = get_virtio_port_host_file(vm, port_name)
data_file = params["data_file"]
data_file = os.path.join(data_dir.get_deps_dir("win_serial"),
data_file)
send_script = params.get("host_send_script", "serial-host-send.py")
send_script = os.path.join(data_dir.get_deps_dir("win_serial"),
send_script)
serial_send_cmd = "python %s %s %s" % (send_script, host_file, data_file)
receive_script = params.get("guest_receive_script",
"VirtIoChannel_guest_recieve.py")
receive_script = "%s%s" % (guest_path, receive_script)
serial_receive_cmd = "python %s %s " % (receive_script, port_name)
n_time = int(params.get("repeat_times", 20))
transfer_data(session, serial_receive_cmd, serial_send_cmd,
data_file, n_time)
error.context("Reboot guest.", logging.info)
session = vm.reboot(session=session, timeout=timeout)
transfer_data(session, serial_receive_cmd, serial_send_cmd,
data_file, n_time)
error.context("Reboot guest by system_reset qmp command.", logging.info)
session = vm.reboot(session=session, method="system_reset",
timeout=timeout)
if session:
session.close()
| gpl-2.0 |
sebastic/QGIS | python/plugins/db_manager/db_plugins/postgis/info_model.py | 3 | 11691 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtGui import QApplication
from ..info_model import TableInfo, VectorTableInfo, RasterTableInfo
from ..html_elems import HtmlSection, HtmlParagraph, HtmlTable, HtmlTableHeader, HtmlTableCol
class PGTableInfo(TableInfo):
def __init__(self, table):
self.table = table
def generalInfo(self):
ret = []
# if the estimation is less than 100 rows, try to count them - it shouldn't take long time
if self.table.rowCount is None and self.table.estimatedRowCount < 100:
# row count information is not displayed yet, so just block
# table signals to avoid double refreshing (infoViewer->refreshRowCount->tableChanged->infoViewer)
self.table.blockSignals(True)
self.table.refreshRowCount()
self.table.blockSignals(False)
tbl = [
(QApplication.translate("DBManagerPlugin", "Relation type:"),
QApplication.translate("DBManagerPlugin", "View") if self.table._relationType == 'v' else
QApplication.translate("DBManagerPlugin", "Materialized view") if self.table._relationType == 'm' else
QApplication.translate("DBManagerPlugin", "Table")),
(QApplication.translate("DBManagerPlugin", "Owner:"), self.table.owner)
]
if self.table.comment:
tbl.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.table.comment))
tbl.extend([
(QApplication.translate("DBManagerPlugin", "Pages:"), self.table.pages),
(QApplication.translate("DBManagerPlugin", "Rows (estimation):"), self.table.estimatedRowCount)
])
# privileges
# has the user access to this schema?
schema_priv = self.table.database().connector.getSchemaPrivileges(
self.table.schemaName()) if self.table.schema() else None
if schema_priv is None:
pass
elif not schema_priv[1]: # no usage privileges on the schema
tbl.append((QApplication.translate("DBManagerPlugin", "Privileges:"),
QApplication.translate("DBManagerPlugin",
"<warning> This user doesn't have usage privileges for this schema!")))
else:
table_priv = self.table.database().connector.getTablePrivileges((self.table.schemaName(), self.table.name))
privileges = []
if table_priv[0]:
privileges.append("select")
if self.table.rowCount is not None or self.table.rowCount >= 0:
tbl.append((QApplication.translate("DBManagerPlugin", "Rows (counted):"),
self.table.rowCount if self.table.rowCount is not None else QApplication.translate(
"DBManagerPlugin", 'Unknown (<a href="action:rows/count">find out</a>)')))
if table_priv[1]:
privileges.append("insert")
if table_priv[2]:
privileges.append("update")
if table_priv[3]:
privileges.append("delete")
priv_string = u", ".join(privileges) if len(privileges) > 0 else QApplication.translate("DBManagerPlugin",
'<warning> This user has no privileges!')
tbl.append((QApplication.translate("DBManagerPlugin", "Privileges:"), priv_string))
ret.append(HtmlTable(tbl))
if schema_priv is not None and schema_priv[1]:
if table_priv[0] and not table_priv[1] and not table_priv[2] and not table_priv[3]:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> This user has read-only privileges.")))
if not self.table.isView:
if self.table.rowCount is not None:
if abs(self.table.estimatedRowCount - self.table.rowCount) > 1 and \
(self.table.estimatedRowCount > 2 * self.table.rowCount or
self.table.rowCount > 2 * self.table.estimatedRowCount):
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> There's a significant difference between estimated and real row count. "
'Consider running <a href="action:vacuumanalyze/run">VACUUM ANALYZE</a>.')))
# primary key defined?
if not self.table.isView:
if len(filter(lambda fld: fld.primaryKey, self.table.fields())) <= 0:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> No primary key defined for this table!")))
return ret
def getSpatialInfo(self):
ret = []
info = self.db.connector.getSpatialInfo()
if info is None:
return
tbl = [
(QApplication.translate("DBManagerPlugin", "Library:"), info[0]),
(QApplication.translate("DBManagerPlugin", "Scripts:"), info[3]),
("GEOS:", info[1]),
("Proj:", info[2])
]
ret.append(HtmlTable(tbl))
if info[1] is not None and info[1] != info[2]:
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> Version of installed scripts doesn't match version of released scripts!\n"
"This is probably a result of incorrect PostGIS upgrade.")))
if not self.db.connector.has_geometry_columns:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> geometry_columns table doesn't exist!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
elif not self.db.connector.has_geometry_columns_access:
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> This user doesn't have privileges to read contents of geometry_columns table!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
return ret
def fieldsDetails(self):
tbl = []
# define the table header
header = (
"#", QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
QApplication.translate("DBManagerPlugin", "Length"), QApplication.translate("DBManagerPlugin", "Null"),
QApplication.translate("DBManagerPlugin", "Default"))
tbl.append(HtmlTableHeader(header))
# add table contents
for fld in self.table.fields():
char_max_len = fld.charMaxLen if fld.charMaxLen is not None and fld.charMaxLen != -1 else ""
is_null_txt = "N" if fld.notNull else "Y"
# make primary key field underlined
attrs = {"class": "underline"} if fld.primaryKey else None
name = HtmlTableCol(fld.name, attrs)
tbl.append((fld.num, name, fld.type2String(), char_max_len, is_null_txt, fld.default2String()))
return HtmlTable(tbl, {"class": "header"})
def triggersDetails(self):
if self.table.triggers() is None or len(self.table.triggers()) <= 0:
return None
ret = []
tbl = []
# define the table header
header = (
QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Function"),
QApplication.translate("DBManagerPlugin", "Type"), QApplication.translate("DBManagerPlugin", "Enabled"))
tbl.append(HtmlTableHeader(header))
# add table contents
for trig in self.table.triggers():
name = u'%(name)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {"name": trig.name,
"action": "delete"}
(enabled, action) = (QApplication.translate("DBManagerPlugin", "Yes"), "disable") if trig.enabled else (
QApplication.translate("DBManagerPlugin", "No"), "enable")
txt_enabled = u'%(enabled)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {
"name": trig.name, "action": action, "enabled": enabled}
tbl.append((name, trig.function, trig.type2String(), txt_enabled))
ret.append(HtmlTable(tbl, {"class": "header"}))
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
'<a href="action:triggers/enable">Enable all triggers</a> / <a href="action:triggers/disable">Disable all triggers</a>')))
return ret
def rulesDetails(self):
if self.table.rules() is None or len(self.table.rules()) <= 0:
return None
tbl = []
# define the table header
header = (
QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Definition"))
tbl.append(HtmlTableHeader(header))
# add table contents
for rule in self.table.rules():
name = u'%(name)s (<a href="action:rule/%(name)s/%(action)s">%(action)s</a>)' % {"name": rule.name,
"action": "delete"}
tbl.append((name, rule.definition))
return HtmlTable(tbl, {"class": "header"})
def getTableInfo(self):
ret = TableInfo.getTableInfo(self)
# rules
rules_details = self.rulesDetails()
if rules_details is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Rules'), rules_details))
return ret
class PGVectorTableInfo(PGTableInfo, VectorTableInfo):
def __init__(self, table):
VectorTableInfo.__init__(self, table)
PGTableInfo.__init__(self, table)
def spatialInfo(self):
return VectorTableInfo.spatialInfo(self)
class PGRasterTableInfo(PGTableInfo, RasterTableInfo):
def __init__(self, table):
RasterTableInfo.__init__(self, table)
PGTableInfo.__init__(self, table)
def spatialInfo(self):
return RasterTableInfo.spatialInfo(self)
| gpl-2.0 |
sensysnetworks/uClinux | user/python/Demo/classes/Range.py | 3 | 1684 | # Example of a generator: re-implement the built-in range function
# without actually constructing the list of values. (It turns out
# that the built-in function is about 20 times faster -- that's why
# it's built-in. :-)
# Wrapper function to emulate the complicated range() arguments
def range(*a):
if len(a) == 1:
start, stop, step = 0, a[0], 1
elif len(a) == 2:
start, stop = a
step = 1
elif len(a) == 3:
start, stop, step = a
else:
raise TypeError, 'range() needs 1-3 arguments'
return Range(start, stop, step)
# Class implementing a range object.
# To the user the instances feel like immutable sequences
# (and you can't concatenate or slice them)
class Range:
# initialization -- should be called only by range() above
def __init__(self, start, stop, step):
if step == 0:
raise ValueError, 'range() called with zero step'
self.start = start
self.stop = stop
self.step = step
self.len = max(0, int((self.stop - self.start) / self.step))
# implement `x` and is also used by print x
def __repr__(self):
return 'range' + `self.start, self.stop, self.step`
# implement len(x)
def __len__(self):
return self.len
# implement x[i]
def __getitem__(self, i):
if 0 <= i < self.len:
return self.start + self.step * i
else:
raise IndexError, 'range[i] index out of range'
# Small test program
def test():
import time, __builtin__
print range(10), range(-10, 10), range(0, 10, 2)
for i in range(100, -100, -10): print i,
print
t1 = time.time()
for i in range(1000):
pass
t2 = time.time()
for i in __builtin__.range(1000):
pass
t3 = time.time()
print t2-t1, 'sec (class)'
print t3-t2, 'sec (built-in)'
test()
| gpl-2.0 |
kgraney/msm-kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
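    # Convert between trace timestamps (microseconds) and pixel offsets,
    # scaled by the current zoom factor.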
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
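        # Draw the rectangle for one scheduling slice; when top_color is given,
        # a thin strip along the top of the rectangle marks the event.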
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
sammyshj/stem | test/unit/interpreter/arguments.py | 2 | 1729 | import unittest
from stem.interpreter.arguments import DEFAULT_ARGS, parse, get_help
class TestArgumentParsing(unittest.TestCase):
def test_that_we_get_default_values(self):
args = parse([])
for attr in DEFAULT_ARGS:
self.assertEqual(DEFAULT_ARGS[attr], getattr(args, attr))
def test_that_we_load_arguments(self):
args = parse(['--interface', '10.0.0.25:80'])
self.assertEqual('10.0.0.25', args.control_address)
self.assertEqual(80, args.control_port)
args = parse(['--interface', '80'])
self.assertEqual(DEFAULT_ARGS['control_address'], args.control_address)
self.assertEqual(80, args.control_port)
args = parse(['--socket', '/tmp/my_socket'])
self.assertEqual('/tmp/my_socket', args.control_socket)
args = parse(['--help'])
self.assertEqual(True, args.print_help)
def test_examples(self):
args = parse(['-i', '1643'])
self.assertEqual(1643, args.control_port)
args = parse(['-s', '~/.tor/socket'])
self.assertEqual('~/.tor/socket', args.control_socket)
def test_that_we_reject_unrecognized_arguments(self):
self.assertRaises(ValueError, parse, ['--blarg', 'stuff'])
def test_that_we_reject_invalid_interfaces(self):
invalid_inputs = (
'',
' ',
'blarg',
'127.0.0.1',
'127.0.0.1:',
':80',
'400.0.0.1:80',
'127.0.0.1:-5',
'127.0.0.1:500000',
)
for invalid_input in invalid_inputs:
self.assertRaises(ValueError, parse, ['--interface', invalid_input])
def test_get_help(self):
help_text = get_help()
self.assertTrue('Interactive interpreter for Tor.' in help_text)
self.assertTrue('change control interface from 127.0.0.1:default' in help_text)
| lgpl-3.0 |
geometalab/osmaxx-frontend | tests/conversion/converters/inside_worker_test/nonop_way_test.py | 2 | 7148 | from contextlib import closing
import pytest
import sqlalchemy
from sqlalchemy.sql.schema import Table as DbTable
from osmaxx.utils.frozendict import frozendict
from tests.conftest import TagCombination
from tests.conversion.converters.inside_worker_test.conftest import slow
from tests.conversion.converters.inside_worker_test.declarative_schema import osm_models
MAJOR_KEYS = frozenset({'highway', 'railway'})
DEFAULT_EXPECTED_FALLBACK_SUBTYPE_FOR_MAJOR_KEY = frozendict(
highway='road',
railway='railway'
)
CORRESPONDING_OSMAXX_WAY_TYPES_FOR_OSM_TAG_COMBINATIONS = frozendict(
{
TagCombination(highway='track'): 'track',
TagCombination(highway='track', tracktype='grade3'): 'grade3',
TagCombination(highway='footway'): 'footway',
TagCombination(highway='secondary', junction='roundabout'): 'secondary',
TagCombination(highway='some bogus type of road', junction='roundabout'): 'roundabout',
TagCombination(railway='rail'): 'rail',
TagCombination(railway='platform'): 'railway',
},
)
CORRESPONDING_OSMAXX_STATUSES_FOR_OSM_STATUSES = frozendict(
proposed='P',
planned='P',
construction='C',
disused='D',
abandoned='A',
)
@slow
def test_osm_object_without_status_does_not_end_up_in_nonop(non_lifecycle_data_import, nonop_l, road_l, railway_l):
engine = non_lifecycle_data_import
with closing(engine.execute(sqlalchemy.select('*').select_from(road_l))) as road_result:
with closing(engine.execute(sqlalchemy.select('*').select_from(railway_l))) as railway_result:
assert road_result.rowcount + railway_result.rowcount == 1
with closing(engine.execute(sqlalchemy.select('*').select_from(nonop_l))) as nonop_result:
assert nonop_result.rowcount == 0
@slow
def test_osm_object_with_status_ends_up_in_nonop_with_correct_attribute_values(
lifecycle_data_import,
nonop_l, road_l, railway_l,
expected_osmaxx_status, osm_status, non_lifecycle_osm_tags, major_tag_key, expected_nonop_subtype,
):
engine = lifecycle_data_import
with closing(engine.execute(sqlalchemy.select('*').select_from(road_l))) as road_result:
assert road_result.rowcount == 0
with closing(engine.execute(sqlalchemy.select('*').select_from(railway_l))) as railway_result:
assert railway_result.rowcount == 0
with closing(engine.execute(sqlalchemy.select('*').select_from(nonop_l))) as result:
assert result.rowcount == 1
row = result.fetchone()
assert row['status'] == expected_osmaxx_status
assert row['tags'] == '"{key}"=>"{value}"'.format(key=osm_status, value=non_lifecycle_osm_tags[major_tag_key])
assert row['sub_type'] == expected_nonop_subtype
@slow
def test_osm_object_with_status_without_details_ends_up_in_nonop_with_correct_status(
incomplete_lifecycle_data_import, nonop_l, road_l, railway_l, expected_osmaxx_status,
expected_fallback_subtype):
engine = incomplete_lifecycle_data_import
with closing(engine.execute(sqlalchemy.select('*').select_from(road_l))) as road_result:
assert road_result.rowcount == 0
with closing(engine.execute(sqlalchemy.select('*').select_from(railway_l))) as railway_result:
assert railway_result.rowcount == 0
with closing(engine.execute(sqlalchemy.select('*').select_from(nonop_l))) as result:
assert result.rowcount == 1
row = result.fetchone()
assert row['status'] == expected_osmaxx_status
assert row['tags'] is None
assert row['sub_type'] == expected_fallback_subtype
@pytest.fixture
def nonop_l():
return DbTable('nonop_l', osm_models.metadata, schema='view_osmaxx')
@pytest.fixture
def road_l():
return DbTable('road_l', osm_models.metadata, schema='view_osmaxx')
@pytest.fixture
def railway_l():
return DbTable('railway_l', osm_models.metadata, schema='view_osmaxx')
@pytest.fixture
def expected_fallback_subtype(major_tag_key, incomplete_lifecycle_osm_tags):
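    # Roundabout junctions keep their specific subtype even when lifecycle
    # details are missing; other ways fall back to the generic subtype for
    # their major key.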
if major_tag_key == 'highway' and incomplete_lifecycle_osm_tags.pop('junction', None) == 'roundabout':
return 'roundabout'
return DEFAULT_EXPECTED_FALLBACK_SUBTYPE_FOR_MAJOR_KEY[major_tag_key]
@pytest.yield_fixture
def lifecycle_data_import(lifecycle_data, data_import):
with data_import(lifecycle_data) as engine:
yield engine
@pytest.yield_fixture
def incomplete_lifecycle_data_import(incomplete_lifecycle_data, data_import):
with data_import(incomplete_lifecycle_data) as engine:
yield engine
@pytest.yield_fixture
def non_lifecycle_data_import(non_lifecycle_data, data_import):
with data_import(non_lifecycle_data) as engine:
yield engine
@pytest.fixture
def lifecycle_data(lifecycle_osm_tags):
return {osm_models.t_osm_line: lifecycle_osm_tags}
@pytest.fixture
def incomplete_lifecycle_data(incomplete_lifecycle_osm_tags):
return {osm_models.t_osm_line: incomplete_lifecycle_osm_tags}
@pytest.fixture
def non_lifecycle_data(non_lifecycle_osm_tags):
return {osm_models.t_osm_line: non_lifecycle_osm_tags}
@pytest.fixture
def lifecycle_osm_tags(non_lifecycle_osm_tags, osm_status, major_tag_key):
osm_tags = dict(non_lifecycle_osm_tags)
major_tag_value = osm_tags.pop(major_tag_key)
osm_tags.update({major_tag_key: osm_status, 'tags': {osm_status: major_tag_value}})
assert len(osm_tags) == len(non_lifecycle_osm_tags) + 1
return osm_tags
@pytest.fixture
def incomplete_lifecycle_osm_tags(non_lifecycle_osm_tags, osm_status, major_tag_key):
osm_tags = dict(non_lifecycle_osm_tags)
osm_tags.update({major_tag_key: osm_status})
assert len(osm_tags) == len(non_lifecycle_osm_tags)
return osm_tags
@pytest.fixture
def non_lifecycle_osm_tags(non_lifecycle_osm_tags_and_expected_nonop_subtype):
osm_tags, _ = non_lifecycle_osm_tags_and_expected_nonop_subtype
return osm_tags
@pytest.fixture
def major_tag_key(non_lifecycle_osm_tags):
major_keys = MAJOR_KEYS.intersection(non_lifecycle_osm_tags)
assert len(major_keys) == 1
return next(iter(major_keys))
@pytest.fixture
def expected_nonop_subtype(non_lifecycle_osm_tags_and_expected_nonop_subtype):
_, subtype = non_lifecycle_osm_tags_and_expected_nonop_subtype
return subtype
@pytest.fixture
def osm_status(osm_status_and_expected_osmaxx_status):
status, _ = osm_status_and_expected_osmaxx_status
return status
@pytest.fixture
def expected_osmaxx_status(osm_status_and_expected_osmaxx_status):
_, osmaxx_status = osm_status_and_expected_osmaxx_status
return osmaxx_status
@pytest.fixture(
params=CORRESPONDING_OSMAXX_WAY_TYPES_FOR_OSM_TAG_COMBINATIONS.items(),
ids=[str(tag_combination) for tag_combination in CORRESPONDING_OSMAXX_WAY_TYPES_FOR_OSM_TAG_COMBINATIONS.keys()],
)
def non_lifecycle_osm_tags_and_expected_nonop_subtype(request):
return request.param
@pytest.fixture(
params=CORRESPONDING_OSMAXX_STATUSES_FOR_OSM_STATUSES.items(),
ids=list(CORRESPONDING_OSMAXX_STATUSES_FOR_OSM_STATUSES.keys()),
)
def osm_status_and_expected_osmaxx_status(request):
return request.param
| mit |
mkost/djangocms-googlemap | schemamigration.py | 1 | 1460 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'mptt',
'cms',
'menus',
'djangocms_googlemap',
'south',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
TEMPLATE_CONTEXT_PROCESSORS = [
'django.core.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'cms.context_processors.media',
'sekizai.context_processors.sekizai',
]
ROOT_URLCONF = 'cms.urls'
def schemamigration():
    # turn ``schemamigration.py --initial`` into
    # ``manage.py schemamigration djangocms_googlemap --initial`` and set up
    # the environment
from django.conf import settings
from django.core.management import ManagementUtility
settings.configure(
INSTALLED_APPS=INSTALLED_APPS,
ROOT_URLCONF=ROOT_URLCONF,
DATABASES=DATABASES,
TEMPLATE_CONTEXT_PROCESSORS=TEMPLATE_CONTEXT_PROCESSORS
)
argv = list(sys.argv)
argv.insert(1, 'schemamigration')
argv.insert(2, 'djangocms_googlemap')
utility = ManagementUtility(argv)
utility.execute()
if __name__ == "__main__":
schemamigration()
| bsd-3-clause |
harshilasu/LinkurApp | y/google-cloud-sdk/platform/gsutil/gslib/addlhelp/apis.py | 4 | 2736 | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about gsutil's interaction with Cloud Storage APIs."""
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
Google Cloud Storage offers two APIs: an XML and a JSON API. Gsutil can
interact with both APIs. By default, gsutil versions starting with 4.0
interact with the JSON API. If it is not possible to perform a command using
one of the APIs (for example, the notification command is not supported in
the XML API), gsutil will silently fall back to using the other API. Also,
gsutil will automatically fall back to using the XML API when interacting
with cloud storage providers that only support that API.
<B>CONFIGURING WHICH API IS USED</B>
To use a certain API for interacting with Google Cloud Storage, you can set
the 'prefer_api' variable in the "GSUtil" section of .boto config file to
'xml' or 'json' like so:
prefer_api = json
This will cause gsutil to use that API where possible (falling back to the
other API in cases as noted above). This applies to the gsutil test command
as well; it will run integration tests against the preferred API.
<B>PERFORMANCE DIFFERENCES BETWEEN APIS</B>
The XML API uses the boto framework. This framework re-reads downloaded files
to compute an MD5 hash if one is not present. For objects that do not
include MD5 hashes in their metadata (for example Google Cloud Storage
composite objects), this doubles the bandwidth consumed and elapsed time
needed by the download. Therefore, if you are working with composite objects,
it is recommended that you use the default value for prefer_api.
""")
class CommandOptions(HelpProvider):
"""Additional help about gsutil's interaction with Cloud Storage APIs."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='apis',
help_name_aliases=['XML', 'JSON', 'api', 'force_api', 'prefer_api'],
help_type='additional_help',
help_one_line_summary='Cloud Storage APIs',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
| gpl-3.0 |